code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# The model for the skin cancer classifier
# Import the libraries
import numpy as np
import keras
from keras import backend as K
from keras.layers.core import Dense, Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.models import model_from_yaml
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
# Check if GPU is available
# NOTE(review): TF1-style backend call; queries the visible GPU devices, the
# returned list is discarded (called only for its console/log side effect).
K.tensorflow_backend._get_available_gpus()
# The paths for the training and validation images
train_path = 'base_dir/train_dir'
valid_path = 'base_dir/val_dir'
# Declare a few useful values
num_train_samples = 9013
num_val_samples = 1002
train_batch_size = 10
val_batch_size = 10
image_size = 224  # MobileNet's expected input resolution
# Declare how many steps are needed in an iteration.
# ceil() keeps the final partial batch; cast to int because Keras expects
# integer step counts, not the numpy floats that np.ceil returns.
train_steps = int(np.ceil(num_train_samples / train_batch_size))
val_steps = int(np.ceil(num_val_samples / val_batch_size))
# Set up generators
# Each generator applies MobileNet's preprocess_input to every image and
# streams batches straight from the class-per-subdirectory layout on disk.
train_batches = ImageDataGenerator(
    preprocessing_function= \
    keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path,
    target_size=(image_size, image_size),
    batch_size=train_batch_size)
valid_batches = ImageDataGenerator(
    preprocessing_function= \
    keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
    batch_size=val_batch_size)
# NOTE(review): the "test" generator reads the validation directory again;
# shuffle=False keeps sample order fixed so predictions line up with .classes.
test_batches = ImageDataGenerator(
    preprocessing_function= \
    keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
    batch_size=val_batch_size,
    shuffle=False)
# Show the class-name -> index mapping the generators inferred from folders
print(valid_batches.class_indices)
# Create a MobileNet model
from keras.utils.vis_utils import plot_model
import os
# Graphviz must be on PATH for plot_model; hard-coded Windows install location.
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
mobile = keras.applications.mobilenet.MobileNet()
# Render the architecture diagram to disk (TB = top-to-bottom layout)
plot_model(mobile, to_file='model_plot.png', show_shapes=True, show_layer_names=True, rankdir='TB')
# See a summary of the layers in the model
mobile.summary()
# Modify the model
# Exclude the last 5 layers of the model
# (layers[-6].output taps the network just before its original classifier head)
x = mobile.layers[-6].output
# Add a dropout and dense layer for predictions
x = Dropout(0.25)(x)
predictions = Dense(7, activation='softmax')(x)  # 7 skin-lesion classes (see class_weights below)
# Create a new model with the new outputs
model = Model(inputs=mobile.input, outputs=predictions)
# See a summary of the new layers in the model
model.summary()
# Save the architecture as a YAML file
model_yaml = model.to_yaml()
with open("model.yaml", "w") as yaml_file:
    yaml_file.write(model_yaml)
# Save the model as a json file
model_json = model.to_json()
with open("model.json", "w") as j_file:
    j_file.write(model_json)
# Freeze the weights of the layers that we aren't training (training the last 23)
for layer in model.layers[:-23]:
    layer.trainable = False
# Train the model
# Define Top2 and Top3 Accuracy
from keras.metrics import categorical_accuracy, top_k_categorical_accuracy
def top_3_accuracy(y_true, y_pred):
    """Fraction of samples whose true class is within the top-3 predictions."""
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
def top_2_accuracy(y_true, y_pred):
    """Fraction of samples whose true class is within the top-2 predictions."""
    return top_k_categorical_accuracy(y_true, y_pred, k=2)
# Compile the model
model.compile(Adam(lr=0.01), loss='categorical_crossentropy', metrics=[categorical_accuracy, top_2_accuracy, top_3_accuracy])
# Add weights to make the model more sensitive to melanoma
# (class 4, 'mel', contributes 3x to the loss)
class_weights={
    0: 1.0, # akiec
    1: 1.0, # bcc
    2: 1.0, # bkl
    3: 1.0, # df
    4: 3.0, # mel
    5: 1.0, # nv
    6: 1.0, # vasc
}
# Declare the filepath for the saved model
filepath = "model.h5"
# Declare a checkpoint to save the best version of the model
# (best = highest validation top-3 accuracy seen so far)
checkpoint = ModelCheckpoint(filepath, monitor='val_top_3_accuracy', verbose=1,
                             save_best_only=True, mode='max')
# Reduce the learning rate as the learning stagnates
# (halve LR after 2 stagnant epochs, down to a floor of 1e-5)
reduce_lr = ReduceLROnPlateau(monitor='val_top_3_accuracy', factor=0.5, patience=2,
                              verbose=1, mode='max', min_lr=0.00001)
callbacks_list = [checkpoint, reduce_lr]
# Fit the model
history = model.fit_generator(train_batches,
                              steps_per_epoch=train_steps,
                              class_weight=class_weights,
                              validation_data=valid_batches,
                              validation_steps=val_steps,
                              epochs=30,
                              verbose=1,
                              callbacks=callbacks_list)
# Evaluate the model
# Evaluation of the last epoch (weights as left by the final training step)
val_loss, val_cat_acc, val_top_2_acc, val_top_3_acc = \
    model.evaluate_generator(test_batches, steps=val_steps)
print('val_loss:', val_loss)
print('val_cat_acc:', val_cat_acc)
print('val_top_2_acc:', val_top_2_acc)
print('val_top_3_acc:', val_top_3_acc)
# Evaluation of the best epoch
# (reload the checkpointed best weights before re-evaluating)
model.load_weights('model.h5')
val_loss, val_cat_acc, val_top_2_acc, val_top_3_acc = \
    model.evaluate_generator(test_batches, steps=val_steps)
print('val_loss:', val_loss)
print('val_cat_acc:', val_cat_acc)
print('val_top_2_acc:', val_top_2_acc)
print('val_top_3_acc:', val_top_3_acc)
# Create a confusion matrix of the test images
# Ground-truth labels in generator order (valid because shuffle=False above)
test_labels = test_batches.classes
# Make predictions
predictions = model.predict_generator(test_batches, steps=val_steps, verbose=1)
# Declare a function for plotting the confusion matrix
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    Print the confusion matrix and draw it on the current matplotlib figure.

    cm:        square confusion-matrix array (rows = true, cols = predicted)
    classes:   tick labels, one per class
    normalize: when True, convert each row to per-class fractions first
    title:     plot title
    cmap:      matplotlib colormap for the image
    """
    if normalize:
        # Divide every row by its total so cells become per-class fractions
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    # Write each value into its cell, flipping text colour on dark cells
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
# Build the confusion matrix from true labels vs the argmax class of each row
cm = confusion_matrix(test_labels, predictions.argmax(axis=1))
cm_plot_labels = ['akiec', 'bcc', 'bkl', 'df', 'mel','nv', 'vasc']
plot_confusion_matrix(cm, cm_plot_labels)
| [
"matplotlib.pyplot.ylabel",
"keras.utils.vis_utils.plot_model",
"keras.preprocessing.image.ImageDataGenerator",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"keras.models.Model",
"keras.optimizers.Adam",
"numpy.ceil",
"matplotlib.pyplot.xticks",
"keras.call... | [((524, 566), 'keras.backend.tensorflow_backend._get_available_gpus', 'K.tensorflow_backend._get_available_gpus', ([], {}), '()\n', (564, 566), True, 'from keras import backend as K\n'), ((890, 935), 'numpy.ceil', 'np.ceil', (['(num_train_samples / train_batch_size)'], {}), '(num_train_samples / train_batch_size)\n', (897, 935), True, 'import numpy as np\n'), ((948, 989), 'numpy.ceil', 'np.ceil', (['(num_val_samples / val_batch_size)'], {}), '(num_val_samples / val_batch_size)\n', (955, 989), True, 'import numpy as np\n'), ((1932, 1972), 'keras.applications.mobilenet.MobileNet', 'keras.applications.mobilenet.MobileNet', ([], {}), '()\n', (1970, 1972), False, 'import keras\n'), ((1973, 2076), 'keras.utils.vis_utils.plot_model', 'plot_model', (['mobile'], {'to_file': '"""model_plot.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)', 'rankdir': '"""TB"""'}), "(mobile, to_file='model_plot.png', show_shapes=True,\n show_layer_names=True, rankdir='TB')\n", (1983, 2076), False, 'from keras.utils.vis_utils import plot_model\n'), ((2392, 2439), 'keras.models.Model', 'Model', ([], {'inputs': 'mobile.input', 'outputs': 'predictions'}), '(inputs=mobile.input, outputs=predictions)\n', (2397, 2439), False, 'from keras.models import Model\n'), ((3706, 3809), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_top_3_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_top_3_accuracy', verbose=1,\n save_best_only=True, mode='max')\n", (3721, 3809), False, 'from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\n'), ((3901, 4013), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_top_3_accuracy"""', 'factor': '(0.5)', 'patience': '(2)', 'verbose': '(1)', 'mode': '"""max"""', 'min_lr': '(1e-05)'}), "(monitor='val_top_3_accuracy', factor=0.5, patience=2,\n verbose=1, mode='max', min_lr=1e-05)\n", (3918, 4013), False, 'from 
keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\n'), ((2276, 2289), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2283, 2289), False, 'from keras.layers.core import Dense, Dropout\n'), ((2307, 2337), 'keras.layers.core.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (2312, 2337), False, 'from keras.layers.core import Dense, Dropout\n'), ((3062, 3109), 'keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(3)'}), '(y_true, y_pred, k=3)\n', (3088, 3109), False, 'from keras.metrics import categorical_accuracy, top_k_categorical_accuracy\n'), ((3158, 3205), 'keras.metrics.top_k_categorical_accuracy', 'top_k_categorical_accuracy', (['y_true', 'y_pred'], {'k': '(2)'}), '(y_true, y_pred, k=2)\n', (3184, 3205), False, 'from keras.metrics import categorical_accuracy, top_k_categorical_accuracy\n'), ((3241, 3254), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (3245, 3254), False, 'from keras.optimizers import Adam\n'), ((5838, 5888), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (5848, 5888), True, 'import matplotlib.pyplot as plt\n'), ((5893, 5909), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5902, 5909), True, 'import matplotlib.pyplot as plt\n'), ((5914, 5928), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5926, 5928), True, 'import matplotlib.pyplot as plt\n'), ((5974, 6018), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (5984, 6018), True, 'import matplotlib.pyplot as plt\n'), ((6023, 6054), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (6033, 6054), True, 'import matplotlib.pyplot as plt\n'), ((6360, 6384), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (6370, 6384), True, 'import matplotlib.pyplot as plt\n'), ((6389, 6418), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (6399, 6418), True, 'import matplotlib.pyplot as plt\n'), ((6423, 6441), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6439, 6441), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1120), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'keras.applications.mobilenet.preprocess_input'}), '(preprocessing_function=keras.applications.mobilenet.\n preprocess_input)\n', (1045, 1120), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1261, 1354), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'keras.applications.mobilenet.preprocess_input'}), '(preprocessing_function=keras.applications.mobilenet.\n preprocess_input)\n', (1279, 1354), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1492, 1585), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'keras.applications.mobilenet.preprocess_input'}), '(preprocessing_function=keras.applications.mobilenet.\n preprocess_input)\n', (1510, 1585), False, 'from keras.preprocessing.image import ImageDataGenerator\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
import math
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
from matplotlib import font_manager
# Analysis parameters
resol = 0.1        # resolution step; only referenced by commented-out code below
query_TK = 293     # NOTE(review): declared but not used in this script -- confirm
total = 19         # upper bound of the '# Total' counter-width sweep
total_s = 15       # lower bound of the '# Total' sweep
sample = 6         # number of CNT1 cycles to wait (see resolution loop below)
sample_s = 1       # NOTE(review): declared but not used here -- confirm
ref_clk = 300000   # reference clock rate used to count CNT2 cycles
TK_min = 273       # sweep start temperature (K)
TK_max = 373       # sweep end temperature (K)
N_TK = 21          # number of measured temperature points in the CSV
N_TK_new = 21      # number of points after re-sampling the fitted curve
N_sample = 5       # data columns per temperature row in the CSV
file_name = ("hope.csv")
raw_data = []      # flat list of CSV cell values, reshaped into a matrix later
SIMULATION = 0     # flag for the commented-out stop_final clamp below
MANUAL = 0         # when truthy, replaces FRO_list_new with a synthetic curve
# Change Font to Arial Narrow
# Register every font file found in the current directory with matplotlib,
# then select Arial Narrow at 12pt for all subsequent plots.
font_dirs = ['./']
font_files = font_manager.findSystemFonts(fontpaths=font_dirs)
for font_file in font_files:
    font_manager.fontManager.addfont(font_file)
plt.rcParams['font.size'] = 12
plt.rcParams['font.family'] = 'Arial Narrow'
# Main Part Begins
# Read the tab-separated CSV, skipping the header row (i=0) and the first
# column (j=0), collecting every non-empty cell as a float.
with open(file_name) as f:
    spamreader = list(csv.reader(f, delimiter='\t'))
    for i in range(1, len(spamreader)):
        for j in range(1, len(spamreader[i])):
            if (spamreader[i][j] != ''):  # remove empty list
                raw_data.append(float(spamreader[i][j]))
# Reshape the flat CSV values into one row per temperature point.
raw_data = np.array(raw_data).reshape(N_TK, N_sample)
TK_list = np.linspace(TK_min, TK_max, N_TK)
TK_list_new = np.linspace(TK_min, TK_max, N_TK_new)
FRO_list = raw_data[:, 0]  # oscillator-frequency column (was assigned twice in the original)
FRO_fitting_list = np.log(FRO_list)*TK_list
# Fit ln(FRO) = k*TK + a once and unpack slope/intercept together; the
# original ran np.polyfit twice on identical inputs to extract them separately.
k, a = np.polyfit(TK_list, np.log(FRO_list), 1)
FRO_list_new = np.exp(a)*np.exp(k*TK_list_new)
FRO_fitting_list_new = np.log(FRO_list_new)*TK_list_new
# Calculate PSS after fitting the given curve
f1 = 5040
f2 = 5096
PSS = np.log(f1/np.exp(a))/k - np.log(f2/np.exp(a))/k
print("PSS: "+str(round(PSS, 3))+("°C/V"))
#code_list = np.true_divide(FRO_fitting_list/TK_list,resol)
#T_conv_list = np.true_divide(code_list, FRO_list)
#print(FRO_list)
#print(code_list)
# Optional manual override: replace the fitted frequency curve with a
# synthetic exponential (disabled while MANUAL == 0).
if (MANUAL):
    FRO_list_new = 0.05*np.exp(TK_list*0.043)
############################## Resolution v.s # Total #############################################################
# Sweep the counter width 'ttl' from total_s to total; for each fitted
# oscillator frequency, derive the achievable readout resolution.
res_total = []    # resolution per (ttl, temperature) pair
kk = []           # history of stop_final_cycle values (debug)
cycle_total = []  # CNT2 cycle counts per (ttl, temperature) pair
range_total = []  # filled later by the plotting section below
for ttl in range(total_s, (total+1)):
    res_readout_list_row = []
    cycle_row = []
    for idx, rofreq in enumerate(FRO_list_new):
        MSB_flag = 0
        stop_MSB = -1
        sample_time = np.true_divide(sample, rofreq)  # time taken to wait for #sample cycle from CNT1
        stop = np.binary_repr(int(sample_time * ref_clk), width=32)  # cycles CNT2 passed when CNT1 passes #sample cycles
        # Locate the position of the most-significant set bit of the CNT2 count.
        for index in range(0, len(list(stop))):
            if (MSB_flag == 0):
                if (list(stop)[index]=="1"):
                    MSB_flag = 1
                    stop_MSB = len(list(stop)) - 1 -index
        stop_final = ttl - stop_MSB - 1  # faster OSC, smaller sample_time, smaller MSB(stop1), larger stop_final(stop2) -> better resol
        #print(stop_MSB, stop_final)
        # if (not SIMULATION and stop_final > 18):
        #     stop_final = 18
        stop_final_cycle = np.power(2, stop_final)
        kk.append(stop_final_cycle)
        stop_final_time = np.true_divide(stop_final_cycle,rofreq)
        res_readout = FRO_list_new[idx]/(TK_list_new[idx]*stop_final_cycle) #TEST
        #res_readout = TK_list_new[idx]/stop_final_cycle #TEST
        res_readout_list_row.append(res_readout)
        cycle_row.append(stop_final_cycle)
        #print(stop_final_cycle, np.true_divide(rofreq, stop_final_cycle))
        #print(stop_final_cycle, FRO_fitting_list[idx]/(TK_list[idx]*res_readout))
    res_total.append(res_readout_list_row)
    cycle_total.append(cycle_row)
res_total = np.asarray(res_total)
cycle_total = np.asarray(cycle_total)
print(cycle_total)
#print(cycl_total)
# for i in range(0, 5, 1):
#     res_tmp = 1/np.power(10,i)
#     code_tmp = np.true_divide(FRO_fitting_list/TK_list,res_tmp)
#     T_conv_tmp = np.true_divide(code_tmp, FRO_list)
# Subplot 1: resolution vs counter width, one curve per temperature point.
fig = plt.figure()
ax = plt.subplot(2,2,1)
for colidx in range(0, N_TK_new):
    plt.plot(np.arange(total_s, (total+1)), res_total[:, colidx], "o-", label=str(TK_list_new[colidx]))
plt.xticks(range(total_s,(total+1)), rotation=45)
plt.xlabel("# Total")
plt.ylabel("Resolution")
plt.yscale("log")
#plt.legend(loc="upper right")
plt.title("Resolution vs #Total @ #Sample= "+str(sample))
plt.tight_layout()
plt.grid()
#plt.show()
# Dump the resolution matrix to a text file for offline inspection.
with open('ressim.txt', 'w') as f:
    # using csv.writer method from CSV package
    write = csv.writer(f)
    write.writerows(res_total)
#fig = plt.figure()
# Subplot 2: resolution vs temperature, one curve per counter width.
ax = plt.subplot(2,2,2)
for colidx in range(0, (total-total_s+1)):
    plt.plot(TK_list_new, res_total[colidx, :], "o-", label=str(colidx+total_s))
    #plt.plot(TK_list_new[1:], np.diff(TK_list_new)/np.diff(cycle_total[colidx, :],1), "o-", label=str(colidx+total_s))
#plt.xticks(TK_list_new, rotation=45)
# Remove repetitive legend
# NOTE(review): newLabels/newHandles are built but never passed to plt.legend
# (the legend call below is commented out) -- confirm intended.
handles, labels = plt.gca().get_legend_handles_labels()
newLabels, newHandles = [], []
for handle, label in zip(handles, labels):
    if label not in newLabels:
        newLabels.append(label)
        newHandles.append(handle)
plt.xlabel("TK")
plt.ylabel("Resolution")
plt.yscale("log")
#plt.legend(loc="upper right")
plt.title("Resolution vs TK for #Total @ #Sample= "+str(sample))
plt.tight_layout()
plt.grid()
#fig = plt.figure()
# Subplot 3: worst-case point (argmax of resolution) per counter width,
# while recording the temperatures where the resolution curve is decreasing.
ax = plt.subplot(2,2,3)
for colidx in range(0, (total-total_s+1)):
    range_row = []
    index = np.argmax(res_total[colidx, :])
    plt.plot(TK_list_new[index], res_total[colidx, :][index], "o", label=str(colidx+total_s))
    for idx in range(0, len(res_total[colidx, :])-1):
        if (res_total[colidx, idx]>res_total[colidx, idx+1]):
            range_row.append(float(TK_list_new[idx]))
    range_total.append(range_row)
# Remove repetitive legend
handles, labels = plt.gca().get_legend_handles_labels()
newLabels, newHandles = [], []
for handle, label in zip(handles, labels):
    if label not in newLabels:
        newLabels.append(label)
        newHandles.append(handle)
range_total = np.asarray(range_total)
# Pre-allocate a widened copy with room for the two sweep endpoints that
# subplot 4 prepends/appends to every row.
range_total_copy = np.empty([np.shape(range_total)[0], np.shape(range_total)[1]+2])
#plt.xticks(TK_list_new, rotation=45)
plt.xlabel("TK")
plt.ylabel("Resolution")
plt.yscale("log")
#plt.legend(loc="upper right")
plt.title("Worst Case TK & Res for #Total @ #Sample= "+str(sample))
plt.tight_layout()
plt.grid()
#fig = plt.figure()
# Subplot 4: per counter width, plot the recorded temperature range padded
# with the sweep endpoints on both sides.
ax = plt.subplot(2,2,4)
for colidx in range(0, (total-total_s+1)):
    tmp_list = range_total[colidx, :].tolist()
    tmp_list.append(TK_list_new[-1])
    tmp_list.insert(0, TK_list_new[0])
    range_total_copy[colidx, :] = np.array(tmp_list)
    plt.plot(range_total_copy[colidx, :],np.repeat(colidx, len(tmp_list)),"o-", label=str(colidx+total_s))
# Remove repetitive legend
handles, labels = plt.gca().get_legend_handles_labels()
newLabels, newHandles = [], []
for handle, label in zip(handles, labels):
    if label not in newLabels:
        newLabels.append(label)
        newHandles.append(handle)
plt.xlabel("TK")
plt.ylabel("# Total")
plt.xticks(range_total_copy[0, :])
#plt.legend(loc="upper right")
plt.title("Range for #Total @ #Sample= "+str(sample))#+"\n Ranges: "+str(range_total_copy[0, :]))
plt.grid()
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.yscale",
"csv.reader",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"matplotlib.font_mana... | [((535, 584), 'matplotlib.font_manager.findSystemFonts', 'font_manager.findSystemFonts', ([], {'fontpaths': 'font_dirs'}), '(fontpaths=font_dirs)\n', (563, 584), False, 'from matplotlib import font_manager\n'), ((1156, 1189), 'numpy.linspace', 'np.linspace', (['TK_min', 'TK_max', 'N_TK'], {}), '(TK_min, TK_max, N_TK)\n', (1167, 1189), True, 'import numpy as np\n'), ((1205, 1242), 'numpy.linspace', 'np.linspace', (['TK_min', 'TK_max', 'N_TK_new'], {}), '(TK_min, TK_max, N_TK_new)\n', (1216, 1242), True, 'import numpy as np\n'), ((3482, 3503), 'numpy.asarray', 'np.asarray', (['res_total'], {}), '(res_total)\n', (3492, 3503), True, 'import numpy as np\n'), ((3519, 3542), 'numpy.asarray', 'np.asarray', (['cycle_total'], {}), '(cycle_total)\n', (3529, 3542), True, 'import numpy as np\n'), ((3767, 3779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3777, 3779), True, 'import matplotlib.pyplot as plt\n'), ((3786, 3806), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3797, 3806), True, 'import matplotlib.pyplot as plt\n'), ((3995, 4016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# Total"""'], {}), "('# Total')\n", (4005, 4016), True, 'import matplotlib.pyplot as plt\n'), ((4018, 4042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Resolution"""'], {}), "('Resolution')\n", (4028, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4061), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4054, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4172), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4170, 4172), True, 'import matplotlib.pyplot as plt\n'), ((4174, 4184), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4182, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4384, 4404), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4395, 4404), True, 'import 
matplotlib.pyplot as plt\n'), ((4941, 4957), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TK"""'], {}), "('TK')\n", (4951, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4959, 4983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Resolution"""'], {}), "('Resolution')\n", (4969, 4983), True, 'import matplotlib.pyplot as plt\n'), ((4985, 5002), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4995, 5002), True, 'import matplotlib.pyplot as plt\n'), ((5102, 5120), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5118, 5120), True, 'import matplotlib.pyplot as plt\n'), ((5122, 5132), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5130, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5164, 5184), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (5175, 5184), True, 'import matplotlib.pyplot as plt\n'), ((5841, 5864), 'numpy.asarray', 'np.asarray', (['range_total'], {}), '(range_total)\n', (5851, 5864), True, 'import numpy as np\n'), ((5992, 6008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TK"""'], {}), "('TK')\n", (6002, 6008), True, 'import matplotlib.pyplot as plt\n'), ((6010, 6034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Resolution"""'], {}), "('Resolution')\n", (6020, 6034), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6053), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6046, 6053), True, 'import matplotlib.pyplot as plt\n'), ((6156, 6174), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6172, 6174), True, 'import matplotlib.pyplot as plt\n'), ((6176, 6186), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6184, 6186), True, 'import matplotlib.pyplot as plt\n'), ((6216, 6236), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (6227, 6236), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6824), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""TK"""'], {}), "('TK')\n", (6818, 6824), True, 'import matplotlib.pyplot as plt\n'), ((6826, 6847), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# Total"""'], {}), "('# Total')\n", (6836, 6847), True, 'import matplotlib.pyplot as plt\n'), ((6849, 6883), 'matplotlib.pyplot.xticks', 'plt.xticks', (['range_total_copy[0, :]'], {}), '(range_total_copy[0, :])\n', (6859, 6883), True, 'import matplotlib.pyplot as plt\n'), ((7016, 7026), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7024, 7026), True, 'import matplotlib.pyplot as plt\n'), ((7028, 7046), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7044, 7046), True, 'import matplotlib.pyplot as plt\n'), ((7048, 7058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7056, 7058), True, 'import matplotlib.pyplot as plt\n'), ((620, 663), 'matplotlib.font_manager.fontManager.addfont', 'font_manager.fontManager.addfont', (['font_file'], {}), '(font_file)\n', (652, 663), False, 'from matplotlib import font_manager\n'), ((1315, 1331), 'numpy.log', 'np.log', (['FRO_list'], {}), '(FRO_list)\n', (1321, 1331), True, 'import numpy as np\n'), ((1454, 1463), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (1460, 1463), True, 'import numpy as np\n'), ((1464, 1487), 'numpy.exp', 'np.exp', (['(k * TK_list_new)'], {}), '(k * TK_list_new)\n', (1470, 1487), True, 'import numpy as np\n'), ((1510, 1530), 'numpy.log', 'np.log', (['FRO_list_new'], {}), '(FRO_list_new)\n', (1516, 1530), True, 'import numpy as np\n'), ((4302, 4315), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4312, 4315), False, 'import csv\n'), ((5256, 5287), 'numpy.argmax', 'np.argmax', (['res_total[colidx, :]'], {}), '(res_total[colidx, :])\n', (5265, 5287), True, 'import numpy as np\n'), ((6428, 6446), 'numpy.array', 'np.array', (['tmp_list'], {}), '(tmp_list)\n', (6436, 6446), True, 'import numpy as np\n'), ((814, 843), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, 
delimiter='\\t')\n", (824, 843), False, 'import csv\n'), ((1365, 1381), 'numpy.log', 'np.log', (['FRO_list'], {}), '(FRO_list)\n', (1371, 1381), True, 'import numpy as np\n'), ((1414, 1430), 'numpy.log', 'np.log', (['FRO_list'], {}), '(FRO_list)\n', (1420, 1430), True, 'import numpy as np\n'), ((1903, 1926), 'numpy.exp', 'np.exp', (['(TK_list * 0.043)'], {}), '(TK_list * 0.043)\n', (1909, 1926), True, 'import numpy as np\n'), ((2287, 2317), 'numpy.true_divide', 'np.true_divide', (['sample', 'rofreq'], {}), '(sample, rofreq)\n', (2301, 2317), True, 'import numpy as np\n'), ((2905, 2928), 'numpy.power', 'np.power', (['(2)', 'stop_final'], {}), '(2, stop_final)\n', (2913, 2928), True, 'import numpy as np\n'), ((2981, 3021), 'numpy.true_divide', 'np.true_divide', (['stop_final_cycle', 'rofreq'], {}), '(stop_final_cycle, rofreq)\n', (2995, 3021), True, 'import numpy as np\n'), ((3851, 3880), 'numpy.arange', 'np.arange', (['total_s', '(total + 1)'], {}), '(total_s, total + 1)\n', (3860, 3880), True, 'import numpy as np\n'), ((4734, 4743), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4741, 4743), True, 'import matplotlib.pyplot as plt\n'), ((5622, 5631), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5629, 5631), True, 'import matplotlib.pyplot as plt\n'), ((6601, 6610), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6608, 6610), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1065), 'numpy.array', 'np.array', (['raw_data'], {}), '(raw_data)\n', (1055, 1065), True, 'import numpy as np\n'), ((5895, 5916), 'numpy.shape', 'np.shape', (['range_total'], {}), '(range_total)\n', (5903, 5916), True, 'import numpy as np\n'), ((1631, 1640), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (1637, 1640), True, 'import numpy as np\n'), ((1654, 1663), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (1660, 1663), True, 'import numpy as np\n'), ((5921, 5942), 'numpy.shape', 'np.shape', (['range_total'], {}), '(range_total)\n', (5929, 5942), True, 'import numpy as 
np\n')] |
import random
import cv2
# NOTE(review): presumably disables OpenCV's internal threading to avoid
# oversubscription when used inside multi-worker data loading -- confirm.
cv2.setNumThreads(0)
import imgaug as ia
import numpy as np
import torch
from PIL import Image
from trains import Task
from imgaug import augmenters as iaa
from torchvision.transforms import functional as F
from torchvision.transforms import transforms
def get_transform(train, image_size):
    """Build the paired (image, target) transform pipeline.

    train:      when truthy, include the random augmentation stack; otherwise
                just resize and convert to tensor.
    image_size: square side length images are resized to.

    Returns a Compose over the transform classes defined below in this module.
    (The original named its local list `transforms`, shadowing the imported
    torchvision `transforms` module, and built the eval list even when it was
    immediately discarded in the train case.)
    """
    if train:
        pipeline = [Resize(size=(image_size, image_size)),
                    RandomGrayscale(p=0.05),
                    RandomApply([ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.15)], p=0.25),
                    ToTensor(),
                    RandomApply([AddGaussianNoise(0., 0.1)], p=0.25),
                    RandomHorizontalFlip(),
                    ]
    else:
        pipeline = [Resize(size=(image_size, image_size)), ToTensor()]
    return Compose(pipeline)
def get_augmentations(train, image_size):
    """Build the imgaug pipeline for this dataset.

    Eval: a bare resize to (image_size, image_size). Train: resize plus a
    randomized stack (flip, salt-and-pepper/Gaussian noise, blur, perspective,
    channel shuffle, contrast/brightness/hue-saturation jitter, affine); the
    jitter magnitudes come from the current trains Task's model config via
    get_transform_values().
    """
    augmentation = iaa.Sequential([
        iaa.Resize({"height": image_size, "width": image_size})
    ])
    if train:
        brightness, contrast, saturation, hue, rotate, shear = get_transform_values(Task.current_task())
        augmentation = iaa.Sequential([
            iaa.Resize({"height": image_size, "width": image_size}),
            iaa.Fliplr(0.5),  # horizontal flips
            # iaa.Crop(percent=(0, 0.1)), # random crops
            # iaa.RemoveCBAsByOutOfImageFraction(0.6),
            # 25% of images get exactly one kind of pixel noise
            iaa.Sometimes(0.25, iaa.OneOf([iaa.SaltAndPepper(p=(0.0, 0.04), per_channel=0.5),
                                           iaa.AdditiveGaussianNoise(scale=(0, 15), per_channel=0.5)])),
            iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0, 1.75))),
            iaa.Sometimes(0.25, iaa.PerspectiveTransform(scale=(0.0, 0.05))),
            iaa.ChannelShuffle(0.1),
            iaa.LinearContrast((1-contrast, 1+contrast)),  # Strengthen or weaken the contrast in each image.
            iaa.Multiply((1-brightness, 1+brightness)),  # Make some images brighter and some darker.
            iaa.MultiplyHueAndSaturation(mul_hue=(1-hue, 1+hue), mul_saturation=(1-saturation, 1+saturation)),
            # Apply affine transformations to each image.
            iaa.Affine(
                scale={"x": (0.8, 1.15), "y": (0.8, 1.15)},
                translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                rotate=(-rotate, rotate),
                shear=(-shear, shear)),
            # iaa.RemoveCBAsByOutOfImageFraction(0.6),
        ])
    return augmentation
def get_transform_values(task):
    """Read augmentation magnitudes from the task's model config, filling in
    defaults for missing keys and writing the completed config back.

    task: an object exposing get_model_config_dict() / set_model_config().

    Returns (brightness, contrast, saturation, hue, rotate, shear).
    (The original also contained a `for key, val in items(): d[key] = val`
    loop, a no-op, which is removed here.)
    """
    configuration_data = task.get_model_config_dict()
    defaults = {'brightness': 0.3, 'contrast': 0.3, 'saturation': 0.3,
                'hue': 0.3, 'rotate': 30, 'shear': 15}
    for key, default in defaults.items():
        configuration_data.setdefault(key, default)
    task.set_model_config(config_dict=configuration_data)
    return configuration_data['brightness'], configuration_data['contrast'], configuration_data['saturation'], \
           configuration_data['hue'], configuration_data['rotate'], configuration_data['shear']
def ToImgaug(image, target):
    """Convert a PIL image + detection target into imgaug types.

    The image becomes a numpy array; target['boxes'] becomes an
    ia.BoundingBoxesOnImage whose boxes carry their class label.
    NOTE(review): when a row's box is empty, a bare [] is placed inside
    BoundingBoxesOnImage rather than being skipped -- confirm intended.
    """
    image = np.array(image)
    target['boxes'] = ia.BoundingBoxesOnImage(
        [ia.BoundingBox(x1=float(box[0]), y1=float(box[1]), x2=float(box[2]), y2=float(box[3]), label=label)
         if len(box)>0 else [] for box, label in zip(target['boxes'], target['labels'].tolist())], shape=image.shape)
    return image, target
def ImgaugToTensor(image, target):
    """Convert an imgaug-augmented target back to array/tensor form.

    Boxes that ended up fully outside the image are dropped and the rest are
    clipped to the image; a frame left without boxes gets empty (0,1)/(0,4)
    tensors so it can be used as a safe negative sample downstream.
    """
    # image = F.to_tensor(F.to_pil_image(image))
    def _empty_target(tgt):
        # Placeholder tensors for a frame with no ground-truth boxes.
        tgt['labels'] = torch.zeros((0, 1), dtype=torch.int64)
        tgt['boxes'] = torch.zeros((0, 4), dtype=torch.float32)
        return tgt

    if len(target['boxes']) == 0:
        return image, _empty_target(target)

    target['labels'] = np.array(
        [bb.label for bb in target['boxes'].remove_out_of_image().clip_out_of_image()],
        dtype=np.int64)
    target['boxes'] = np.array(
        [[bb.x1, bb.y1, bb.x2, bb.y2] for bb in target['boxes'].remove_out_of_image().clip_out_of_image()],
        dtype=np.float32)
    if len(target['boxes']) == 0:
        target = _empty_target(target)
    return image, target
class RandomGeneral:
    """Base class for paired (image, target) transforms.

    Subclasses must override __call__ to transform image and target together.
    """
    def __call__(self, img, target):
        # The original evaluated NotImplementedError() without raising it,
        # silently returning None; raise so a missing override fails loudly.
        raise NotImplementedError()
class RandomApply(RandomGeneral, transforms.RandomApply):
    # With probability self.p, apply every wrapped transform in order, threading
    # the detection target through each one (torchvision's RandomApply handles
    # images only); otherwise pass the pair through untouched.
    def __call__(self, img, target):
        if self.p < random.random():
            return img, target
        for t in self.transforms:
            img, target = t(img, target)
        return img, target
class RandomGrayscale(RandomGeneral, transforms.RandomGrayscale):
    # With probability self.p, convert the PIL image to grayscale while keeping
    # the original channel count (1 channel for mode 'L', else 3); the target
    # passes through unchanged either way.
    def __call__(self, img, target):
        num_output_channels = 1 if img.mode == 'L' else 3
        if random.random() < self.p:
            return F.to_grayscale(img, num_output_channels=num_output_channels), target
        return img, target
class ColorJitter(RandomGeneral, transforms.ColorJitter):
    # Sample a random brightness/contrast/saturation/hue jitter from the ranges
    # configured on the base class and apply it to the image; the target passes
    # through untouched.
    def __call__(self, img, target):
        transform = self.get_params(self.brightness, self.contrast,
                                    self.saturation, self.hue)
        return transform(img), target
class AddGaussianNoise(object):
    """Add Gaussian noise (scaled by std, shifted by mean) to an image tensor.

    The noisy result is clamped back into [0, 1]; the target is untouched.
    """
    def __init__(self, mean=0., std=1.):
        self.std = std
        self.mean = mean

    def __call__(self, image, target):
        noise = torch.randn(image.size()) * self.std
        noisy = (image + noise + self.mean).clamp(0, 1)
        return noisy, target

    def __repr__(self):
        return f'{self.__class__.__name__}(mean={self.mean}, std={self.std})'
def _flip_coco_person_keypoints(kps, width):
flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
flipped_data = kps[:, flip_inds]
flipped_data[..., 0] = width - flipped_data[..., 0]
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = flipped_data[..., 2] == 0
flipped_data[inds] = 0
return flipped_data
class ToTensor(object):
    """Convert the image to a torch tensor; the target passes through as-is."""
    def __call__(self, image, target):
        return F.to_tensor(image), target
class Compose(object):
    """Chain several (image, target) transforms into a single callable."""
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target=None):
        # Thread the pair through every transform in order.
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target
class RandomHorizontalFlip(object):
    """Horizontally flip an image tensor and its target with probability prob.

    Boxes, masks and keypoints in the target dict are mirrored to match.
    """
    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        # No-op branch: keep the pair unchanged.
        if random.random() >= self.prob:
            return image, target
        width = image.shape[-1]
        image = image.flip(-1)
        if len(target.get("boxes", ())) > 0:
            bbox = target["boxes"]
            # Mirror x-coordinates, swapping x1/x2 so x1 <= x2 still holds.
            bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
            target["boxes"] = bbox
        if "masks" in target:
            target["masks"] = target["masks"].flip(-1)
        if "keypoints" in target:
            target["keypoints"] = _flip_coco_person_keypoints(
                target["keypoints"], width)
        return image, target
class Resize(object):
    """Resize the input PIL image to given size.
    If boxes is not None, resize boxes accordingly.
    Args:
        size: (tuple or int)
        - if is tuple, resize image to the size.
        - if is int, resize the shorter side to the size while maintaining the aspect ratio.
        max_size: (int) when size is int, limit the image longer size to max_size.
                    This is essential to limit the usage of GPU memory.
        random_interpolation: (bool) randomly choose a resize interpolation method.
    Returns:
        img: (PIL.Image) resized image.
        boxes: (tensor) resized boxes.
    Example:
    >> img, boxes = resize(img, boxes, 600)  # resize shorter side to 600
    >> img, boxes = resize(img, boxes, (500,600))  # resize image size to (500,600)
    >> img, _ = resize(img, None, (500,600))  # resize image only
    """
    def __init__(self, size, max_size=1000, random_interpolation=False):
        self.size = size
        self.max_size = max_size
        self.random_interpolation = random_interpolation

    def __call__(self, image, target):
        """Resize the input PIL image to given size.
        If boxes is not None, resize boxes accordingly.
        Args:
          image: (PIL.Image) image to be resized.
          target: (dict) with optional "boxes" [#obj,4] and "masks" entries.
        """
        w, h = image.size
        if isinstance(self.size, int):
            # Scale the shorter side to self.size, but cap the scale so the
            # longer side never exceeds self.max_size.
            size_min = min(w, h)
            size_max = max(w, h)
            sw = sh = float(self.size) / size_min
            if sw * size_max > self.max_size:
                sw = sh = float(self.max_size) / size_max
            ow = int(w * sw + 0.5)
            oh = int(h * sh + 0.5)
        else:
            ow, oh = self.size
            sw = float(ow) / w
            sh = float(oh) / h

        method = random.choice([
            Image.BOX,
            Image.NEAREST,
            Image.HAMMING,
            Image.BICUBIC,
            Image.LANCZOS,
            Image.BILINEAR]) if self.random_interpolation else Image.BILINEAR
        image = image.resize((ow, oh), method)

        if target is not None and "masks" in target:
            # Bug fix: masks were interpolated to a hard-coded (512, 512),
            # which only matched images resized to exactly 512x512. They must
            # track the resized image dimensions (oh, ow) in (H, W) order.
            resized_masks = torch.nn.functional.interpolate(
                input=target["masks"][None].float(),
                size=(oh, ow),
                mode="nearest",
            )[0].type_as(target["masks"])
            target["masks"] = resized_masks

        if target is not None and "boxes" in target and len(target["boxes"]) > 0:
            # Scale box coordinates by the same per-axis factors as the image.
            resized_boxes = target["boxes"] * torch.tensor([sw, sh, sw, sh])
            target["boxes"] = resized_boxes
        return image, target
#
# import cv2
# import numpy as np
# class RandomRotate(object):
# """Randomly rotates an image
#
# Bounding boxes which have an area of less than 25% in the remaining in the
# transformed image is dropped. The resolution is maintained, and the remaining
# area if any is filled by black color.
#
# Parameters
# ----------
# angle: float or tuple(float)
# if **float**, the image is rotated by a factor drawn
# randomly from a range (-`angle`, `angle`). If **tuple**,
# the `angle` is drawn randomly from values specified by the
# tuple
#
# Returns
# -------
#
# numpy.ndaaray
# Rotated image in the numpy format of shape `HxWxC`
#
# numpy.ndarray
# Tranformed bounding box co-ordinates of the format `n x 4` where n is
# number of bounding boxes and 4 represents `x1,y1,x2,y2` of the box
#
# """
#
# def __init__(self, angle=10):
# self.angle = angle
#
# if type(self.angle) == tuple:
# assert len(self.angle) == 2, "Invalid range"
#
# else:
# self.angle = (-self.angle, self.angle)
#
# def __call__(self, img, bboxes):
#
# angle = random.uniform(*self.angle)
#
# w, h = img.shape[1], img.shape[0]
# cx, cy = w // 2, h // 2
#
# img = rotate_im(img, angle)
#
# corners = get_corners(bboxes)
#
# corners = np.hstack((corners, bboxes[:, 4:]))
#
# corners[:, :8] = rotate_box(corners[:, :8], angle, cx, cy, h, w)
#
# new_bbox = get_enclosing_box(corners)
#
# scale_factor_x = img.shape[1] / w
#
# scale_factor_y = img.shape[0] / h
#
# img = cv2.resize(img, (w, h))
#
# new_bbox[:, :4] /= [scale_factor_x, scale_factor_y, scale_factor_x, scale_factor_y]
#
# bboxes = new_bbox
#
# bboxes = clip_box(bboxes, [0, 0, w, h], 0.25)
#
# return img, bboxes
#
#
# class RandomShear(object):
# """Randomly shears an image in horizontal direction
#
#
# Bounding boxes which have an area of less than 25% in the remaining in the
# transformed image is dropped. The resolution is maintained, and the remaining
# area if any is filled by black color.
#
# Parameters
# ----------
# shear_factor: float or tuple(float)
# if **float**, the image is sheared horizontally by a factor drawn
# randomly from a range (-`shear_factor`, `shear_factor`). If **tuple**,
# the `shear_factor` is drawn randomly from values specified by the
# tuple
#
# Returns
# -------
#
# numpy.ndaaray
# Sheared image in the numpy format of shape `HxWxC`
#
# numpy.ndarray
# Tranformed bounding box co-ordinates of the format `n x 4` where n is
# number of bounding boxes and 4 represents `x1,y1,x2,y2` of the box
#
# """
#
# def __init__(self, shear_factor=0.2):
# self.shear_factor = shear_factor
#
# if type(self.shear_factor) == tuple:
# assert len(self.shear_factor) == 2, "Invalid range for scaling factor"
# else:
# # self.shear_factor = (-self.shear_factor, self.shear_factor)
# self.shear_factor = (0, self.shear_factor)
#
# shear_factor = random.uniform(*self.shear_factor)
#
# def __call__(self, img, bboxes):
#
# shear_factor = random.uniform(*self.shear_factor)
#
# w, h = img.shape[1], img.shape[0]
#
# # if shear_factor < 0:
# # img, bboxes = HorizontalFlip()(img, bboxes)
#
# M = np.array([[1, abs(shear_factor), 0], [0, 1, 0]])
#
# nW = img.shape[1] + abs(shear_factor * img.shape[0])
#
# bboxes[:, [0, 2]] += ((bboxes[:, [1, 3]]) * abs(shear_factor)).astype(int)
#
# img = cv2.warpAffine(img, M, (int(nW), img.shape[0]))
#
# # if shear_factor < 0:
# # img, bboxes = HorizontalFlip()(img, bboxes)
#
# img = cv2.resize(img, (w, h))
#
# scale_factor_x = nW / w
#
# bboxes[:, :4] /= [scale_factor_x, 1, scale_factor_x, 1]
#
# return img, bboxes
#
#
# class RandomScale(object):
# """Randomly scales an image
#
#
# Bounding boxes which have an area of less than 25% in the remaining in the
# transformed image is dropped. The resolution is maintained, and the remaining
# area if any is filled by black color.
#
# Parameters
# ----------
# scale: float or tuple(float)
# if **float**, the image is scaled by a factor drawn
# randomly from a range (1 - `scale` , 1 + `scale`). If **tuple**,
# the `scale` is drawn randomly from values specified by the
# tuple
#
# Returns
# -------
#
# numpy.ndaaray
# Scaled image in the numpy format of shape `HxWxC`
#
# numpy.ndarray
# Tranformed bounding box co-ordinates of the format `n x 4` where n is
# number of bounding boxes and 4 represents `x1,y1,x2,y2` of the box
#
# """
#
# def __init__(self, scale=0.2, diff=False):
# self.scale = scale
#
# if type(self.scale) == tuple:
# assert len(self.scale) == 2, "Invalid range"
# assert self.scale[0] > -1, "Scale factor can't be less than -1"
# assert self.scale[1] > -1, "Scale factor can't be less than -1"
# else:
# assert self.scale > 0, "Please input a positive float"
# self.scale = (max(-1, -self.scale), self.scale)
#
# self.diff = diff
#
# def __call__(self, img, bboxes):
#
# # Chose a random digit to scale by
#
# img_shape = img.shape
#
# if self.diff:
# scale_x = random.uniform(*self.scale)
# scale_y = random.uniform(*self.scale)
# else:
# scale_x = random.uniform(*self.scale)
# scale_y = scale_x
#
# resize_scale_x = 1 + scale_x
# resize_scale_y = 1 + scale_y
#
# img = cv2.resize(img, None, fx=resize_scale_x, fy=resize_scale_y)
#
# bboxes[:, :4] *= [resize_scale_x, resize_scale_y, resize_scale_x, resize_scale_y]
#
# canvas = np.zeros(img_shape, dtype=np.uint8)
#
# y_lim = int(min(resize_scale_y, 1) * img_shape[0])
# x_lim = int(min(resize_scale_x, 1) * img_shape[1])
#
# canvas[:y_lim, :x_lim, :] = img[:y_lim, :x_lim, :]
#
# img = canvas
# bboxes = clip_box(bboxes, [0, 0, 1 + img_shape[1], img_shape[0]], 0.25)
#
# return img, bboxes
#
#
# def bbox_area(bbox):
# return (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])
#
#
# def clip_box(bbox, clip_box, alpha):
# """Clip the bounding boxes to the borders of an image
#
# Parameters
# ----------
#
# bbox: numpy.ndarray
# Numpy array containing bounding boxes of shape `N X 4` where N is the
# number of bounding boxes and the bounding boxes are represented in the
# format `x1 y1 x2 y2`
#
# clip_box: numpy.ndarray
# An array of shape (4,) specifying the diagonal co-ordinates of the image
# The coordinates are represented in the format `x1 y1 x2 y2`
#
# alpha: float
# If the fraction of a bounding box left in the image after being clipped is
# less than `alpha` the bounding box is dropped.
#
# Returns
# -------
#
# numpy.ndarray
# Numpy array containing **clipped** bounding boxes of shape `N X 4` where N is the
# number of bounding boxes left are being clipped and the bounding boxes are represented in the
# format `x1 y1 x2 y2`
#
# """
# ar_ = (bbox_area(bbox))
# x_min = np.maximum(bbox[:, 0], clip_box[0]).reshape(-1, 1)
# y_min = np.maximum(bbox[:, 1], clip_box[1]).reshape(-1, 1)
# x_max = np.minimum(bbox[:, 2], clip_box[2]).reshape(-1, 1)
# y_max = np.minimum(bbox[:, 3], clip_box[3]).reshape(-1, 1)
#
# bbox = np.hstack((x_min, y_min, x_max, y_max, bbox[:, 4:]))
#
# delta_area = ((ar_ - bbox_area(bbox)) / ar_)
#
# mask = (delta_area < (1 - alpha)).astype(int)
#
# bbox = bbox[mask == 1, :]
#
# return bbox
#
#
# def rotate_im(image, angle):
# """Rotate the image.
#
# Rotate the image such that the rotated image is enclosed inside the tightest
# rectangle. The area not occupied by the pixels of the original image is colored
# black.
#
# Parameters
# ----------
#
# image : numpy.ndarray
# numpy image
#
# angle : float
# angle by which the image is to be rotated
#
# Returns
# -------
#
# numpy.ndarray
# Rotated Image
#
# """
# # grab the dimensions of the image and then determine the
# # centre
# (h, w) = image.shape[:2]
# (cX, cY) = (w // 2, h // 2)
#
# # grab the rotation matrix (applying the negative of the
# # angle to rotate clockwise), then grab the sine and cosine
# # (i.e., the rotation components of the matrix)
# M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
# cos = np.abs(M[0, 0])
# sin = np.abs(M[0, 1])
#
# # compute the new bounding dimensions of the image
# nW = int((h * sin) + (w * cos))
# nH = int((h * cos) + (w * sin))
#
# # adjust the rotation matrix to take into account translation
# M[0, 2] += (nW / 2) - cX
# M[1, 2] += (nH / 2) - cY
#
# # perform the actual rotation and return the image
# image = cv2.warpAffine(image, M, (nW, nH))
#
# # image = cv2.resize(image, (w,h))
# return image
#
#
# def get_corners(bboxes):
# """Get corners of bounding boxes
#
# Parameters
# ----------
#
# bboxes: numpy.ndarray
# Numpy array containing bounding boxes of shape `N X 4` where N is the
# number of bounding boxes and the bounding boxes are represented in the
# format `x1 y1 x2 y2`
#
# returns
# -------
#
# numpy.ndarray
# Numpy array of shape `N x 8` containing N bounding boxes each described by their
# corner co-ordinates `x1 y1 x2 y2 x3 y3 x4 y4`
#
# """
# width = (bboxes[:, 2] - bboxes[:, 0]).reshape(-1, 1)
# height = (bboxes[:, 3] - bboxes[:, 1]).reshape(-1, 1)
#
# x1 = bboxes[:, 0].reshape(-1, 1)
# y1 = bboxes[:, 1].reshape(-1, 1)
#
# x2 = x1 + width
# y2 = y1
#
# x3 = x1
# y3 = y1 + height
#
# x4 = bboxes[:, 2].reshape(-1, 1)
# y4 = bboxes[:, 3].reshape(-1, 1)
#
# corners = np.hstack((x1, y1, x2, y2, x3, y3, x4, y4))
#
# return corners
#
#
# def rotate_box(corners, angle, cx, cy, h, w):
# """Rotate the bounding box.
#
#
# Parameters
# ----------
#
# corners : numpy.ndarray
# Numpy array of shape `N x 8` containing N bounding boxes each described by their
# corner co-ordinates `x1 y1 x2 y2 x3 y3 x4 y4`
#
# angle : float
# angle by which the image is to be rotated
#
# cx : int
# x coordinate of the center of image (about which the box will be rotated)
#
# cy : int
# y coordinate of the center of image (about which the box will be rotated)
#
# h : int
# height of the image
#
# w : int
# width of the image
#
# Returns
# -------
#
# numpy.ndarray
# Numpy array of shape `N x 8` containing N rotated bounding boxes each described by their
# corner co-ordinates `x1 y1 x2 y2 x3 y3 x4 y4`
# """
#
# corners = corners.reshape(-1, 2)
# corners = np.hstack((corners, np.ones((corners.shape[0], 1), dtype=type(corners[0][0]))))
#
# M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
#
# cos = np.abs(M[0, 0])
# sin = np.abs(M[0, 1])
#
# nW = int((h * sin) + (w * cos))
# nH = int((h * cos) + (w * sin))
# # adjust the rotation matrix to take into account translation
# M[0, 2] += (nW / 2) - cx
# M[1, 2] += (nH / 2) - cy
# # Prepare the vector to be transformed
# calculated = np.dot(M, corners.T).T
#
# calculated = calculated.reshape(-1, 8)
#
# return calculated
#
#
# def get_enclosing_box(corners):
#     """Get an enclosing box for rotated corners of a bounding box
#
# Parameters
# ----------
#
# corners : numpy.ndarray
# Numpy array of shape `N x 8` containing N bounding boxes each described by their
# corner co-ordinates `x1 y1 x2 y2 x3 y3 x4 y4`
#
# Returns
# -------
#
# numpy.ndarray
# Numpy array containing enclosing bounding boxes of shape `N X 4` where N is the
# number of bounding boxes and the bounding boxes are represented in the
# format `x1 y1 x2 y2`
#
# """
# x_ = corners[:, [0, 2, 4, 6]]
# y_ = corners[:, [1, 3, 5, 7]]
#
# xmin = np.min(x_, 1).reshape(-1, 1)
# ymin = np.min(y_, 1).reshape(-1, 1)
# xmax = np.max(x_, 1).reshape(-1, 1)
# ymax = np.max(y_, 1).reshape(-1, 1)
#
# final = np.hstack((xmin, ymin, xmax, ymax, corners[:, 8:]))
#
# return final | [
"imgaug.augmenters.GaussianBlur",
"numpy.array",
"imgaug.augmenters.Resize",
"imgaug.augmenters.Fliplr",
"imgaug.augmenters.ChannelShuffle",
"trains.Task.current_task",
"imgaug.augmenters.MultiplyHueAndSaturation",
"imgaug.augmenters.LinearContrast",
"random.choice",
"imgaug.augmenters.AdditiveGau... | [((25, 45), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (42, 45), False, 'import cv2\n'), ((3390, 3405), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3398, 3405), True, 'import numpy as np\n'), ((3854, 3892), 'torch.zeros', 'torch.zeros', (['(0, 1)'], {'dtype': 'torch.int64'}), '((0, 1), dtype=torch.int64)\n', (3865, 3892), False, 'import torch\n'), ((3960, 4000), 'torch.zeros', 'torch.zeros', (['(0, 4)'], {'dtype': 'torch.float32'}), '((0, 4), dtype=torch.float32)\n', (3971, 4000), False, 'import torch\n'), ((6421, 6439), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['image'], {}), '(image)\n', (6432, 6439), True, 'from torchvision.transforms import functional as F\n'), ((911, 966), 'imgaug.augmenters.Resize', 'iaa.Resize', (["{'height': image_size, 'width': image_size}"], {}), "({'height': image_size, 'width': image_size})\n", (921, 966), True, 'from imgaug import augmenters as iaa\n'), ((1072, 1091), 'trains.Task.current_task', 'Task.current_task', ([], {}), '()\n', (1089, 1091), False, 'from trains import Task\n'), ((4863, 4878), 'random.random', 'random.random', ([], {}), '()\n', (4876, 4878), False, 'import random\n'), ((5187, 5202), 'random.random', 'random.random', ([], {}), '()\n', (5200, 5202), False, 'import random\n'), ((6868, 6883), 'random.random', 'random.random', ([], {}), '()\n', (6881, 6883), False, 'import random\n'), ((9324, 9430), 'random.choice', 'random.choice', (['[Image.BOX, Image.NEAREST, Image.HAMMING, Image.BICUBIC, Image.LANCZOS,\n Image.BILINEAR]'], {}), '([Image.BOX, Image.NEAREST, Image.HAMMING, Image.BICUBIC,\n Image.LANCZOS, Image.BILINEAR])\n', (9337, 9430), False, 'import random\n'), ((1145, 1200), 'imgaug.augmenters.Resize', 'iaa.Resize', (["{'height': image_size, 'width': image_size}"], {}), "({'height': image_size, 'width': image_size})\n", (1155, 1200), True, 'from imgaug import augmenters as iaa\n'), ((1214, 1229), 
'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(0.5)'], {}), '(0.5)\n', (1224, 1229), True, 'from imgaug import augmenters as iaa\n'), ((1720, 1743), 'imgaug.augmenters.ChannelShuffle', 'iaa.ChannelShuffle', (['(0.1)'], {}), '(0.1)\n', (1738, 1743), True, 'from imgaug import augmenters as iaa\n'), ((1757, 1805), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(1 - contrast, 1 + contrast)'], {}), '((1 - contrast, 1 + contrast))\n', (1775, 1805), True, 'from imgaug import augmenters as iaa\n'), ((1867, 1913), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(1 - brightness, 1 + brightness)'], {}), '((1 - brightness, 1 + brightness))\n', (1879, 1913), True, 'from imgaug import augmenters as iaa\n'), ((1969, 2078), 'imgaug.augmenters.MultiplyHueAndSaturation', 'iaa.MultiplyHueAndSaturation', ([], {'mul_hue': '(1 - hue, 1 + hue)', 'mul_saturation': '(1 - saturation, 1 + saturation)'}), '(mul_hue=(1 - hue, 1 + hue), mul_saturation=(1 -\n saturation, 1 + saturation))\n', (1997, 2078), True, 'from imgaug import augmenters as iaa\n'), ((2138, 2310), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': (0.8, 1.15), 'y': (0.8, 1.15)}", 'translate_percent': "{'x': (-0.05, 0.05), 'y': (-0.05, 0.05)}", 'rotate': '(-rotate, rotate)', 'shear': '(-shear, shear)'}), "(scale={'x': (0.8, 1.15), 'y': (0.8, 1.15)}, translate_percent={\n 'x': (-0.05, 0.05), 'y': (-0.05, 0.05)}, rotate=(-rotate, rotate),\n shear=(-shear, shear))\n", (2148, 2310), True, 'from imgaug import augmenters as iaa\n'), ((5232, 5292), 'torchvision.transforms.functional.to_grayscale', 'F.to_grayscale', (['img'], {'num_output_channels': 'num_output_channels'}), '(img, num_output_channels=num_output_channels)\n', (5246, 5292), True, 'from torchvision.transforms import functional as F\n'), ((10042, 10072), 'torch.tensor', 'torch.tensor', (['[sw, sh, sw, sh]'], {}), '([sw, sh, sw, sh])\n', (10054, 10072), False, 'import torch\n'), ((1594, 1627), 'imgaug.augmenters.GaussianBlur', 
'iaa.GaussianBlur', ([], {'sigma': '(0, 1.75)'}), '(sigma=(0, 1.75))\n', (1610, 1627), True, 'from imgaug import augmenters as iaa\n'), ((1662, 1705), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.0, 0.05)'}), '(scale=(0.0, 0.05))\n', (1686, 1705), True, 'from imgaug import augmenters as iaa\n'), ((1407, 1456), 'imgaug.augmenters.SaltAndPepper', 'iaa.SaltAndPepper', ([], {'p': '(0.0, 0.04)', 'per_channel': '(0.5)'}), '(p=(0.0, 0.04), per_channel=0.5)\n', (1424, 1456), True, 'from imgaug import augmenters as iaa\n'), ((1501, 1558), 'imgaug.augmenters.AdditiveGaussianNoise', 'iaa.AdditiveGaussianNoise', ([], {'scale': '(0, 15)', 'per_channel': '(0.5)'}), '(scale=(0, 15), per_channel=0.5)\n', (1526, 1558), True, 'from imgaug import augmenters as iaa\n')] |
import numpy as np
def B_to_b(B):
    """Flatten the 11 structurally non-zero entries of a 6x4 matrix B."""
    rows = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
    cols = [0, 0, 3, 1, 3, 1, 3, 2, 3, 2, 3]
    return np.array(B[rows, cols])
def b_to_B(b):
    """Inverse of B_to_b: scatter an 11-vector back into a 6x4 matrix.

    Entries not addressed by the sparsity pattern stay zero.
    """
    B = np.zeros((6, 4))
    rows = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
    # Column 3 written explicitly instead of via -1 wrap-around indexing.
    cols = [0, 0, 3, 1, 3, 1, 3, 2, 3, 2, 3]
    B[rows, cols] = b
    return B
def Bchain_to_bchain(B_chain):
    """Vectorized B_to_b along the first axis of a stack of B matrices.

    Args:
        B_chain: array of shape (n, 6, 4).
    Returns:
        float64 array of shape (n, 11) holding the structurally non-zero
        entries of each B.
    """
    rows = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
    cols = [0, 0, 3, 1, 3, 1, 3, 2, 3, 2, 3]
    # One fancy-indexed gather replaces the original per-sample Python loop
    # (same vectorized form the sibling Lambdachain_to_lambdachain uses);
    # astype keeps the original float64 output dtype from np.zeros.
    return B_chain[:, rows, cols].astype(np.float64)
def Lambdachain_to_lambdachain(Lambda_chain):
chain_length = Lambda_chain.shape[0]
x_indices = [0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8]
y_indices = [0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5]
# lambda_chain = np.zeros((chain_length, 12))
# for sample in range(chain_length):
# lambda_chain[sample] = Lambda_chain[sample, x_indices, y_indices]
lambda_chain = Lambda_chain[:, x_indices, y_indices]
return lambda_chain
def Tauchain_to_tauchain(Tau_chain):
    """Collapse each block-diagonal 3x6 Tau into its dense (3, 2) form.

    Args:
        Tau_chain: array of shape (n, 3, 6).
    Returns:
        array of shape (n, 3, 2).
    """
    n = Tau_chain.shape[0]
    rows = [0, 0, 1, 1, 2, 2]
    cols = [0, 1, 2, 3, 4, 5]
    return Tau_chain[:, rows, cols].reshape(n, 3, 2)
def Tau_to_tau(Tau):
    """Single-matrix variant of Tauchain_to_tauchain for one 3x6 Tau."""
    rows = [0, 0, 1, 1, 2, 2]
    cols = [0, 1, 2, 3, 4, 5]
    return Tau[rows, cols].reshape((3, 2))
def w_to_W(w):
    """Expand each 4-vector w[n] into the sparse 6x11 matrix W[n].

    Args:
        w: array of shape (N, 4).
    Returns:
        float64 array of shape (N, 6, 11); each row of the sparsity pattern
        is filled with the w component selected by `src`.
    """
    rows = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
    cols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # Which component of w feeds each (row, col) slot.
    src = [0, 0, 3, 1, 3, 1, 3, 2, 3, 2, 3]
    W = np.zeros((w.shape[0], 6, 11))
    # Single fancy-indexed assignment replaces the original per-sample loop.
    W[:, rows, cols] = w[:, src]
    return W
def lambda_to_Lambda(lambda_):
Lambda = np.zeros((9, 6))
li = lambda_[[0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7]]
row_indices = [0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8]
col_indices = [0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5]
Lambda[row_indices, col_indices] = li
return Lambda
def Lambda_to_lambda(Lambda):
row_indices = [0,0,1,2,3,3,4,5,6,6,7,8]
col_indices = [0,1,0,1,2,3,2,3,4,5,4,5]
return Lambda[row_indices, col_indices] | [
"numpy.array",
"numpy.zeros"
] | [((147, 180), 'numpy.array', 'np.array', (['B[x_indices, y_indices]'], {}), '(B[x_indices, y_indices])\n', (155, 180), True, 'import numpy as np\n'), ((205, 221), 'numpy.zeros', 'np.zeros', (['(6, 4)'], {}), '((6, 4))\n', (213, 221), True, 'import numpy as np\n'), ((554, 582), 'numpy.zeros', 'np.zeros', (['(chain_length, 11)'], {}), '((chain_length, 11))\n', (562, 582), True, 'import numpy as np\n'), ((1856, 1876), 'numpy.zeros', 'np.zeros', (['(N, 6, 11)'], {}), '((N, 6, 11))\n', (1864, 1876), True, 'import numpy as np\n'), ((2045, 2061), 'numpy.zeros', 'np.zeros', (['(9, 6)'], {}), '((9, 6))\n', (2053, 2061), True, 'import numpy as np\n')] |
import NeuralNetwork as NN
import numpy as np
import matplotlib.pyplot as plt
import tools
def train(path_to_datas, save_model_path):
    """Train the MLP classifier on the MNIST training split and save it."""
    # Load the MNIST training images and labels.
    train_datas, labels = tools.load_mnist(path_to_datas, 'train')
    print("The total numbers of datas : ", len(train_datas))
    # One-hot encode labels into an (N, 10) target matrix.
    # NOTE(review): labels are shifted by -1, so a label of 0 wraps to
    # column 9; test() applies the identical shift, so both stay consistent.
    train_labels = np.zeros((labels.shape[0], 10))
    sample_idx = np.arange(labels.shape[0])
    train_labels[sample_idx, labels.astype('int').reshape(-1) - 1] = 1.0
    # Training hyper-parameters.
    batch_size = 200
    train_epochs = 50
    lr = 0.01
    decay = False
    regularization = False
    input_features_numbers = train_datas.shape[1]
    layer_structure = [input_features_numbers, 512, 256, 128, 10]
    display = True
    net_name = 'nn'
    # Build the multi-class MLP classifier.
    net = NN.MLP(name=net_name, layer_structure=layer_structure,
                 task_model='multi', batch_size=batch_size)
    # Run the training loop.
    print("---------开始训练---------")
    net.train(train_datas=train_datas, train_targets=train_labels,
              train_epoch=train_epochs, lr=lr, lr_decay=decay, loss='BE',
              regularization=regularization, display=display)
    # Persist the trained weights.
    net.save_model(path=save_model_path)
    # Plot the training loss and accuracy curves.
    total_net_loss = [net.total_loss]
    total_net_accuracy = [net.total_accuracy]
    tools.drawDataCurve(total_net_loss, total_net_accuracy)
def test(path_to_datas, save_model_path):
    """Evaluate the saved MLP classifier on the MNIST test split."""
    # Load the MNIST test images and labels.
    test_datas, all_label = tools.load_mnist(path_to_datas, 'test')
    print("The total numbers of datas : ", len(test_datas))
    # One-hot encode with the same -1 label shift used in train().
    test_labels = np.zeros((all_label.shape[0], 10))
    sample_idx = np.arange(all_label.shape[0])
    test_labels[sample_idx, all_label.astype('int').reshape(-1) - 1] = 1.0
    # Evaluation hyper-parameters (must match the trained architecture).
    batch_size = 200
    input_features_numbers = test_datas.shape[1]
    layer_structure = [input_features_numbers, 512, 256, 128, 10]
    net_name = 'nn'
    print("---------测试---------")
    # Restore the trained model from disk.
    net = NN.MLP(name=net_name, layer_structure=layer_structure,
                 task_model='multi', batch_size=batch_size,
                 load_model=save_model_path)
    # Average the per-batch accuracy over all full batches of the test set
    # (trailing samples that do not fill a batch are dropped).
    test_steps = test_datas.shape[0] // batch_size
    accuracy = 0
    for step in range(test_steps):
        start, stop = batch_size * step, batch_size * (step + 1)
        input_data = test_datas[start:stop, :].reshape(batch_size, test_datas.shape[1])
        targets = test_labels[start:stop, :].reshape(batch_size, test_labels.shape[1])
        pred = net(input_data)
        # A prediction is correct when its argmax matches the target argmax.
        accuracy += np.sum(np.argmax(pred, 1) == np.argmax(targets, 1)) / targets.shape[0]
    print("网络识别的准确率 : ", accuracy / test_steps)
if __name__ == "__main__":
    # Train on the local MNIST data directory, save the model, then
    # immediately evaluate the saved model on the test split.
    path_to_datas = 'mnist/'
    save_model_path = 'model/'
    train(path_to_datas, save_model_path)
    test(path_to_datas, save_model_path)
| [
"tools.load_mnist",
"numpy.argmax",
"numpy.zeros",
"tools.drawDataCurve",
"NeuralNetwork.MLP",
"numpy.arange"
] | [((178, 218), 'tools.load_mnist', 'tools.load_mnist', (['path_to_datas', '"""train"""'], {}), "(path_to_datas, 'train')\n", (194, 218), False, 'import tools\n'), ((299, 330), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], 10)'], {}), '((labels.shape[0], 10))\n', (307, 330), True, 'import numpy as np\n'), ((758, 859), 'NeuralNetwork.MLP', 'NN.MLP', ([], {'name': 'net_name', 'layer_structure': 'layer_structure', 'task_model': '"""multi"""', 'batch_size': 'batch_size'}), "(name=net_name, layer_structure=layer_structure, task_model='multi',\n batch_size=batch_size)\n", (764, 859), True, 'import NeuralNetwork as NN\n'), ((1237, 1292), 'tools.drawDataCurve', 'tools.drawDataCurve', (['total_net_loss', 'total_net_accuracy'], {}), '(total_net_loss, total_net_accuracy)\n', (1256, 1292), False, 'import tools\n'), ((1379, 1418), 'tools.load_mnist', 'tools.load_mnist', (['path_to_datas', '"""test"""'], {}), "(path_to_datas, 'test')\n", (1395, 1418), False, 'import tools\n'), ((1497, 1531), 'numpy.zeros', 'np.zeros', (['(all_label.shape[0], 10)'], {}), '((all_label.shape[0], 10))\n', (1505, 1531), True, 'import numpy as np\n'), ((1869, 1998), 'NeuralNetwork.MLP', 'NN.MLP', ([], {'name': 'net_name', 'layer_structure': 'layer_structure', 'task_model': '"""multi"""', 'batch_size': 'batch_size', 'load_model': 'save_model_path'}), "(name=net_name, layer_structure=layer_structure, task_model='multi',\n batch_size=batch_size, load_model=save_model_path)\n", (1875, 1998), True, 'import NeuralNetwork as NN\n'), ((348, 374), 'numpy.arange', 'np.arange', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (357, 374), True, 'import numpy as np\n'), ((1548, 1577), 'numpy.arange', 'np.arange', (['all_label.shape[0]'], {}), '(all_label.shape[0])\n', (1557, 1577), True, 'import numpy as np\n'), ((2401, 2419), 'numpy.argmax', 'np.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (2410, 2419), True, 'import numpy as np\n'), ((2422, 2443), 'numpy.argmax', 'np.argmax', (['targets', '(1)'], {}), 
'(targets, 1)\n', (2431, 2443), True, 'import numpy as np\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
from dace.transformation.dataflow import MapFusion
from dace.transformation.interstate import FPGATransformSDFG
from mapfusion_test import multiple_fusions, fusion_with_transient
import numpy as np
def multiple_fusions_fpga():
    """Regression check: MapFusion then FPGATransformSDFG on multiple_fusions."""
    sdfg = multiple_fusions.to_sdfg()
    sdfg.apply_strict_transformations()
    # At least two map fusions must apply, then exactly one FPGA transform.
    assert sdfg.apply_transformations_repeated(MapFusion) >= 2
    assert sdfg.apply_transformations_repeated(FPGATransformSDFG) == 1

    A = np.random.rand(10, 20).astype(np.float32)
    B = np.zeros_like(A)
    C = np.zeros_like(A)
    out = np.zeros(shape=1, dtype=np.float32)
    sdfg(A=A, B=B, C=C, out=out)

    # After execution B must hold A*A + 1 and C must hold A*A + 2.
    assert np.linalg.norm(A * A + 1 - B) <= 1e-4
    assert np.linalg.norm(A * A + 2 - C) <= 1e-4
def fusion_with_transient_fpga():
    """Regression check: fused SDFG with a transient updates A in place."""
    A = np.random.rand(2, 20)
    # Precompute the expected in-place result before the SDFG mutates A.
    expected = A * A * 2
    sdfg = fusion_with_transient.to_sdfg()
    sdfg.apply_strict_transformations()
    assert sdfg.apply_transformations_repeated(MapFusion) >= 2
    assert sdfg.apply_transformations_repeated(FPGATransformSDFG) == 1
    sdfg(A=A)
    assert np.allclose(A, expected)
if __name__ == "__main__":
    # Run both FPGA fusion regression checks when invoked as a script.
    multiple_fusions_fpga()
    fusion_with_transient_fpga()
| [
"numpy.allclose",
"numpy.random.rand",
"numpy.zeros",
"mapfusion_test.multiple_fusions.to_sdfg",
"numpy.linalg.norm",
"numpy.zeros_like",
"mapfusion_test.fusion_with_transient.to_sdfg"
] | [((316, 342), 'mapfusion_test.multiple_fusions.to_sdfg', 'multiple_fusions.to_sdfg', ([], {}), '()\n', (340, 342), False, 'from mapfusion_test import multiple_fusions, fusion_with_transient\n'), ((575, 591), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (588, 591), True, 'import numpy as np\n'), ((600, 616), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (613, 616), True, 'import numpy as np\n'), ((627, 662), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1)', 'dtype': 'np.float32'}), '(shape=1, dtype=np.float32)\n', (635, 662), True, 'import numpy as np\n'), ((708, 737), 'numpy.linalg.norm', 'np.linalg.norm', (['(A * A + 1 - B)'], {}), '(A * A + 1 - B)\n', (722, 737), True, 'import numpy as np\n'), ((750, 779), 'numpy.linalg.norm', 'np.linalg.norm', (['(A * A + 2 - C)'], {}), '(A * A + 2 - C)\n', (764, 779), True, 'import numpy as np\n'), ((874, 895), 'numpy.random.rand', 'np.random.rand', (['(2)', '(20)'], {}), '(2, 20)\n', (888, 895), True, 'import numpy as np\n'), ((932, 963), 'mapfusion_test.fusion_with_transient.to_sdfg', 'fusion_with_transient.to_sdfg', ([], {}), '()\n', (961, 963), False, 'from mapfusion_test import multiple_fusions, fusion_with_transient\n'), ((1163, 1187), 'numpy.allclose', 'np.allclose', (['A', 'expected'], {}), '(A, expected)\n', (1174, 1187), True, 'import numpy as np\n'), ((525, 547), 'numpy.random.rand', 'np.random.rand', (['(10)', '(20)'], {}), '(10, 20)\n', (539, 547), True, 'import numpy as np\n')] |
#!/usr/bin/env python
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__company__ = "Robotic Beverage Technologies Inc"
__status__ = "Development"
__date__ = "Late Updated: 2020-05-11"
__doc__ = "Class to operate at least 64 servos, 16 relays, and 32 motors at once with latency less then 100 ms"
# Useful documentation:
# https://gpiozero.readthedocs.io/en/stable/installing.html
# https://gpiozero.readthedocs.io/en/stable/
# https://gpiozero.readthedocs.io/en/stable/api_output.html
# https://gpiozero.readthedocs.io/en/stable/api_input.html
# Replacement code if GPIOzero doesn't work...
# https://www.adafruit.com/product/2348
# https://learn.adafruit.com/adafruit-dc-and-stepper-motor-hat-for-raspberry-pi/installing-software
# https://learn.adafruit.com/circuitpython-on-raspberrypi-linux/circuitpython-raspi
# https://learn.adafruit.com/adafruit-dc-and-stepper-motor-hat-for-raspberry-pi
#TODO REMOVE? import datetime
#TODO REMOVE? import time
# Allow program to pause operation and create local timestamps
from time import sleep
# Robotic Beverage Technologies code for custom data logging and terminal debugging output
from Debug import *
# Create an array of specific length to restrict resizing and appending (like Pythom list) to improve performance
import numpy as np
from numpy import ndarray, empty #Pick the one that is faster
try:
# The following imports do NOT work in a Mac oor PC dev enviroment (but are needed for Pi product)
# CircuitPython library for the DC & Stepper Motor Pi Hat kits using I2C interface
from adafruit_motorkit import MotorKit
# Allow asynchrous event to occur in parallel and pause threads as needed
# Might work on Windows in the future https://github.com/vibora-io/vibora/issues/126
from signal import pause
# Allow control of input devices such as Buttons
from gpiozero import Button
# Allow control of output devices such as Motors, Servos, LEDs, and Relays
from gpiozero import Motor, Servo, LED, Energenie, OutputDevice
# Check status of network / new device IP addresses and Pi hardware
from gpiozero import PingServer, pi_info
# Useful pin status tools and math tools
from gpiozero.tools import all_values, negated, sin_values
# Useful for controlling devices based on date and time
from gpiozero import TimeOfDay
except ImportError:
#TODO DO LOW LEVEL PIN CONTROL THAT WORKS EVER WHERE? http://wiringpi.com/the-gpio-utility/
ImportDebugObject = Debug(True, "Actuator.py")
Debug.Dprint(ImportDebugObject, "WARNING: You are running code on Mac or PC (NOT a Raspberry Pi 4), thus hardware control is not possible.")
class Actuator:
    """Thin wrapper around a gpiozero Servo, Motor or relay OutputDevice.

    The wrapped device is selected by a one-letter type code:
    "S" = servo, "M" = motor, "R" = relay.
    """

    # Class attributes that can be accessed using Actuator.X
    MAX_NUM_OF_SERVOS = 0       # Circular servos
    MAX_NUM_OF_MOTORS = 12      # Circular stepper or brushless DC motors
    MAX_NUM_OF_LINEAR_ACT = 5   # Linear actuators
    N_A = 0                     # Not Applicable

    # Circular & linear actuator direction CONSTANTS
    CCW = -1                    # Counter-Clockwise
    CW = 1                      # Clockwise
    LINEAR_IN = CCW             # Towards base of linear actuator
    LINEAR_OUT = CW             # Away from base of linear actuator
    SERVO_SLACK = 0.2           # Positional accuracy slack so the control loop does not oscillate
    FORWARD = 1
    BACKWARD = -1

    # Pin value CONSTANTS
    LOW = 0
    HIGH = 1

    # Wire value CONSTANTS
    # Raspberry Pi 4 pin layout: https://pinout.xyz/pinout/pin1_3v3_power
    NO_PIN = -1
    NO_WIRE = 0
    VCC_3_3V = 1
    VCC_3_3V_NAME = "BOARD1"    # 3.3 V @ up to 0.050 A = 0.165 W
    VCC_5V = 2
    VCC_5V_NAME = "BOARD2"      # 5 V @ up to ~1.5 A = 7.5 W
    GND = "BOARD6&9&14&20&25&30&34&39"  # Digital ground (0 V)
    HIGH_PWR_5V = 5             # 5 V rail for Pi, load cell sensor and servos
    HIGH_PWR_12V = 12           # 12 V rail for linear actuators
    HIGH_PWR_36V = 36           # high-voltage rail for stepper motors

    # wires are on the actuator side of the hardware schematic, pins on the CPU side
    wires = [NO_WIRE, NO_WIRE, NO_WIRE, NO_WIRE, NO_WIRE, NO_WIRE, NO_WIRE]

    def __init__(self, aType, actuatorID, pins, partNumber, direction):
        """Initialize an Actuator wrapping a Servo(), Motor() or OutputDevice().

        Key arguments:
        aType      -- single-character type code ("S"=Servo, "M"=Motor, "R"=Relay)
        actuatorID -- integer CONSTANT (defined in Driver.py) for quick array searches
        pins       -- list of wires / pins used by the Raspberry Pi to control the actuator
        partNumber -- vendor part number string (e.g. "Seamuing MG996R")
        direction  -- CCW / LINEAR_IN or CW / LINEAR_OUT chosen as the forward direction
        """
        self.DebugObject = Debug(True)
        self.actuatorID = actuatorID
        self.actuatorType = aType
        # BUG FIX: the original filled the *class-level* wires list (shared by
        # every instance) while constructing devices from an empty np.empty()
        # local array whose entries were all None. Keep a per-instance copy.
        self.wires = list(pins)
        self.partNumber = partNumber
        self.forwardDirection = direction
        if aType == "S":
            # The last wire in the array is the PWM control pin.
            # NOTE(review): the original called Servo.AngularServo(...), which is
            # not a gpiozero attribute (AngularServo is a top-level class).
            self.actuatorObject = Servo(self.wires[-1])
        elif aType == "M":
            # The last two wires in the array are the INPUT control pins
            self.actuatorObject = Motor(self.wires[-2], self.wires[-1])
        elif aType == "R":
            # The last wire in the array is the relay control pin
            self.actuatorObject = OutputDevice(self.wires[-1])
        else:
            self.actuatorObject = None
            Debug.Dprint(self.DebugObject, "INVALID Actutator Type in __init__ method, please use S, M, or R string as first parameter to Actuator() Object")

    def Run(self, duration, newPosition, speed, direction):
        """Run the actuator for *duration* toward *newPosition*.

        Key arguments:
        duration    -- time (in sleep() units) the actuator is in motion
        newPosition -- target position between -1 and 1
        speed       -- speed for Motor() objects; unused for Servo() objects
        direction   -- CCW/LINEAR_IN or CW/LINEAR_OUT forward direction
        Returns nothing.
        """
        Debug.Dprint(self.DebugObject, "Actuator.py Run() function started!")
        # BUG FIX: the original compared the *builtin* `type` against "S"/"M"/"R"
        # (always False) and referenced undefined module-level names.
        if self.actuatorType == "S":
            currentPosition = self.actuatorObject.value
            if currentPosition < (newPosition - Actuator.SERVO_SLACK):
                self.actuatorObject.max()   # TODO may overshoot to full MAX position
            elif currentPosition > (newPosition - Actuator.SERVO_SLACK):
                self.actuatorObject.min()   # TODO may overshoot to full MIN position
            else:
                # NEAR the new position: stop driving the servo
                self.actuatorObject.detach()
        elif self.actuatorType == "M":
            currentPosition = self.actuatorObject.value
            while currentPosition != newPosition:
                if self.forwardDirection == Actuator.CW:
                    self.actuatorObject.forward(speed)
                else:
                    # gpiozero Motor.reverse() takes no speed argument;
                    # backward(speed) drives the opposite direction at *speed*
                    self.actuatorObject.backward(speed)
                currentPosition = self.actuatorObject.value
            sleep(duration)
            self.actuatorObject.stop()
        elif self.actuatorType == "R":
            self.actuatorObject.on()
            sleep(duration)
            self.actuatorObject.off()
        else:
            Debug.Dprint(self.DebugObject, "INVALID Actutator Type sent to Run method, please use S, M, R as first parameter to Actuator() Object")
        Debug.Dprint(self.DebugObject, "Run function completed!")

    def setAngularPosition(self, newAngle):
        """Set the rotational position of a Servo() or Motor() actuator.

        newAngle -- rotational angle; exact for servos, approximate for motors.
        Returns nothing.
        """
        if self.actuatorType == "S":
            # NOTE(review): plain gpiozero Servo has no angle attribute
            # (only AngularServo does) -- confirm which class is wrapped
            self.angle = newAngle
        elif self.actuatorType == "M":
            Debug.Dprint(self.DebugObject, "THIS CODE IS GOING TO BE HARD")
            # TODO possible global variable with dead reckoning needed
        elif self.actuatorType == "R":
            Debug.Dprint(self.DebugObject, "Relays do not have rotational positions. Are you sure you called the correct object?")
        else:
            Debug.Dprint(self.DebugObject, "INVALID Actutator Type sent to SetAngularPosition method, please use S, M, R as first parameter to Actuator() Object")

    def getPosition(self):
        """Read the linear or rotational position of the actuator.

        Return value: position between -1.0 and 1.0 inclusive (TODO).
        """
        if self.actuatorType == "S":
            print("TODO")
            # TODO return self.actuatorObject.value

    def isActive(self):
        """Return True if the actuator is powered on and moving.

        BUG FIX: the original returned the bound method `self.isActive`
        itself, which is always truthy.
        """
        return self.actuatorObject.is_active

    def setAngle(self, angle):
        # Placeholder: angle control is not implemented yet
        print("TODO")
def UnitTest():
    """Smoke test: build two linear actuators and run them forward once."""
    # BUG FIX: the original referenced undefined module-level names
    # (HIGH_PWR_12V, GND, I2C_SDA, I2C_SCL) and omitted the actuatorID
    # argument of the 5-argument Actuator() constructor.
    # NOTE(review): BOARD3 / BOARD5 are the Pi's I2C SDA / SCL pins -- confirm.
    pins = [Actuator.HIGH_PWR_12V, Actuator.GND, "BOARD3", "BOARD5"]
    coconutLiftingLinearMotor1 = Actuator("L", 0, pins, "PA-07-12-5V", Actuator.LINEAR_OUT)
    coconutLiftingLinearMotor2 = Actuator("L", 1, pins, "PA-07-12-5V", Actuator.LINEAR_OUT)
    coconutLiftingLinearMotor1.Run(Actuator.N_A, 1, Actuator.N_A, Actuator.FORWARD)
    coconutLiftingLinearMotor2.Run(Actuator.N_A, 1, Actuator.N_A, Actuator.FORWARD)


if __name__ == "__main__":
    # BUG FIX: UnitTest was originally defined *after* this guard, so the call
    # below always raised NameError; the definition now precedes the guard.
    try:
        UnitTest()
        # BUG FIX: `gpiozero` and `time` were never imported as modules
        # (only names were imported from them), so the original
        # gpiozero.OutputDevice(8) / time.sleep(20) raised NameError.
        relay = OutputDevice(8)  # BCM-8
        relay.on()
        sleep(20)  # seconds
        relay.off()
    except NameError:
        # Reached when gpiozero could not be imported (non-Pi development host)
        DebugObject = Debug(True, "Actuator.py")
        Debug.Dprint(DebugObject, "WARNING: IDIOT! You are running code on Mac or PC (NOT a Raspberry Pi 4), thus hardware control is not possible.")
    print("END ACTUATOR.PY MAIN")
| [
"gpiozero.Motor.reverse",
"gpiozero.Motor.enable",
"time.sleep",
"numpy.empty",
"gpiozero.Motor.forward",
"gpiozero.Servo.dettach",
"gpiozero.Servo.value",
"gpiozero.Motor.disable"
] | [((5488, 5522), 'numpy.empty', 'np.empty', (['numOfWires'], {'dtype': 'object'}), '(numOfWires, dtype=object)\n', (5496, 5522), True, 'import numpy as np\n'), ((8218, 8231), 'gpiozero.Servo.value', 'Servo.value', ([], {}), '()\n', (8229, 8231), False, 'from gpiozero import Motor, Servo, LED, Energenie, OutputDevice\n'), ((8767, 8781), 'gpiozero.Motor.enable', 'Motor.enable', ([], {}), '()\n', (8779, 8781), False, 'from gpiozero import Motor, Servo, LED, Energenie, OutputDevice\n'), ((9124, 9139), 'time.sleep', 'sleep', (['duration'], {}), '(duration)\n', (9129, 9139), False, 'from time import sleep\n'), ((9184, 9199), 'gpiozero.Motor.disable', 'Motor.disable', ([], {}), '()\n', (9197, 9199), False, 'from gpiozero import Motor, Servo, LED, Energenie, OutputDevice\n'), ((8646, 8661), 'gpiozero.Servo.dettach', 'Servo.dettach', ([], {}), '()\n', (8659, 8661), False, 'from gpiozero import Motor, Servo, LED, Energenie, OutputDevice\n'), ((9263, 9278), 'time.sleep', 'sleep', (['duration'], {}), '(duration)\n', (9268, 9278), False, 'from time import sleep\n'), ((8972, 8992), 'gpiozero.Motor.forward', 'Motor.forward', (['speed'], {}), '(speed)\n', (8985, 8992), False, 'from gpiozero import Motor, Servo, LED, Energenie, OutputDevice\n'), ((9035, 9055), 'gpiozero.Motor.reverse', 'Motor.reverse', (['speed'], {}), '(speed)\n', (9048, 9055), False, 'from gpiozero import Motor, Servo, LED, Energenie, OutputDevice\n')] |
from __future__ import print_function
import math
import numpy
import theano
import itertools
from theano import tensor, Op
from theano.gradient import disconnected_type
from fuel.utils import do_not_pickle_attributes
from picklable_itertools.extras import equizip
from collections import defaultdict, deque
from toposort import toposort_flatten
from lvsr.error_rate import (
reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu)
class RewardOp(Op):
    """Theano Op producing per-step reward and gain matrices for a batch of
    (groundtruth, recognized) integer sequence pairs."""
    # no constructor parameters take part in the Op's identity / hashing
    __props__ = ()

    def __init__(self, eos_label, alphabet_size):
        """Computes matrices of rewards and gains."""
        # eos_label: integer id of the end-of-sequence token
        # alphabet_size: number of output symbols (size of the last output axis)
        self.eos_label = eos_label
        self.alphabet_size = alphabet_size

    def perform(self, node, inputs, output_storage):
        # groundtruth / recognized: int matrices, presumably shaped
        # (time, batch) -- the batch dimensions must agree
        groundtruth, recognized = inputs
        if (groundtruth.ndim != 2 or recognized.ndim != 2
                or groundtruth.shape[1] != recognized.shape[1]):
            raise ValueError
        batch_size = groundtruth.shape[1]
        # both outputs have shape (time, batch, alphabet_size)
        all_rewards = numpy.zeros(
            recognized.shape + (self.alphabet_size,), dtype='int64')
        all_gains = numpy.zeros(
            recognized.shape + (self.alphabet_size,), dtype='int64')
        alphabet = list(range(self.alphabet_size))
        for index in range(batch_size):
            y = list(groundtruth[:, index])
            y_hat = list(recognized[:, index])
            # truncate the groundtruth just after its EOS token, if present
            try:
                eos_pos = y.index(self.eos_label)
                y = y[:eos_pos + 1]
            except:
                # Sometimes groundtruth is in fact also a prediction
                # and in this case it might not have EOS label
                pass
            # likewise truncate the recognized sequence at its EOS token
            if self.eos_label in y_hat:
                y_hat_eos_pos = y_hat.index(self.eos_label)
                y_hat_trunc = y_hat[:y_hat_eos_pos + 1]
            else:
                y_hat_trunc = y_hat
            rewards_trunc = reward_matrix(
                y, y_hat_trunc, alphabet, self.eos_label)
            # pass freshly computed rewards to gain_matrix to speed things up
            # a bit
            gains_trunc = gain_matrix(y, y_hat_trunc, alphabet,
                                      given_reward_matrix=rewards_trunc)
            # pad positions past the truncated length with sentinel values
            # (-1000 gain, -1 reward); the last truncated row is dropped
            gains = numpy.ones((len(y_hat), len(alphabet))) * -1000
            gains[:(gains_trunc.shape[0] - 1), :] = gains_trunc[:-1, :]
            rewards = numpy.ones((len(y_hat), len(alphabet))) * -1
            rewards[:(rewards_trunc.shape[0] - 1), :] = rewards_trunc[:-1, :]
            all_rewards[:, index, :] = rewards
            all_gains[:, index, :] = gains
        output_storage[0][0] = all_rewards
        output_storage[1][0] = all_gains

    def grad(self, *args, **kwargs):
        # rewards/gains are discrete lookups: no gradient flows through this Op
        return disconnected_type(), disconnected_type()

    def make_node(self, groundtruth, recognized):
        recognized = tensor.as_tensor_variable(recognized)
        groundtruth = tensor.as_tensor_variable(groundtruth)
        # two int64 rank-3 outputs: rewards and gains
        return theano.Apply(
            self, [groundtruth, recognized], [tensor.ltensor3(), tensor.ltensor3()])
def trim(y, mask):
    """Return *y* truncated at the first zero entry of *mask*.

    If the mask contains no zero (i.e. nothing is padded), *y* is
    returned unchanged.
    """
    if 0. in mask:
        return y[:mask.index(0.)]
    return y
class EditDistanceOp(Op):
    """Theano Op computing the edit distance (or its per-step deltas) between
    masked prediction and groundtruth sequences."""
    # no constructor parameters take part in the Op's identity / hashing
    __props__ = ()

    def __init__(self, bos_label, eos_label, deltas=False):
        # bos_label / eos_label: special token ids excluded from the distance
        # deltas: if True, emit per-step distance increments instead of
        #         a single total distance at the final step
        self.bos_label = bos_label
        self.eos_label = eos_label
        self.deltas = deltas

    def perform(self, node, inputs, output_storage):
        # all four inputs are (time, batch) matrices; the masks presumably
        # mark valid steps with 1. and padding with 0.
        prediction, prediction_mask, groundtruth, groundtruth_mask = inputs
        if (groundtruth.ndim != 2 or prediction.ndim != 2
                or groundtruth.shape[1] != prediction.shape[1]):
            raise ValueError
        batch_size = groundtruth.shape[1]
        # output shape: (time, batch, 1), same dtype as prediction
        results = numpy.zeros_like(prediction[:, :, None])
        for index in range(batch_size):
            # drop the padded tail of each sequence
            y = trim(list(groundtruth[:, index]),
                     list(groundtruth_mask[:, index]))
            y_hat = trim(list(prediction[:, index]),
                         list(prediction_mask[:, index]))
            if self.deltas:
                matrix = _edit_distance_matrix(
                    y, y_hat, special_tokens={self.bos_label, self.eos_label})
                row = matrix[-1, :].copy()
                # per-step change of the distance as the prediction grows
                results[:len(y_hat), index, 0] = row[1:] - matrix[-1, :-1]
            else:
                # single total distance stored at the last prediction step
                results[len(y_hat) - 1, index, 0] = edit_distance(y, y_hat)
        output_storage[0][0] = results

    def grad(self, *args, **kwargs):
        # edit distance is not differentiable
        return theano.gradient.disconnected_type()

    def make_node(self, prediction, prediction_mask,
                  groundtruth, groundtruth_mask):
        prediction = tensor.as_tensor_variable(prediction)
        prediction_mask = tensor.as_tensor_variable(prediction_mask)
        groundtruth = tensor.as_tensor_variable(groundtruth)
        groundtruth_mask = tensor.as_tensor_variable(groundtruth_mask)
        # one int64 rank-3 output
        return theano.Apply(
            self, [prediction, prediction_mask,
                   groundtruth, groundtruth_mask], [tensor.ltensor3()])
class BleuOp(Op):
    """Theano Op computing sentence BLEU rewards (or their per-token deltas)
    for masked prediction / groundtruth sequence pairs."""
    # no constructor parameters take part in the Op's identity / hashing
    __props__ = ()

    def __init__(self, bos_label, eos_label, deltas=False):
        # up to 4-gram BLEU
        self.n = 4
        # deltas: if True, emit per-token BLEU increments instead of the
        #         final score at the last step
        self.deltas = deltas
        # BOS/EOS tokens are stripped before computing BLEU
        self.special_tokens = set([bos_label, eos_label])

    def grad(self, *args, **kwargs):
        # BLEU is not differentiable; one disconnected gradient per input
        return [theano.gradient.disconnected_type()] * 4

    def perform(self, node, inputs, output_storage):
        # all four inputs are (time, batch) matrices; the masks presumably
        # mark valid steps with 1. and padding with 0.
        prediction, prediction_mask, groundtruth, groundtruth_mask = inputs
        if (groundtruth.ndim != 2 or prediction.ndim != 2
                or groundtruth.shape[1] != prediction.shape[1]):
            raise ValueError
        batch_size = groundtruth.shape[1]
        # output shape: (time, batch, 1), float32 BLEU rewards
        results = numpy.zeros_like(prediction[:, :, None]).astype('float32')
        for index in range(batch_size):
            # drop padding, then drop BOS/EOS tokens
            y = trim(list(groundtruth[:, index]),
                     list(groundtruth_mask[:, index]))
            y_no_special = [token for token in y
                            if token not in self.special_tokens]
            y_hat = trim(list(prediction[:, index]),
                         list(prediction_mask[:, index]))
            y_hat_no_special = [token for token in y_hat
                                if token not in self.special_tokens]
            blues, _, _, _ = _bleu(y_no_special, y_hat_no_special, self.n)
            # column n-1 holds the cumulative n-gram BLEU after each token
            reward = blues[:, self.n - 1].copy()
            if self.deltas:
                # convert cumulative scores into per-token increments
                reward[1:] = reward[1:] - reward[:-1]
                pos = -1
                for i in range(len(y_hat)):
                    if y_hat[i] not in self.special_tokens:
                        pos = pos + 1
                        results[i, index, 0] = reward[pos]
                    else:
                        # special tokens contribute zero reward
                        results[i, index, 0] = 0.
            elif len(reward):
                # final BLEU stored at the last prediction step
                results[len(y_hat) - 1, index, 0] = reward[-1]
        output_storage[0][0] = results

    def make_node(self, prediction, prediction_mask,
                  groundtruth, groundtruth_mask):
        prediction = tensor.as_tensor_variable(prediction)
        prediction_mask = tensor.as_tensor_variable(prediction_mask)
        groundtruth = tensor.as_tensor_variable(groundtruth)
        groundtruth_mask = tensor.as_tensor_variable(groundtruth_mask)
        # one float rank-3 output
        return theano.Apply(
            self,
            [prediction, prediction_mask,
             groundtruth, groundtruth_mask],
            [tensor.tensor3()])
| [
"lvsr.error_rate.edit_distance",
"theano.tensor.ltensor3",
"lvsr.error_rate._bleu",
"theano.tensor.tensor3",
"theano.gradient.disconnected_type",
"lvsr.error_rate._edit_distance_matrix",
"numpy.zeros",
"theano.tensor.as_tensor_variable",
"lvsr.error_rate.reward_matrix",
"numpy.zeros_like",
"lvsr... | [((990, 1058), 'numpy.zeros', 'numpy.zeros', (['(recognized.shape + (self.alphabet_size,))'], {'dtype': '"""int64"""'}), "(recognized.shape + (self.alphabet_size,), dtype='int64')\n", (1001, 1058), False, 'import numpy\n'), ((1092, 1160), 'numpy.zeros', 'numpy.zeros', (['(recognized.shape + (self.alphabet_size,))'], {'dtype': '"""int64"""'}), "(recognized.shape + (self.alphabet_size,), dtype='int64')\n", (1103, 1160), False, 'import numpy\n'), ((2805, 2842), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['recognized'], {}), '(recognized)\n', (2830, 2842), False, 'from theano import tensor, Op\n'), ((2865, 2903), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['groundtruth'], {}), '(groundtruth)\n', (2890, 2903), False, 'from theano import tensor, Op\n'), ((3672, 3712), 'numpy.zeros_like', 'numpy.zeros_like', (['prediction[:, :, None]'], {}), '(prediction[:, :, None])\n', (3688, 3712), False, 'import numpy\n'), ((4430, 4465), 'theano.gradient.disconnected_type', 'theano.gradient.disconnected_type', ([], {}), '()\n', (4463, 4465), False, 'import theano\n'), ((4591, 4628), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['prediction'], {}), '(prediction)\n', (4616, 4628), False, 'from theano import tensor, Op\n'), ((4655, 4697), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['prediction_mask'], {}), '(prediction_mask)\n', (4680, 4697), False, 'from theano import tensor, Op\n'), ((4720, 4758), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['groundtruth'], {}), '(groundtruth)\n', (4745, 4758), False, 'from theano import tensor, Op\n'), ((4786, 4829), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['groundtruth_mask'], {}), '(groundtruth_mask)\n', (4811, 4829), False, 'from theano import tensor, Op\n'), ((6947, 6984), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['prediction'], {}), '(prediction)\n', (6972, 6984), False, 
'from theano import tensor, Op\n'), ((7011, 7053), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['prediction_mask'], {}), '(prediction_mask)\n', (7036, 7053), False, 'from theano import tensor, Op\n'), ((7076, 7114), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['groundtruth'], {}), '(groundtruth)\n', (7101, 7114), False, 'from theano import tensor, Op\n'), ((7142, 7185), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['groundtruth_mask'], {}), '(groundtruth_mask)\n', (7167, 7185), False, 'from theano import tensor, Op\n'), ((1870, 1925), 'lvsr.error_rate.reward_matrix', 'reward_matrix', (['y', 'y_hat_trunc', 'alphabet', 'self.eos_label'], {}), '(y, y_hat_trunc, alphabet, self.eos_label)\n', (1883, 1925), False, 'from lvsr.error_rate import reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu\n'), ((2067, 2139), 'lvsr.error_rate.gain_matrix', 'gain_matrix', (['y', 'y_hat_trunc', 'alphabet'], {'given_reward_matrix': 'rewards_trunc'}), '(y, y_hat_trunc, alphabet, given_reward_matrix=rewards_trunc)\n', (2078, 2139), False, 'from lvsr.error_rate import reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu\n'), ((2692, 2711), 'theano.gradient.disconnected_type', 'disconnected_type', ([], {}), '()\n', (2709, 2711), False, 'from theano.gradient import disconnected_type\n'), ((2713, 2732), 'theano.gradient.disconnected_type', 'disconnected_type', ([], {}), '()\n', (2730, 2732), False, 'from theano.gradient import disconnected_type\n'), ((6211, 6256), 'lvsr.error_rate._bleu', '_bleu', (['y_no_special', 'y_hat_no_special', 'self.n'], {}), '(y_no_special, y_hat_no_special, self.n)\n', (6216, 6256), False, 'from lvsr.error_rate import reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu\n'), ((2979, 2996), 'theano.tensor.ltensor3', 'tensor.ltensor3', ([], {}), '()\n', (2994, 2996), False, 'from theano import tensor, Op\n'), ((2998, 3015), 
'theano.tensor.ltensor3', 'tensor.ltensor3', ([], {}), '()\n', (3013, 3015), False, 'from theano import tensor, Op\n'), ((4022, 4107), 'lvsr.error_rate._edit_distance_matrix', '_edit_distance_matrix', (['y', 'y_hat'], {'special_tokens': '{self.bos_label, self.eos_label}'}), '(y, y_hat, special_tokens={self.bos_label, self.eos_label}\n )\n', (4043, 4107), False, 'from lvsr.error_rate import reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu\n'), ((4313, 4336), 'lvsr.error_rate.edit_distance', 'edit_distance', (['y', 'y_hat'], {}), '(y, y_hat)\n', (4326, 4336), False, 'from lvsr.error_rate import reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu\n'), ((4959, 4976), 'theano.tensor.ltensor3', 'tensor.ltensor3', ([], {}), '()\n', (4974, 4976), False, 'from theano import tensor, Op\n'), ((5243, 5278), 'theano.gradient.disconnected_type', 'theano.gradient.disconnected_type', ([], {}), '()\n', (5276, 5278), False, 'import theano\n'), ((5627, 5667), 'numpy.zeros_like', 'numpy.zeros_like', (['prediction[:, :, None]'], {}), '(prediction[:, :, None])\n', (5643, 5667), False, 'import numpy\n'), ((7333, 7349), 'theano.tensor.tensor3', 'tensor.tensor3', ([], {}), '()\n', (7347, 7349), False, 'from theano import tensor, Op\n')] |
import sys
from torch.utils.data import Dataset, DataLoader
import os
import os.path as osp
import glob
import numpy as np
import random
import cv2
import pickle as pkl
import json
import h5py
import torch
import matplotlib.pyplot as plt
from lib.utils.misc import process_dataset_for_video
class SurrealDataset(Dataset):
    """2D/3D keypoint dataset backed by a preprocessed SURREAL HDF5 file.

    Each item is (kps_2d, kps_3d, rot, diff1, diff2, scale) where diff1 is a
    deterministic neighbouring frame and diff2 a random frame of the same
    sequence.
    """

    def __init__(self, config, is_train=True):
        """Load keypoints, normalization factors and per-sample rotations.

        config   -- experiment configuration object (DATA / TRAIN sections)
        is_train -- True selects the training HDF5 file, False the validation one
        """
        self.is_train = is_train
        self.frame_interval = config.DATA.FRAME_INTERVAL
        # randomization will lead to inferior performance
        # since diff will only be used when training
        self.data_path = config.DATA.TRAIN_PATH if self.is_train else config.DATA.VALID_PATH
        self.use_same_norm_2d = config.DATA.USE_SAME_NORM_2D
        # NOTE(review): reads USE_SAME_NORM_2D, same as the 2D flag above --
        # if a USE_SAME_NORM_3D config key exists this looks like a
        # copy-paste bug; kept as-is to avoid breaking existing configs.
        self.use_same_norm_3d = config.DATA.USE_SAME_NORM_2D
        self.seed_set = False
        self.head_root_distance = 1 / config.TRAIN.CAMERA_SKELETON_DISTANCE
        # whether to use dataset adapted from kinetics
        self.use_gt = config.USE_GT
        self.min_diff_dist = config.DATA.MIN_DIFF_DIST
        self.bound_azim = config.TRAIN.BOUND_AZIM  # y axis rotation
        self.bound_elev = config.TRAIN.BOUND_ELEV
        self._load_data_set()

    def get_seqnames(self):
        """Return the per-sample sequence names loaded from the HDF5 file."""
        return self.sequence_names

    def _load_data_set(self):
        """Read keypoints and sequence metadata, normalize, and precompute
        per-sample rotations and diff1 indices."""
        if self.is_train:
            print('start loading surreal {} data.'.format("train" if self.is_train else "test"))
        key = "original_joint_2d_gt" if self.use_gt else "joint_2d_pre"
        assert self.use_gt
        fp = h5py.File(self.data_path, "r")
        self.kp2ds = np.array(fp[key])
        # map 320x320 pixel coordinates into [-1, 1]
        self.kp2ds[:, :, 0] = (self.kp2ds[:, :, 0] - 160.0) / 160.0
        self.kp2ds[:, :, 1] = (self.kp2ds[:, :, 1] - 160.0) / 160.0
        # locate root (joint 13) at the origin; small epsilon avoids exact zero
        self.kp2ds = self.kp2ds - self.kp2ds[:, 13:14]
        self.kp2ds[:, 13] = 1e-5
        # imagenames will be used to sample frames
        self.imagenames = [name.decode() for name in fp['imagename'][:]]
        if 'seqname' not in fp.keys():
            # sequence metadata missing: build it once, then reopen the file
            fp.close()
            print("Process corresponding dataset...")
            process_dataset_for_video(self.data_path, is_surreal=True)
            fp = h5py.File(self.data_path, "r")
        self.sequence_lens = np.array(fp['seqlen'])
        self.sequence_names = [name.decode() for name in fp['seqname'][:]]
        self.indices_in_seq = np.array(fp['index_in_seq'])
        # normalize again so that the mean head-to-root distance is 1/c
        if not self.use_same_norm_2d:
            factor_gt = self.head_root_distance / (np.tile(np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 2)) + 1e-8)
        else:
            factor_gt = self.head_root_distance / np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).mean()
        self.kp2ds = self.kp2ds * factor_gt
        self.kp3ds = np.array(fp['joint_3d_gt'])
        factor_3d = np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).mean()
        factor_filename = "../data/surreal_{}_factor_3d.pkl".format("train" if self.is_train else "test")
        # NOTE(review): the per-sample 3D factor is only cached to disk here
        # and never applied to self.kp3ds -- confirm downstream use.
        if not self.use_same_norm_3d and not osp.exists(factor_filename):
            factor_3d = (np.tile(np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 3)) + 1e-8)
            with open(factor_filename, "wb") as f:
                pkl.dump(factor_3d, f)
        fp.close()
        print('finished load surreal {} data, total {} samples'.format("train" if self.is_train else "test", \
            self.kp2ds.shape[0]))
        # precompute a deterministic neighbouring frame (diff1) per sample
        self.diff_indices = []
        for index in range(self.kp2ds.shape[0]):
            index_in_seq = self.indices_in_seq[index]
            seq_len = self.sequence_lens[index]
            if seq_len == 1:
                # BUG FIX: the original assigned `diff1_index` here but then
                # appended `diff_index`, raising NameError for 1-frame clips
                diff_index = index
            elif index_in_seq + self.frame_interval < seq_len:
                diff_index = index + self.frame_interval
            else:
                diff_index = index - self.frame_interval
            self.diff_indices.append(diff_index)
        # generate the rotation factors (azimuth, elevation, roll)
        num_examples = self.kp2ds.shape[0]
        np.random.seed(2019)
        rotation_y = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_azim
        rotation_x = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev
        rotation_z = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev / 2
        rotation_1 = np.concatenate((rotation_y, rotation_x, rotation_z), axis=1)
        # second copy rotated by pi around the y axis
        rotation_2 = rotation_1.copy()
        rotation_2[:, 0] = rotation_2[:, 0] + np.pi
        self.rotation = np.concatenate((rotation_1, rotation_2), axis=0)
        np.random.shuffle(self.rotation)
        self.rotation = torch.from_numpy(self.rotation).float()
        self.kp2ds = torch.from_numpy(self.kp2ds).float()
        self.kp3ds = torch.from_numpy(self.kp3ds).float()

    def __len__(self):
        """Number of samples in the dataset."""
        return self.kp2ds.shape[0]

    def __getitem__(self, index):
        """Return (kps_2d, kps_3d, rot, diff1, diff2, scale) for *index*."""
        # seed the RNGs once (per worker) so sampling is reproducible
        if not self.seed_set:
            self.seed_set = True
            random.seed(index)
            np.random.seed(index)
        seq_len = self.sequence_lens[index]
        index_in_seq = self.indices_in_seq[index]
        kps_2d = self.kp2ds[index]
        kps_3d = self.kp3ds[index]
        rot = self.rotation[index]
        # deterministic neighbouring frame precomputed in _load_data_set
        diff1 = self.kp2ds[self.diff_indices[index]]
        # diff2: random frame of the same sequence, at least min_diff_dist away
        if seq_len == 1:
            diff_dist = 0
        else:
            # NOTE(review): loops forever if seq_len <= min_diff_dist -- the
            # data presumably guarantees longer sequences
            diff_dist = np.random.randint(-index_in_seq, seq_len - index_in_seq)
            while abs(diff_dist) < self.min_diff_dist:
                diff_dist = np.random.randint(-index_in_seq, seq_len - index_in_seq)
        diff2 = self.kp2ds[index + diff_dist]
        # for validation, simply ignore scale
        scale = 0
        return kps_2d, kps_3d, rot, diff1, diff2, scale
| [
"os.path.exists",
"pickle.dump",
"numpy.random.random_sample",
"random.seed",
"h5py.File",
"torch.from_numpy",
"numpy.array",
"lib.utils.misc.process_dataset_for_video",
"numpy.random.randint",
"numpy.random.seed",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.random.shuffle"
] | [((1614, 1644), 'h5py.File', 'h5py.File', (['self.data_path', '"""r"""'], {}), "(self.data_path, 'r')\n", (1623, 1644), False, 'import h5py\n'), ((1667, 1684), 'numpy.array', 'np.array', (['fp[key]'], {}), '(fp[key])\n', (1675, 1684), True, 'import numpy as np\n'), ((2422, 2444), 'numpy.array', 'np.array', (["fp['seqlen']"], {}), "(fp['seqlen'])\n", (2430, 2444), True, 'import numpy as np\n'), ((2552, 2580), 'numpy.array', 'np.array', (["fp['index_in_seq']"], {}), "(fp['index_in_seq'])\n", (2560, 2580), True, 'import numpy as np\n'), ((3064, 3091), 'numpy.array', 'np.array', (["fp['joint_3d_gt']"], {}), "(fp['joint_3d_gt'])\n", (3072, 3091), True, 'import numpy as np\n'), ((4468, 4488), 'numpy.random.seed', 'np.random.seed', (['(2019)'], {}), '(2019)\n', (4482, 4488), True, 'import numpy as np\n'), ((4796, 4856), 'numpy.concatenate', 'np.concatenate', (['(rotation_y, rotation_x, rotation_z)'], {'axis': '(1)'}), '((rotation_y, rotation_x, rotation_z), axis=1)\n', (4810, 4856), True, 'import numpy as np\n'), ((4975, 5023), 'numpy.concatenate', 'np.concatenate', (['(rotation_1, rotation_2)'], {'axis': '(0)'}), '((rotation_1, rotation_2), axis=0)\n', (4989, 5023), True, 'import numpy as np\n'), ((5033, 5065), 'numpy.random.shuffle', 'np.random.shuffle', (['self.rotation'], {}), '(self.rotation)\n', (5050, 5065), True, 'import numpy as np\n'), ((2284, 2342), 'lib.utils.misc.process_dataset_for_video', 'process_dataset_for_video', (['self.data_path'], {'is_surreal': '(True)'}), '(self.data_path, is_surreal=True)\n', (2309, 2342), False, 'from lib.utils.misc import process_dataset_for_video\n'), ((2361, 2391), 'h5py.File', 'h5py.File', (['self.data_path', '"""r"""'], {}), "(self.data_path, 'r')\n", (2370, 2391), False, 'import h5py\n'), ((5428, 5446), 'random.seed', 'random.seed', (['index'], {}), '(index)\n', (5439, 5446), False, 'import random\n'), ((5460, 5481), 'numpy.random.seed', 'np.random.seed', (['index'], {}), '(index)\n', (5474, 5481), True, 'import numpy 
as np\n'), ((5905, 5961), 'numpy.random.randint', 'np.random.randint', (['(-index_in_seq)', '(seq_len - index_in_seq)'], {}), '(-index_in_seq, seq_len - index_in_seq)\n', (5922, 5961), True, 'import numpy as np\n'), ((3186, 3247), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.kp3ds[:, -1] - self.kp3ds[:, 13])'], {'axis': '(1)'}), '(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1)\n', (3200, 3247), True, 'import numpy as np\n'), ((3408, 3435), 'os.path.exists', 'osp.exists', (['factor_filename'], {}), '(factor_filename)\n', (3418, 3435), True, 'import os.path as osp\n'), ((3641, 3663), 'pickle.dump', 'pkl.dump', (['factor_3d', 'f'], {}), '(factor_3d, f)\n', (3649, 3663), True, 'import pickle as pkl\n'), ((5091, 5122), 'torch.from_numpy', 'torch.from_numpy', (['self.rotation'], {}), '(self.rotation)\n', (5107, 5122), False, 'import torch\n'), ((5155, 5183), 'torch.from_numpy', 'torch.from_numpy', (['self.kp2ds'], {}), '(self.kp2ds)\n', (5171, 5183), False, 'import torch\n'), ((5214, 5242), 'torch.from_numpy', 'torch.from_numpy', (['self.kp3ds'], {}), '(self.kp3ds)\n', (5230, 5242), False, 'import torch\n'), ((6046, 6102), 'numpy.random.randint', 'np.random.randint', (['(-index_in_seq)', '(seq_len - index_in_seq)'], {}), '(-index_in_seq, seq_len - index_in_seq)\n', (6063, 6102), True, 'import numpy as np\n'), ((4516, 4558), 'numpy.random.random_sample', 'np.random.random_sample', (['(num_examples, 1)'], {}), '((num_examples, 1))\n', (4539, 4558), True, 'import numpy as np\n'), ((4610, 4652), 'numpy.random.random_sample', 'np.random.random_sample', (['(num_examples, 1)'], {}), '((num_examples, 1))\n', (4633, 4652), True, 'import numpy as np\n'), ((2925, 2986), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.kp2ds[:, -1] - self.kp2ds[:, 13])'], {'axis': '(1)'}), '(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1)\n', (2939, 2986), True, 'import numpy as np\n'), ((4704, 4746), 'numpy.random.random_sample', 'np.random.random_sample', (['(num_examples, 1)'], {}), 
'((num_examples, 1))\n', (4727, 4746), True, 'import numpy as np\n'), ((3471, 3532), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.kp3ds[:, -1] - self.kp3ds[:, 13])'], {'axis': '(1)'}), '(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1)\n', (3485, 3532), True, 'import numpy as np\n'), ((2758, 2819), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.kp2ds[:, -1] - self.kp2ds[:, 13])'], {'axis': '(1)'}), '(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1)\n', (2772, 2819), True, 'import numpy as np\n')] |
import numpy as np
class BayesDiscri:
    """Naive Bayes classifier for discrete (categorical) features.

    Feature values and class labels may be strings or numbers; all of them
    are keyed internally by their str() representation.
    """

    def __init__(self):
        """Initialize an untrained BayesDiscri classifier."""
        self.varipro = []    # per-feature conditional probabilities, keys "value|label"
        self.priorpro = {}   # prior probability of each class label
        self.respro = []     # per-sample posterior probabilities from discriminate()

    def train(self, data, rowvar=False):
        """Estimate priors and conditional probabilities from labelled data.

        data   -- training matrix (np.array); the label of each sample sits in
                  the last column (rowvar=False) or last row (rowvar=True);
                  at least two samples and two features are expected
        rowvar -- True: each row is a variable, each column a sample;
                  False (default): each row is a sample
        """
        # 1. Normalize to the rowvar=False layout: one sample per row
        if rowvar:
            data = data.T
        size = data.shape[0]    # number of samples
        count = data.shape[1]   # number of columns (features + label)
        # 2. Prior probability of each class label.
        # BUG FIX: the original tested the *raw* value against str() keys, so
        # numeric labels never matched and every prior collapsed to 1/size.
        # Key everything by str() so numeric and string labels behave alike.
        labels = [str(v) for v in data[:, count - 1]]
        prior = {}
        for label in labels:
            prior[label] = prior.get(label, 0) + 1
        for label in prior:
            prior[label] /= size
        self.priorpro = prior
        # 3. Conditional probability of each feature value given each label,
        # one dict per feature with keys of the form "value|label".
        # NOTE(review): values or labels containing '|' would break the
        # key.split('|') below, as in the original implementation.
        for i in range(count - 1):
            cond = {}
            for k in range(size):
                key = str(data[k][i]) + '|' + labels[k]
                cond[key] = cond.get(key, 0) + 1
            for key in cond:
                kind = key.split('|')[1]            # extract the class label
                cond[key] /= labels.count(kind)     # divide by the label count
            self.varipro.append(cond)
        return

    def discriminate(self, data, rowvar=False):
        """Classify test samples.

        data   -- test matrix (np.array) of feature values (no label column);
                  a single 1-D sample is also accepted
        rowvar -- same convention as in train()
        Returns (res, respro):
          res       -- list with the predicted label of each sample
          respro    -- list of dicts mapping each label to its posterior
                       probability for each sample
        """
        # 1. Normalize layout and promote a single sample to a 2-D matrix
        if rowvar:
            data = data.T
        if data.ndim == 1:
            data = np.array([data])
        size = data.shape[0]
        count = data.shape[1]
        res = []
        for i in range(size):
            p = []      # unnormalized posterior p(x|y)p(y) per class
            kind = []   # class label matching each entry of p
            for label, priorvalue in self.priorpro.items():
                joint = priorvalue
                for m in range(count):
                    name = str(data[i][m]) + '|' + label
                    if name in self.varipro[m]:
                        joint *= self.varipro[m][name]
                    else:
                        # unseen value for this class: probability zero
                        joint = 0
                        break
                p.append(joint)
                kind.append(label)
            res.append(kind[p.index(max(p))])
            # normalize: the shared evidence term is just the sum of the
            # numerators, so no separate computation is needed
            total = sum(p)
            if total > 0:   # guard: every class may have probability zero
                p = [x / total for x in p]
            self.respro.append(dict(zip(kind, p)))
        return (res, self.respro)
| [
"numpy.array",
"numpy.shape"
] | [((797, 811), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (805, 811), True, 'import numpy as np\n'), ((840, 854), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (848, 854), True, 'import numpy as np\n'), ((2711, 2727), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (2719, 2727), True, 'import numpy as np\n'), ((2852, 2866), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (2860, 2866), True, 'import numpy as np\n'), ((2885, 2899), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (2893, 2899), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
# NOTE(review): this snippet is not runnable as-is.  `input_shape` / `residual_shape`
# are full ndarrays, not shape tuples, so `input_shape[ROW_AXIS]` indexes the array
# (and with shape[0] == 1, index 1 is out of bounds).  `CHANNEL_AXIS`, `img_channels`,
# `img_rows`, `img_cols`, `nb_classes`, `X_train`, `Y_train`, `X_test`, `Y_test`,
# `batch_size`, `nb_epoch`, `lr_reducer`, `early_stopper` and `csv_logger` are all
# undefined here, and `input` on the Conv2D call is the Python builtin -- confirm
# against the original resnet implementation this was copied from.
input_shape=np.zeros((1,34,34,3))
residual_shape=np.zeros((1,10,10,3))
ROW_AXIS=1
COL_AXIS=2
stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))#round: round to the nearest integer
shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],#this reshaping step is a bit involved: the paper uses a matrix for the linear projection, here a 1x1 conv compresses instead
                  kernel_size=(1, 1),#the filters argument keeps the channel dimension unchanged
                  strides=(stride_width, stride_height),#with kernel_size (1,1) these stride values make the output match residual_shape
                  padding="valid", #fairly obvious when drawn out, though a rigorous proof is harder
                  kernel_initializer="he_normal",
                  kernel_regularizer=l2(0.0001))(input)
model = shortcut((img_channels, img_rows, img_cols), nb_classes)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, Y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_data=(X_test, Y_test),
          shuffle=True,
          callbacks=[lr_reducer, early_stopper, csv_logger])
| [
"numpy.zeros",
"keras.regularizers.l2"
] | [((436, 460), 'numpy.zeros', 'np.zeros', (['(1, 34, 34, 3)'], {}), '((1, 34, 34, 3))\n', (444, 460), True, 'import numpy as np\n'), ((473, 497), 'numpy.zeros', 'np.zeros', (['(1, 10, 10, 3)'], {}), '((1, 10, 10, 3))\n', (481, 497), True, 'import numpy as np\n'), ((1135, 1145), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1137, 1145), False, 'from keras.regularizers import l2\n')] |
from typing import Tuple
import numpy as np
import math
class Point2D:
    """A mutable point in the 2-D plane."""

    def __init__(self, x_init, y_init):
        self.x = x_init
        self.y = y_init

    def as_tuple(self):
        """Return the coordinates as an (x, y) tuple."""
        return (self.x, self.y)

    def as_int_tuple(self):
        """Return the coordinates truncated to integers."""
        return (int(self.x), int(self.y))

    def shift(self, x, y):
        """Translate the point in place by (x, y)."""
        self.x = self.x + x
        self.y = self.y + y

    def __repr__(self):
        return f"Point({self.x},{self.y})"

    def distance_from(self, other_point: 'Point2D') -> float:
        """Euclidean distance to another point."""
        dx = self.x - other_point.x
        dy = self.y - other_point.y
        return math.sqrt(dx * dx + dy * dy)

    def closest_point_in_list(self, *points: Tuple['Point2D']):
        """Return (closest point, its index) among the given candidates."""
        gaps = [self.distance_from(candidate) for candidate in points]
        best = np.argmin(gaps)
        return points[best], best

    def __json_encode__(self):
        return self.__dict__

    def __json_decode__(self, **attrs):
        self.x = attrs['x']
        self.y = attrs['y']
class Rectangle:
def __init__(self, upper_left: Point2D, bottom_right: Point2D) -> None:
self.upper_left = upper_left
self.bottom_right = bottom_right
self.height = bottom_right.y - upper_left.y
self.width = bottom_right.x - upper_left.x
def surface(self) -> float:
return self.height * self.width
def center(self):
return Point2D(self.upper_left.x + self.width / 2.0, self.upper_left.y + self.height / 2.0)
def contains(self, point: Point2D):
return self.upper_left.x < point.x < self.bottom_right.x and self.upper_left.y < point.y < self.bottom_right.y
def distance_from_point(self, point: Point2D):
# You can clamp to the nearest point in the rectangle, and find the distance to that point.
# The nearest point on the rectangle is given by
nearest_point_in_rect = Point2D(point.x, point.y)
if point.x < self.upper_left.x:
nearest_point_in_rect.x = self.upper_left.x
elif point.x > self.bottom_right.x:
nearest_point_in_rect.x = self.bottom_right.x
if point.y < self.upper_left.y:
nearest_point_in_rect.y = self.upper_left.y
elif point.y > self.bottom_right.y:
nearest_point_in_rect.y = self.bottom_right.y
return nearest_point_in_rect.distance_from(point)
def __json_encode__(self):
return self.__dict__
def __json_decode__(self, attrs):
self.upper_left = attrs['upper_left']
self.bottom_right = attrs['bottom_right']
self.width = attrs['width']
self.height = attrs['height']
@classmethod
def from_center_and_dimension(cls, center: Point2D, width, height):
upper_left = Point2D(center.x - width / 2.0, center.y - height / 2.0)
bottom_right = Point2D(center.x + width / 2.0, center.y + height / 2.0)
return cls(upper_left, bottom_right)
| [
"numpy.argmin",
"math.sqrt"
] | [((528, 600), 'math.sqrt', 'math.sqrt', (['((self.x - other_point.x) ** 2 + (self.y - other_point.y) ** 2)'], {}), '((self.x - other_point.x) ** 2 + (self.y - other_point.y) ** 2)\n', (537, 600), False, 'import math\n'), ((748, 768), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (757, 768), True, 'import numpy as np\n')] |
'''
Usage 1: python3 split_and_run.py --dataset [dataset name] --num_split [# of split] --metric [distance measure] --num_leaves [num_leaves] --num_search [num_leaves_to_search] --coarse_training_size [coarse traing sample size] --fine_training_size [fine training sample size] --threshold [threshold] --reorder [reorder size] [--split] [--eval_split]
Usage 2: python3 split_and_run.py --dataset [dataset name] --groundtruth --metric [distance measure]
'''
import sys
import numpy as np
import time
import argparse
import os
import h5py
import math
# Command-line interface: dataset selection, per-library build/search knobs,
# and run-mode flags for the split/evaluate/groundtruth pipeline.
parser = argparse.ArgumentParser(description='Options')
parser.add_argument('--program', type=str, help='scann, faiss ...')
parser.add_argument('--dataset', type=str, default=None, help='sift1b, glove ...')
parser.add_argument('--num_split', type=int, default=-1, help='# of splits')
parser.add_argument('--metric', type=str, default=None, help='dot_product, squared_l2')
## Common algorithm parameters
parser.add_argument('--L', type=int, default=-1, help='# of coarse codewords')
parser.add_argument('--w', type=int, default=-1, help='# of clusters to search')
parser.add_argument('--m', type=int, default=-1, help='# of dimension chunks')
parser.add_argument('--batch', type=int, default=1, help='query batch size')
parser.add_argument('--csize', type=int, default=10000, help='query size in fast scan cache')
## ScaNN parameters
parser.add_argument('--coarse_training_size', type=int, default=250000, help='coarse training sample size')
parser.add_argument('--fine_training_size', type=int, default=100000, help='fine training sample size')
parser.add_argument('--threshold', type=float, default=0.2, help='anisotropic_quantization_threshold')
parser.add_argument('--reorder', type=int, default=-1, help='reorder size')
## Faiss parameters
parser.add_argument('--k_star', type=int, default=-1, help='# of a single finegrained codewords')
parser.add_argument('--is_gpu', action='store_true')
parser.add_argument('--opq', type=int, default=-1, help='new desired dimension after applying OPQ')
parser.add_argument('--sq', type=int, default=-1, help='desired amount of bits per component after SQ')
parser.add_argument('--flat', type=int, default=-1, help='1 if you want to perform exhaustive search')
## Annoy parameters
parser.add_argument('--n_trees', type=int, default=-1, help='# of trees')
## ScaNN & Annoy common parameters
parser.add_argument('--num_search', type=int, default=-1, help='# of searching leaves for ScaNN, # of searching datapoints for Annoy')
parser.add_argument('--topk', type=int, default=-1, help='# of final result')
## Run options
parser.add_argument('--split', action='store_true')
parser.add_argument('--eval_split', action='store_true')
parser.add_argument('--groundtruth', action='store_true')
parser.add_argument('--sweep', action='store_true')
args = parser.parse_args()
# Mode-dependent argument validation.
# NOTE(review): these asserts are stripped under `python -O`; explicit raises
# would be more robust for user-input validation.
assert args.dataset != None and args.topk <= 1000
if args.split != True:
	assert args.metric == "squared_l2" or args.metric == "dot_product" or args.metric=="angular"
if args.eval_split or args.sweep:
	assert args.program!=None and args.metric!=None and args.num_split!=-1 and args.topk!=-1
if args.groundtruth:
	import ctypes
	assert args.metric!=None
# Library-specific imports are deferred so that only the selected backend
# needs to be installed.
if args.program=='scann':
	import scann
	assert args.is_gpu == False and (args.topk <= args.reorder if args.reorder!=-1 else True)
	if args.sweep == False:
		assert args.L!=-1 and args.w!=-1 and args.topk!=-1 and args.k_star == -1 and args.m!=-1
		assert args.topk!=-1
elif args.program == "faiss":
	#if os.environ.get('LD_PRELOAD') == None:
	#	assert False, "Please set LD_PRELOAD environment path and retry"
	# export LD_PRELOAD=/opt/intel/mkl/lib/intel64/libmkl_def.so:/opt/intel/mkl/lib/intel64/libmkl_avx2.so:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_intel_lp64.so:/opt/intel/mkl/lib/intel64/libmkl_intel_thread.so:/opt/intel/lib/intel64_lin/libiomp5.so
	from runfaiss import build_faiss, faiss_search, check_cached, faiss_search_flat
	import math
	if args.sweep == False:
		assert args.L!=-1 and args.k_star!=-1 and args.w!=-1 and args.m!=-1
elif args.program == "annoy":
	import annoy
	if args.batch > 1:
		from multiprocessing.pool import ThreadPool
	assert args.topk!=-1 and args.is_gpu==False and (args.num_search!=-1 and args.n_trees!=-1 if args.sweep!=True else True)
def compute_recall(neighbors, true_neighbors):
    """Fraction of ground-truth ids recovered, averaged over every entry."""
    hits = 0
    for gt_row, found_row in zip(true_neighbors, neighbors):
        hits += np.intersect1d(gt_row, found_row).shape[0]
    return hits / true_neighbors.size
def compute_more_recalls(neighbors, true_neighbors, target, base):
total = 0
trimmed_neighbors = neighbors[:,:base]
trimmed_gt = true_neighbors[:,:target]
num_queries, _ = np.shape(trimmed_gt)
# print("trimmed_neighbors shape =", np.shape(trimmed_neighbors))
# print("trimmed_gt shape =", np.shape(trimmed_gt))
for i in range(num_queries):
curr_neighbors_row = trimmed_neighbors[i]
curr_gt_row = trimmed_gt[i]
for curr_gt_elem in curr_gt_row:
if curr_gt_elem in curr_neighbors_row:
total += 1
return total / trimmed_gt.size
def print_more_recalls(final_neighbors, gt):
    """Compute, print, and return the six N@M recall variants as a tuple."""
    print("final_neighbors :", final_neighbors.shape)
    print("gt :", gt.shape)
    pairs = [(1, 10), (1, 100), (10, 100), (1, 1000), (10, 1000), (100, 1000)]
    recalls = [compute_more_recalls(final_neighbors, gt, t, b) for t, b in pairs]
    for (t, b), value in zip(pairs, recalls):
        print("Recall %d@%d:" % (t, b), value)
    return tuple(recalls)
def ivecs_read(fname):
    """Load an .ivecs file: each record is [dim:int32, v0..v(dim-1):int32]."""
    raw = np.fromfile(fname, dtype='int32')
    dim = raw[0]  # every record is assumed to share the first record's dim
    return raw.reshape(-1, dim + 1)[:, 1:]
def ivecs_write(fname, m):
    """Write matrix m in .ivecs layout: each row prefixed by its int32 dimension."""
    rows, dim = m.shape
    header = np.full((rows, 1), dim, dtype=np.int32)
    np.append(header, m, axis=1).tofile(fname)
def bvecs_mmap(fname, offset_=None, shape_=None):
    # Memory-map a .bvecs file (each record: 4-byte int32 dim header + dim uint8 values)
    # and return a view of the vectors with the headers stripped.
    # NOTE(review): the 132-byte stride hard-codes dim=128 (128 values + 4 header
    # bytes), i.e. it is only valid for 128-dimensional data such as SIFT1B --
    # confirm before reusing with another dataset.
    if offset_!=None and shape_!=None:
        x = np.memmap(fname, dtype=np.uint8, mode='r', offset=offset_*132, shape=(shape_*132))
    else:
        x = np.memmap(fname, dtype=np.uint8, mode='r')
    d = x[:4].view('int32')[0]  # dimension read from the first record's header
    return x.reshape(-1, d + 4)[:, 4:]  # drop the 4 header bytes per record
def bvecs_write(fname, m):
    """Write matrix m in .bvecs layout: each row prefixed by a 4-byte dim header.

    Only the first header byte carries the dimension, so this (like the
    original) assumes dim < 256.
    """
    rows, dim = m.shape
    header = np.zeros((rows, 4), dtype=np.uint8)
    header[:, 0] = dim
    np.append(header, m, axis=1).tofile(fname)
def bvecs_read(fname):
    """Read a whole .bvecs file into memory and strip the per-record headers."""
    raw = np.fromfile(fname, dtype=np.uint8)
    dim = raw[:4].view('int32')[0]  # dim taken from the first record
    return raw.reshape(-1, dim + 4)[:, 4:]
def mmap_fvecs(fname, offset_=None, shape_=None):
    """Memory-map an .fvecs file (per record: int32 dim header + dim float32s).

    NOTE: the windowed branch relies on the module-level dimension ``D`` to
    compute byte offsets; the full-file branch reads the dim from the data.
    """
    if offset_ is not None and shape_ is not None:
        x = np.memmap(fname, dtype='int32', mode='r', offset=(offset_*(D+1)*4), shape=(shape_*(D+1)))
    else:
        x = np.memmap(fname, dtype='int32', mode='r')
    dim = x[0]
    return x.reshape(-1, dim + 1)[:, 1:].view(np.float32)
# Use for synthetic billion dataset
# Used for the synthetic billion-scale datasets (float16 payload).
def mmap_fvecs2(fname, offset_=None, shape_=None):
    """Memory-map a float16 variant of .fvecs (dim header stored as float16 too).

    NOTE: the windowed branch relies on the module-level dimension ``D``.
    """
    if offset_ is not None and shape_ is not None:
        x = np.memmap(fname, dtype='float16', mode='r', offset=(offset_*(D+1)*2), shape=(shape_*(D+1)))
    else:
        x = np.memmap(fname, dtype='float16', mode='r')
    dim = int(x[0])
    return x.reshape(-1, dim + 1)[:, 1:].view(np.float16)
def fvecs_write(fname, m):
    """Write matrix m as .fvecs: per-row int32 dim header followed by float32 data."""
    m = m.astype('float32')
    rows, dim = m.shape
    out = np.empty((rows, dim + 1), dtype='int32')
    out[:, 0] = dim
    out[:, 1:] = m.view('int32')  # reinterpret the float bits as int32
    out.tofile(fname)
def txt_to_fvecs(fname):
    # Convert a whitespace-separated text matrix into an .fvecs file under
    # the module-level `dataset_basedir`, choosing the output name from
    # substrings of the input filename.  "query" files map to *_query.fvecs,
    # everything else to *_base.fvecs.
    # NOTE(review): the branch order matters only in that more specific
    # substrings ("_clognormal", "_cnormal") are checked before the generic
    # ones ("_lognormal", "_normal") -- keep this order when editing.
    txt_arr = np.loadtxt(fname)
    if "_clognormal" in fname:
        if "query" in fname:
            fvecs_write(dataset_basedir + "clognormal1m_query.fvecs", txt_arr)
        else:
            fvecs_write(dataset_basedir + "clognormal1m_base.fvecs", txt_arr)
    elif "_cnormal" in fname:
        if "query" in fname:
            fvecs_write(dataset_basedir + "cnormal1m_query.fvecs", txt_arr)
        else:
            fvecs_write(dataset_basedir + "cnormal1m_base.fvecs", txt_arr)
    elif "_lognormal" in fname:
        if "query" in fname:
            fvecs_write(dataset_basedir + "lognormal1m_query.fvecs", txt_arr)
        else:
            fvecs_write(dataset_basedir + "lognormal1m_base.fvecs", txt_arr)
    elif "_normal" in fname:
        if "query" in fname:
            fvecs_write(dataset_basedir + "normal1m_query.fvecs", txt_arr)
        else:
            fvecs_write(dataset_basedir + "normal1m_base.fvecs", txt_arr)
def read_fbin(filename, start_idx=0, chunk_size=None):
    """ Read *.fbin file that contains float32 vectors
    Args:
        :param filename (str): path to *.fbin file
        :param start_idx (int): start reading vectors from this index
        :param chunk_size (int): number of vectors to read.
                                 If None, read all vectors
    Returns:
        Array of float32 vectors (numpy.ndarray)
    """
    with open(filename, "rb") as f:
        # Header: two int32 values -- total vector count, then dimension.
        nvecs, dim = np.fromfile(f, count=2, dtype=np.int32)
        if chunk_size is None:
            nvecs = nvecs - start_idx
        else:
            nvecs = chunk_size
        # `offset` skips start_idx vectors relative to the current position.
        payload = np.fromfile(f, count=nvecs * dim, dtype=np.float32,
                                offset=start_idx * 4 * dim)
    return payload.reshape(nvecs, dim)
def read_ibin(filename, start_idx=0, chunk_size=None):
    """ Read *.ibin file that contains int32 vectors
    Args:
        :param filename (str): path to *.ibin file
        :param start_idx (int): start reading vectors from this index
        :param chunk_size (int): number of vectors to read.
                                 If None, read all vectors
    Returns:
        Array of int32 vectors (numpy.ndarray)
    """
    with open(filename, "rb") as f:
        # Header: two int32 values -- total vector count, then dimension.
        nvecs, dim = np.fromfile(f, count=2, dtype=np.int32)
        if chunk_size is None:
            nvecs = nvecs - start_idx
        else:
            nvecs = chunk_size
        # `offset` skips start_idx vectors relative to the current position.
        payload = np.fromfile(f, count=nvecs * dim, dtype=np.int32,
                                offset=start_idx * 4 * dim)
    return payload.reshape(nvecs, dim)
def write_fbin(filename, vecs):
    """ Write an array of float32 vectors to *.fbin file
    Args:
        :param filename (str): path to *.fbin file
        :param vecs (numpy.ndarray): array of float32 vectors to write
    """
    # Bug fix: `struct` was used here but never imported anywhere in the
    # module, so this function raised NameError on first call.
    import struct
    assert len(vecs.shape) == 2, "Input array must have 2 dimensions"
    with open(filename, "wb") as f:
        nvecs, dim = vecs.shape
        # Header: little-endian int32 count followed by int32 dimension.
        f.write(struct.pack('<i', nvecs))
        f.write(struct.pack('<i', dim))
        vecs.astype('float32').flatten().tofile(f)
def write_ibin(filename, vecs):
    """ Write an array of int32 vectors to *.ibin file
    Args:
        :param filename (str): path to *.ibin file
        :param vecs (numpy.ndarray): array of int32 vectors to write
    """
    # Bug fix: `struct` was used here but never imported anywhere in the
    # module, so this function raised NameError on first call.
    import struct
    assert len(vecs.shape) == 2, "Input array must have 2 dimensions"
    with open(filename, "wb") as f:
        nvecs, dim = vecs.shape
        # Header: little-endian int32 count followed by int32 dimension.
        f.write(struct.pack('<i', nvecs))
        f.write(struct.pack('<i', dim))
        vecs.astype('int32').flatten().tofile(f)
def read_data(dataset_path, offset_=None, shape_=None, base=True):
    """Load (a window of) the dataset selected by the global ``args.dataset``.

    :param dataset_path: base directory (base=True) or a concrete file path (base=False)
    :param offset_: index of the first vector to map, or None for the whole file
    :param shape_: number of vectors to map, or None for the whole file
    :param base: True -> append the dataset's canonical base filename to dataset_path
    :return: array-like view over the requested vectors
    :raises AssertionError: for an unrecognized dataset name
    """
    if "sift1m" in args.dataset:
        file = dataset_path + "sift_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "deep1m" in args.dataset:
        file = dataset_path + "deep1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "deepm96" in args.dataset:
        file = dataset_path + "deepm96_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "test" in args.dataset:
        file = dataset_path + "test.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "clognormal1m":
        file = dataset_path + "clognormal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "cnormal1m":
        file = dataset_path + "cnormal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "lognormal1m":
        file = dataset_path + "lognormal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "normal1m":
        file = dataset_path + "normal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    # Synthetic billion-scale sets store the base data as float16 (mmap_fvecs2)
    # but queries/splits in the regular float32 layout.
    elif args.dataset == "clognormal1b":
        file = dataset_path + "1000000000_128_clognormal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "cnormal1b":
        file = dataset_path + "1000000000_128_cnormal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "lognormal1b":
        file = dataset_path + "1000000000_128_lognormal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "normal1b":
        file = dataset_path + "1000000000_128_normal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "music1m" in args.dataset:
        # file = dataset_path + "database_music100.bin" if base else dataset_path
        # return np.fromfile(file, dtype = np.float32).reshape(N, D)
        file = dataset_path + "split_data/music1m_1_0" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "gist" in args.dataset:
        file = dataset_path + "gist_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "sift1b" in args.dataset:
        file = dataset_path+"bigann_base.bvecs" if base else dataset_path
        return bvecs_mmap(file, offset_=offset_, shape_=shape_)
    elif "deep1b" in args.dataset:
        file = dataset_path+"deep1B_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "tti1m" in args.dataset:
        file = dataset_path+"/split_data/tti1m_1_0" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "tti1b" in args.dataset:
        file = dataset_path+"base.1B.fbin" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "glove" in args.dataset:
        file = dataset_path+"glove-100-angular.hdf5" if base else dataset_path
        if base:
            dataset = h5py.File(file, "r")
            dataset = np.array(dataset['train'], dtype='float32')
            # Normalize rows so that dot product behaves like cosine similarity.
            if args.metric == "dot_product":
                dataset = dataset / np.linalg.norm(dataset, axis=1)[:, np.newaxis]
            if offset_!=None and shape_!=None:
                return dataset[offset_:offset_+shape_]
            else:
                return dataset
        else:
            dataset = h5py.File(dataset_path, "r")
            return np.array(dataset['dataset'], dtype='float32')
    else:
        # Bug fix: was `assert(false)`, which raised NameError instead of the
        # intended AssertionError.
        assert False, "unsupported dataset: " + args.dataset
def write_split_data(split_data_path, split_data):
    """Persist one dataset split in the on-disk format implied by args.dataset
    (bvecs for sift1b, hdf5 for glove, fvecs for everything else)."""
    if "sift1b" in args.dataset:
        bvecs_write(split_data_path, split_data)
    elif "sift1m" in args.dataset or "gist" in args.dataset or "deep1m" in args.dataset or "deepm96" in args.dataset or "deep1b" in args.dataset or "music1m" in args.dataset or args.dataset == "clognormal1m" or args.dataset == "cnormal1m" or args.dataset == "lognormal1m" or args.dataset == "normal1m" or args.dataset == "clognormal1b" or args.dataset == "cnormal1b" or args.dataset == "lognormal1b" or args.dataset == "normal1b" or args.dataset == "test" or "tti1m" in args.dataset or "tti1b" in args.dataset:
        fvecs_write(split_data_path, split_data)
    elif "glove" in args.dataset:
        hf = h5py.File(split_data_path, 'w')
        hf.create_dataset('dataset', data=split_data)
        hf.close()  # bug fix: the HDF5 handle was left open (resource leak)
    print("Wrote to ", split_data_path, ", shape ", split_data.shape)
    print("arcm::write_split_data done\n");
def write_gt_data(gt_data):
    """Persist the ground-truth neighbor ids to the global groundtruth_path in
    the format implied by args.dataset (ivecs, or hdf5 for glove)."""
    if "sift1b" in args.dataset or "sift1m" in args.dataset or "gist" in args.dataset or "deep1m" in args.dataset or "deepm96" in args.dataset or "deep1b" in args.dataset or "music1m" in args.dataset or args.dataset == "clognormal1m" or args.dataset == "cnormal1m" or args.dataset == "lognormal1m" or args.dataset == "normal1m" or args.dataset == "clognormal1b" or args.dataset == "cnormal1b" or args.dataset == "lognormal1b" or args.dataset == "normal1b" or args.dataset == "test" or "tti1m" in args.dataset or "tti1b" in args.dataset:
        ivecs_write(groundtruth_path, gt_data)
    elif "glove" in args.dataset:
        hf = h5py.File(groundtruth_path, 'w')
        hf.create_dataset('dataset', data=gt_data)
        hf.close()  # bug fix: the HDF5 handle was left open (resource leak)
    print("Wrote to ", groundtruth_path, ", shape ", gt_data.shape)
    print("arcm::write_gt_data done\n");
def mmap_split(filename, N):
    # Split the dataset into args.num_split equal chunks via windowed mmaps
    # (no full in-memory copy).  For the first split only, also write a 10%
    # random sample as a learn/training file.
    # NOTE(review): any remainder N % args.num_split rows are never written --
    # confirm this truncation is intended.
    num_per_split = int(N/args.num_split)
    sampling_rate = 0.1  # fraction of the first split kept as the learn set
    for i in range(args.num_split):
        split_data = read_data(dataset_basedir, offset_=num_per_split*i, shape_=num_per_split)
        print("Split ", i, " / split_data.size: ", split_data.shape)
        print(split_data[0])
        write_split_data(split_dataset_path + str(args.num_split) + "_" + str(i), split_data)
        if i==0:
            trainset = np.random.choice(num_per_split, int(sampling_rate*num_per_split), replace=False)
            write_split_data(split_dataset_path + "learn" + str(args.num_split) + "_" + str(i), split_data[trainset])
def split(filename, num_iter, N, D):
    # Stream the dataset in num_iter chunks and carve it into args.num_split
    # contiguous splits, writing each split to disk as it becomes fully
    # available.  The last split absorbs the remainder rows.  For glove and the
    # synthetic/music/deepm96 sets, a 10% random sample of each split is also
    # written as a learn file.
    num_per_split = int(N/args.num_split)
    dataset_per_iter = int(N/num_iter)
    # Accumulator for rows read so far but not yet emitted as a split.
    dataset = np.empty((0, D), dtype=np.uint8 if 'sift1b' in args.dataset else np.float32)
    print("dataset_per_iter: ", dataset_per_iter, " / num_per_split: ", num_per_split)
    num_split_list=[]
    split = 0
    sampling_rate = 0.1
    for it in range(num_iter):
        print("Iter: ", it)
        # The last iteration reads whatever rows remain (handles N % num_iter).
        if it==num_iter-1:
            dataset = np.append(dataset, read_data(dataset_basedir, offset_=it*dataset_per_iter, shape_=(N-it*dataset_per_iter)), axis=0)
        else:
            dataset = np.append(dataset, read_data(dataset_basedir, offset_=it*dataset_per_iter, shape_=dataset_per_iter), axis=0)
        count=0
        while split<args.num_split:
            # Not enough buffered rows for the next split yet?
            if (split+1)*num_per_split > dataset_per_iter*(it+1):
                if it!=num_iter-1:
                    # Keep the unconsumed tail and read more in the next iteration.
                    print("Entering next iter.. start index ", count*num_per_split, " ", dataset.shape[0])
                    dataset = dataset[count*num_per_split:]
                else:
                    # Final iteration: flush everything left as the last split.
                    split_size = dataset[count*num_per_split:].shape[0]
                    print("Split ", split, ": ", count*num_per_split, " ", N-1)
                    print(dataset[count*num_per_split])
                    write_split_data(split_dataset_path + str(args.num_split) + "_" + str(split), dataset[count*num_per_split:])
                    if "glove" in args.dataset:
                        trainset = np.random.choice(split_size, int(sampling_rate*split_size), replace=False)
                        write_split_data(split_dataset_path + args.metric + "_learn" + str(args.num_split) + "_" + str(split), dataset[count*num_per_split:][trainset])
                    elif "music1m" in args.dataset or "deepm96" in args.dataset or args.dataset == "clognormal1m" or args.dataset == "cnormal1m" or args.dataset == "lognormal1m" or args.dataset == "normal1m" or args.dataset == "clognormal1b" or args.dataset == "cnormal1b" or args.dataset == "lognormal1b" or args.dataset == "normal1b":
                        trainset = np.random.choice(split_size, int(sampling_rate*split_size), replace=False)
                        write_split_data(split_dataset_path + "learn" + str(args.num_split) + "_" + str(split), dataset[count*num_per_split:][trainset])
                    num_split_list.append(dataset[count*num_per_split:].shape[0])
                    split = split+1
                break
            elif split < args.num_split:
                # A full split's worth of rows is buffered: write it out.
                split_size = dataset[count*num_per_split:(count+1)*num_per_split].shape[0]
                print("Split ", split, ": ", count*num_per_split, " ", (count+1)*num_per_split)
                print(dataset[count*num_per_split])
                write_split_data(split_dataset_path + str(args.num_split) + "_" + str(split), dataset[count*num_per_split:(count+1)*num_per_split])
                if "glove" in args.dataset:
                    trainset = np.random.choice(split_size, int(sampling_rate*split_size), replace=False)
                    write_split_data(split_dataset_path + args.metric + "_learn" + str(args.num_split) + "_" + str(split), dataset[count*num_per_split:(count+1)*num_per_split][trainset])
                elif "music1m" in args.dataset or "deepm96" in args.dataset or args.dataset == "clognormal1m" or args.dataset == "cnormal1m" or args.dataset == "lognormal1m" or args.dataset == "normal1m" or args.dataset == "clognormal1b" or args.dataset == "cnormal1b" or args.dataset == "lognormal1b" or args.dataset == "normal1b":
                    trainset = np.random.choice(split_size, int(sampling_rate*split_size), replace=False)
                    write_split_data(split_dataset_path + "learn" + str(args.num_split) + "_" + str(split), dataset[count*num_per_split:(count+1)*num_per_split][trainset])
                num_split_list.append(dataset[count*num_per_split:(count+1)*num_per_split].shape[0])
                split = split+1
            count = count+1
    print("num_split_lists: ", num_split_list)
    print("arcm::split done\n");
def random_split(filename, num_iter, N, D):
    """Shuffle the whole dataset with a fixed seed and write it out as
    args.num_split splits, plus a remapping file of the shuffled ids.

    :param filename: unused (kept for interface compatibility with split())
    :param num_iter: only used for the informational print
    :param N: total number of vectors
    :param D: vector dimensionality (unused here; kept for interface parity)
    """
    import random
    num_per_split = int(N / args.num_split)
    dataset_per_iter = int(N / num_iter)
    print("dataset_per_iter: ", dataset_per_iter, " / num_per_split: ", num_per_split)
    # Fixed seed so the shuffle (and the remapping file) is reproducible.
    random.seed(100)
    data_ids = list(range(N))
    random.shuffle(data_ids)
    dataset = read_data(dataset_basedir)
    for split in range(args.num_split):
        # Bug fix: the original guard `if split < args.num_split` inside this
        # loop was always true, so its else-branch was dead and the remainder
        # (N % args.num_split rows) was silently dropped; the last split now
        # receives the tail.
        if split < args.num_split - 1:
            ids = data_ids[split * num_per_split:(split + 1) * num_per_split]
        else:
            ids = data_ids[split * num_per_split:]
        write_split_data(split_dataset_path + str(args.num_split) + "_" + str(split), dataset[ids])
    np.array(data_ids, dtype=np.uint32).tofile(remapping_file_path)
    print("Wrote remapping index file to ", remapping_file_path)
def run_groundtruth():
    # Compute exact top-1000 ground truth for the query set by handing raw
    # row pointers to the native ./groundtruth.so library, then persist the
    # result via write_gt_data().  Relies on module globals: dataset_basedir,
    # qN (query count), N, D, args, split_dataset_path.
    print("Making groundtruth file")
    import ctypes
    groundtruth_dir = dataset_basedir + "groundtruth/"
    if os.path.isdir(groundtruth_dir)!=True:
        os.mkdir(groundtruth_dir)
    queries = np.array(get_queries(), dtype='float32')
    # Output buffers filled in-place by the native code.
    groundtruth = np.empty([qN, 1000], dtype=np.int32)
    groundtruth_simil = np.empty([qN, 1000], dtype=np.float32)
    # Per-row ctypes views; the arrays must stay alive while the .so runs.
    ypp_handles = [np.ctypeslib.as_ctypes(row) for row in queries]
    gpp_handles = [np.ctypeslib.as_ctypes(row) for row in groundtruth]
    gspp_handles = [np.ctypeslib.as_ctypes(row) for row in groundtruth_simil]
    ypp = (ctypes.POINTER(ctypes.c_float) * qN)(*ypp_handles)
    gpp = (ctypes.POINTER(ctypes.c_int) * qN)(*gpp_handles)
    gspp = (ctypes.POINTER(ctypes.c_float) * qN)(*gspp_handles)
    if(args.num_split == -1):
        # Single pass over the whole base dataset.
        dataset = read_data(dataset_basedir, base=True, offset_=0, shape_=None).astype('float32')
        xpp_handles = [np.ctypeslib.as_ctypes(row) for row in dataset]
        xpp = (ctypes.POINTER(ctypes.c_float) * N)(*xpp_handles)
        libc = ctypes.CDLL('./groundtruth.so')
        libc.compute_groundtruth.restype=None
        libc.compute_groundtruth(0, N, D, qN, xpp, ypp, gpp, gspp, True if args.metric=="dot_product" else False)
        write_gt_data(groundtruth)
    else:
        # One native pass per split; the .so merges results across calls
        # using the split index as the id offset.
        for num in range(args.num_split):
            print("Working on", str(num+1), "th out of", str(args.num_split), "splits...")
            num_per_split = int(N/args.num_split)
            partial_split_dataset_path = split_dataset_path+str(args.num_split)+"_"+str(num)
            dataset = read_data(partial_split_dataset_path, base=False, offset_=0, shape_=None).astype('float32')
            xpp_handles = [np.ctypeslib.as_ctypes(row) for row in dataset]
            # NOTE(review): the pointer array is sized N but only num_per_split
            # handles are supplied -- confirm the native side only reads
            # num_per_split entries.
            xpp = (ctypes.POINTER(ctypes.c_float) * N)(*xpp_handles)
            libc = ctypes.CDLL('./groundtruth.so')
            libc.compute_groundtruth.restype=None
            libc.compute_groundtruth(num, num_per_split, D, qN, xpp, ypp, gpp, gspp, True if args.metric=="dot_product" else False)
            # split_gt_path = groundtruth_path
            # NOTE(review): writing the (partial) ground truth after every split
            # looks redundant -- the final write is the one that matters.
            write_gt_data(groundtruth)
def sort_neighbors(distances, neighbors):
    """Jointly sort (neighbors, distances) best-first along the last axis and
    trim both to args.topk.  "Best" is largest for dot_product/angular scores
    and smallest for squared_l2 distances."""
    if args.metric in ("dot_product", "angular"):
        order = np.argsort(-distances, axis=-1)
        return (np.take_along_axis(neighbors, order, -1)[:, :, :args.topk],
                -np.sort(-distances, axis=-1)[:, :, :args.topk])
    elif args.metric == "squared_l2":
        order = np.argsort(distances, axis=-1)
        return (np.take_along_axis(neighbors, order, -1)[:, :, :args.topk],
                np.sort(distances, axis=-1)[:, :, :args.topk])
    else:
        assert False
def prepare_eval():
    """Load the ground truth and query set and sanity-check the gt width."""
    gt = get_groundtruth()
    queries = get_queries()
    print("gt shape: ", np.shape(gt))
    print("queries shape: ", np.shape(queries))
    # Downstream recall code assumes exactly 1000 ground-truth ids per query.
    assert gt.shape[1] == 1000
    return gt, queries
def print_recall(final_neighbors, gt):
    """Compute, print, and return recall k@k for k in 1, 10, 100, 1000."""
    print("final_neighbors :", final_neighbors.shape)
    print("gt :", gt.shape)
    cutoffs = (1, 10, 100, 1000)
    recalls = [compute_recall(final_neighbors[:, :k], gt[:, :k]) for k in cutoffs]
    for k, value in zip(cutoffs, recalls):
        print("Recall %d@%d:" % (k, k), value)
    return tuple(recalls)
def get_searcher_path(split):
    """Return (directory, file path) for the serialized searcher of one split,
    creating the directory if needed.  Uses module globals basedir and args."""
    searcher_dir = f"{basedir}{args.program}_searcher_{args.metric}/{args.dataset}/Split_{args.num_split}/"
    os.makedirs(searcher_dir, exist_ok=True)
    searcher_path = f"{searcher_dir}{args.dataset}_searcher_{args.num_split}_{split}"
    return searcher_dir, searcher_path
def check_available_search_config(program, bc, search_config):
    # Given one build configuration `bc` and a list of candidate search
    # configurations, return the indices of the search configs that are valid
    # for this build (used by the parameter sweep).  Relies on module globals
    # D (dimension), N (dataset size), and args.
    sc_list = list()
    if program == "scann":
        num_leaves, threshold, dims, metric = bc
        for idx, sc in enumerate(search_config):
            leaves_to_search = sc[0]
            # Prune: cannot search more leaves than exist; dims must divide D
            # during a sweep; for L2, the fine codebook must cover N points.
            if leaves_to_search > num_leaves or (D%dims!=0 and args.sweep==True) or (metric == 'squared_l2' and (4**(D/dims) < N)):
                continue
            elif (D/dims) <= 4:
                # Too few subspaces for a meaningful quantization.
                continue
            else:
                sc_list.append(idx)
    elif program == "faiss":
        L, m, log2kstar, metric = bc
        for idx, sc in enumerate(search_config):
            nprobe, args.reorder = sc[0], sc[1]
            # Prune combinations unsupported by faiss / the GPU backend:
            # nprobe bounded by L (and 2048 on GPU); m must divide D in a
            # sweep and be <= 96 on GPU; CPU PQ needs log2kstar <= 8 while
            # GPU requires exactly 8; for dot product the codebook must
            # cover N points; OPQ output dim must be a multiple of m.
            if nprobe > L or (nprobe > 2048 and args.is_gpu) or (D%m!=0 and args.sweep==True) or (m > 96 and args.is_gpu) or (not args.is_gpu and log2kstar>8) or (args.is_gpu and log2kstar != 8) or (metric == 'dot_product' and ((2**log2kstar)**m < N)) or (args.opq != -1 and args.opq%m != 0):
                continue
            elif m <= 4:
                # Too few subquantizers for a meaningful quantization.
                continue
            else:
                sc_list.append(idx)
    else:
        assert False
    return sc_list
def run_scann():
    """Build (or load) ScaNN searchers for every dataset split, run the
    configured search sweep against them, and report recall and latency.

    Relies on module-level state: ``args`` (CLI options), ``N``/``D``
    (dataset size / dimensionality), path globals (``coarse_dir``,
    ``scann_fine_dir``, ``split_dataset_path``, ``dataset_basedir``,
    ``sweep_result_path``) and helpers ``prepare_eval``, ``get_train``,
    ``read_data``, ``sort_neighbors``, ``print_recall``,
    ``print_more_recalls`` and ``check_available_search_config``.
    """
    gt, queries = prepare_eval()
    train_dataset = get_train()
    # Each split holds an equal share of the N base vectors.
    num_per_split = int(N/args.num_split)
    if args.sweep:
        # Sweep mode: run a grid of build configs x search configs and log
        # results to sweep_result_path.  build_config rows are
        # [num_leaves, anisotropic threshold, dims-per-block, metric];
        # search_config rows are [leaves_to_search, reorder].
        if "sift1b" in args.dataset or "deep1b" in args.dataset or "tti1b" in args.dataset:
            # Billion-scale datasets.  (Older, larger sweep grids that were
            # kept here as commented-out code have been removed; see VCS
            # history if they are needed again.)
            build_config = [
                [20000, 0.2, 1, args.metric],
                [20000, 0.2, 2, args.metric],
                [20000, 0.2, 4, args.metric]
            ]
            if "tti1b" in args.dataset:
                search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder], [512, args.reorder], [1024, args.reorder]]
            else:
                search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder]]
        else:
            # Million-scale datasets.  (Older sweep grids removed; see VCS
            # history.)
            build_config = [
                [250, 0.2, 1, args.metric],
                [250, 0.2, 2, args.metric],
                [250, 0.2, 4, args.metric]
            ]
            search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [160, args.reorder], [192, args.reorder], [256, args.reorder]]
        # Sweep results are written to this file as tab-separated rows.
        f = open(sweep_result_path, "w")
        f.write("Program: " + args.program + " Topk: " + str(args.topk) + " Num_split: " + str(args.num_split)+ " Batch: "+str(args.batch)+" CSize: "+str(args.csize)+"\n")
        f.write("L\tThreashold\tm\t|\tw\tr\tMetric\n")
    else:
        # Single configuration taken straight from the CLI arguments.
        build_config = [(args.L, args.threshold, int(D/args.m), args.metric)]
        search_config = [[args.w, args.reorder]]
    for bc in build_config:
        num_leaves, threshold, dims, metric = bc
        # Drop search configs that are invalid for this build config.
        sc_list = check_available_search_config(args.program, bc, search_config)
        if len(sc_list) > 0:
            # Per-config result buffers; each split's results are appended on
            # the last axis and merged by sort_neighbors afterwards.
            neighbors=np.empty((len(sc_list), queries.shape[0],0), dtype=np.int32)
            distances=np.empty((len(sc_list), queries.shape[0],0), dtype=np.float32)
            total_latency = np.zeros(len(sc_list))
            base_idx = 0
            os.makedirs(coarse_dir, exist_ok=True)
            # Coarse/fine codebooks are cached on disk per build config and
            # shared across splits.
            coarse_path = coarse_dir+"coarse_codebook_L_"+str(num_leaves)+"_threshold_"+str(threshold)+"_dims_"+str(dims)+"_metric_"+metric
            fine_path = scann_fine_dir+"fine_codebook_L_"+str(num_leaves)+"_threshold_"+str(threshold)+"_dims_"+str(dims)+"_metric_"+metric
            for split in range(args.num_split):
                searcher_dir, searcher_path = get_searcher_path(split)
                print("Split ", split)
                batch_size = min(args.batch, queries.shape[0])
                searcher = None
                # Extend the per-split prefix with this build config so each
                # (split, config) pair gets its own serialized searcher.
                searcher_path = searcher_path + '_' + str(num_leaves) + '_' + str(threshold) + '_' + str(dims) + '_' + metric + ("_reorder" if args.reorder!=-1 else '')
                if args.flat != -1:
                    # Brute-force baseline: no tree / AH quantization.
                    load_coarse = True if os.path.isfile(coarse_path) else False
                    load_fine = True if os.path.isfile(fine_path) else False
                    # Load this split's shard (or the whole base set when
                    # num_split == 1).
                    dataset = read_data(split_dataset_path + str(args.num_split) + "_" + str(split) if args.num_split>1 else dataset_basedir, base=False if args.num_split>1 else True, offset_=None if args.num_split>1 else 0, shape_=None)
                    print("Setting up score_brute_force builder..")
                    searcher = scann.scann_ops_pybind.builder(dataset, train_dataset, load_coarse, coarse_path, load_fine, fine_path, 10, metric).score_brute_force().build()
                else:
                    if os.path.isdir(searcher_path):
                        # Reuse a previously serialized searcher.
                        print("Loading searcher from ", searcher_path)
                        searcher = scann.scann_ops_pybind.load_searcher(searcher_path, num_per_split, D, coarse_path, fine_path)
                    else:
                        # Build a new ScaNN searcher and cache it on disk.
                        print("Entering ScaNN builder, will be created to ", searcher_path)
                        load_coarse = True if os.path.isfile(coarse_path) else False
                        load_fine = True if os.path.isfile(fine_path) else False
                        print("Load coarse: ", load_coarse, " / Load fine: ", load_fine)
                        dataset = read_data(split_dataset_path + str(args.num_split) + "_" + str(split) if args.num_split>1 else dataset_basedir, base=False if args.num_split>1 else True, offset_=None if args.num_split>1 else 0, shape_=None)
                        if args.reorder!=-1:
                            searcher = scann.scann_ops_pybind.builder(dataset, train_dataset, load_coarse, coarse_path, load_fine, fine_path, 10, metric).tree(
                                num_leaves=num_leaves, num_leaves_to_search=num_leaves, training_sample_size=args.coarse_training_size).score_ah(
                                dims, anisotropic_quantization_threshold=threshold, training_sample_size=args.fine_training_size).reorder(args.reorder).build()
                        else:
                            searcher = scann.scann_ops_pybind.builder(dataset, train_dataset, load_coarse, coarse_path, load_fine, fine_path, 10, metric).tree(
                                num_leaves=num_leaves, num_leaves_to_search=num_leaves, training_sample_size=args.coarse_training_size).score_ah(
                                dims, anisotropic_quantization_threshold=threshold, training_sample_size=args.fine_training_size).build()
                        print("Saving searcher to ", searcher_path)
                        os.makedirs(searcher_path, exist_ok=True)
                        searcher.serialize(searcher_path, coarse_path, load_coarse, fine_path, load_fine)
                print("sc_list: ", sc_list)
                # Per-split accumulators: one (queries x topk) array per
                # search config.
                n = list()
                d = list()
                for idx in range(len(sc_list)):
                    leaves_to_search, reorder = search_config[sc_list[idx]]
                    assert D%dims == 0
                    if args.reorder!=-1:
                        assert args.topk <= reorder
                    print(str(num_leaves)+"\t"+str(threshold)+"\t"+str(int(D/dims))+"\t|\t"+str(leaves_to_search)+"\t"+str(reorder)+"\t"+str(metric)+"\n")
                    if args.flat != -1:
                        print("Start brute_force search!")
                        start = time.time()
                        local_neighbors, local_distances = searcher.search_batched_parallel(queries, leaves_to_search=leaves_to_search, pre_reorder_num_neighbors=reorder, final_num_neighbors=args.topk, batch_size=batch_size)
                        end = time.time()
                        total_latency[idx] += ( end - start ) * 1000
                        # Shift split-local ids into the global index space.
                        n.append(local_neighbors + base_idx)
                        d.append(local_distances)
                    else:
                        if args.batch > 1:
                            def search_batched(queries):
                                # Returns (elapsed seconds, (neighbors, distances)).
                                start = time.time()
                                local_neighbors, local_distances = searcher.search_batched_parallel(queries, leaves_to_search=leaves_to_search, pre_reorder_num_neighbors=reorder, final_num_neighbors=args.topk, batch_size=batch_size)
                                end = time.time()
                                # Mask "no result" sentinel ids with the worst
                                # possible distance for the metric.
                                local_distances[local_neighbors==2147483647] = math.inf if metric=="squared_l2" else -math.inf # 2147483647: maximum integer value
                                return (end - start, (local_neighbors, local_distances))
                            # Chop the query set into chunks of args.csize
                            # queries; a trailing partial chunk is dropped.
                            cstep = queries.shape[0] / args.csize
                            queries_list = list()
                            for step in range(int(cstep)):
                                input_queries = queries[ step * args.csize : (step + 1) * args.csize, : ]
                                queries_list.append(input_queries)
                            local_results = [search_batched(q) for q in queries_list]
                            total_latency[idx] += (np.sum(np.array([time for time, _ in local_results]).reshape(int(cstep), 1)))*1000
                            nd = [nd for _, nd in local_results]
                            n.append(np.vstack([n for n,d in nd])+base_idx)
                            d.append(np.vstack([d for n,d in nd]))
                        else:
                            # One-query-at-a-time ScaNN search.
                            def single_query(query, base_idx):
                                # Returns (elapsed seconds, (neighbors, distances)).
                                start = time.time()
                                local_neighbors, local_distances = searcher.search(query, leaves_to_search=leaves_to_search, pre_reorder_num_neighbors=reorder, final_num_neighbors=args.topk)
                                local_distances[local_neighbors==2147483647] = math.inf if metric=="squared_l2" else -math.inf # 2147483647: maximum integer value
                                return (time.time() - start, (local_neighbors, local_distances))
                            print("Entering ScaNN searcher")
                            local_results = [single_query(q, base_idx) for q in queries]
                            total_latency[idx] += (np.sum(np.array([time for time, _ in local_results]).reshape(queries.shape[0], 1)))*1000
                            nd = [nd for _, nd in local_results]
                            n.append(np.vstack([n for n,d in nd])+base_idx)
                            d.append(np.vstack([d for n,d in nd]))
                # Advance the global id offset to the next split's shard.
                base_idx = base_idx + num_per_split
                neighbors = np.append(neighbors, np.array(n, dtype=np.int32), axis=-1)
                distances = np.append(distances, np.array(d, dtype=np.float32), axis=-1)
            # Merge per-split candidate lists into globally sorted results.
            neighbors, distances = sort_neighbors(distances, neighbors)
            print("neighbors: ", neighbors.shape)
            print("distances: ", distances.shape)
            final_neighbors, _ = sort_neighbors(distances, neighbors)
            # Report recall and accumulated latency per search config.
            for idx in range(len(sc_list)):
                if args.sweep:
                    leaves_to_search, reorder = search_config[sc_list[idx]]
                    f.write(str(num_leaves)+"\t"+str(threshold)+"\t"+str(int(D/dims))+"\t|\t"+str(leaves_to_search)+"\t"+str(reorder)+"\t"+str(metric)+"\n")
                    print(str(num_leaves)+"\t"+str(threshold)+"\t"+str(int(D/dims))+"\t|\t"+str(leaves_to_search)+"\t"+str(reorder)+"\t"+str(metric)+"\n")
                top1_10, top1_100, top10_100, top1_1000, top10_1000, top100_1000 = print_more_recalls(final_neighbors[idx], gt)
                print()
                top1, top10, top100, top1000 = print_recall(final_neighbors[idx], gt)
                print("Top ", args.topk, " Total latency (ms): ", total_latency[idx])
                print("arcm::Latency written. End of File.\n");
                if args.sweep:
                    f.write(str(top1)+" %\t"+str(top10)+" %\t"+str(top100)+" %\t"+str(top1000)+" %\t|\t"+str(top1_10)+" %\t"+str(top1_100)+" %\t"+str(top10_100)+" %\t"+str(top1_1000)+" %\t"+str(top10_1000)+" %\t"+str(top100_1000)+" %\t"+str(total_latency[idx])+"\n")
    if args.sweep:
        f.close()
def faiss_pad_dataset(padded_D, dataset):
    """Append zero columns so *dataset* widens from the global D to padded_D.

    The pad is explicitly float32, matching what the faiss build path feeds
    in; returns the widened array.
    """
    extra_cols = padded_D - D
    zero_block = np.zeros((dataset.shape[0], extra_cols), dtype='float32')
    dataset = np.concatenate((dataset, zero_block), axis=-1)
    print("Dataset dimension is padded from ", D, " to ", dataset.shape[1])
    return dataset
def faiss_pad_trains_queries(padded_D, queries, train_dataset):
    """Append zero columns so queries and train_dataset widen from the
    global D to padded_D; returns the padded (queries, train_dataset) pair.

    Fix: the pads are created with each input array's own dtype.  The
    previous code used ``np.full(..., 0)`` with no dtype, producing integer
    pads, so ``np.concatenate`` silently promoted float32 inputs to float64
    — doubling memory and diverging from faiss_pad_dataset's float32 pad.
    """
    plus_dim = padded_D - D
    queries = np.concatenate(
        (queries, np.zeros((queries.shape[0], plus_dim), dtype=queries.dtype)),
        axis=-1)
    train_dataset = np.concatenate(
        (train_dataset, np.zeros((train_dataset.shape[0], plus_dim), dtype=train_dataset.dtype)),
        axis=-1)
    print("Query and Train Dataset dimension is padded from ", D, " to ", queries.shape[1])
    return queries, train_dataset
def get_padded_info(m):
    """Return (padded_D, faiss_m, was_padded) for sub-quantizer count *m*.

    On GPU (without OPQ), faiss only accepts particular m values — the
    supported set below.  An unsupported m is rounded up to the next
    supported value and the dimension is padded so dims-per-block stays
    fixed.  On CPU, or when OPQ is enabled, m is used as-is.
    """
    gpu_supported = (1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 96)
    if (not args.is_gpu) or (args.opq != -1) or (args.is_gpu and m in gpu_supported):
        # No padding needed.
        return D, m, False
    dim_per_block = int(D/m)
    # Round m up to the next GPU-supported sub-quantizer count.
    for candidate in (8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 96):
        if m < candidate:
            faiss_m = candidate
            break
    else:
        # m > 96 is unsupported on GPU and should have been filtered earlier.
        assert False, "somethings wrong.."
    padded_D = dim_per_block * faiss_m
    return padded_D, faiss_m, True
def run_faiss(D):
    """Build (or load cached) faiss indices per dataset split and benchmark them.

    For every build config [L, m, log2(k*), metric] and every compatible search
    config [w, reorder], this searches each split, merges per-split results via
    sort_neighbors, and prints recall/latency. In sweep mode the results are
    also written as TSV rows to sweep_result_path.

    D is the original feature dimension; it may be zero-padded (see
    get_padded_info) so that the PQ sub-quantizer count is one faiss-gpu
    supports.
    """
    gt, queries = prepare_eval()
    train_dataset = get_train()
    if args.sweep:
        # Sweep mode: enumerate device/dataset-specific build and search grids.
        if args.is_gpu:
            log2kstar_ = 8
            if "sift1b" in args.dataset or "deep1b" in args.dataset or "tti1b" in args.dataset:
                build_config = [
                    [10000, int(D/2), log2kstar_, args.metric],
                    [10000, int(D/4), log2kstar_, args.metric],
                    [10000, int(D/8), log2kstar_, args.metric]
                ]
                # build_config = [
                #     [8000, D, log2kstar_, args.metric], [8000, int(D/2), log2kstar_, args.metric], [8000, int(D/4), log2kstar_, args.metric], [8000, int(D/8), log2kstar_, args.metric], \
                #     # [7000, int(D/2), log2kstar_, args.metric], [7000, int(D/4), log2kstar_, args.metric], [7000, int(D/8), log2kstar_, args.metric], \
                #     [6000, D, log2kstar_, args.metric], [6000, int(D/2), log2kstar_, args.metric], [6000, int(D/4), log2kstar_, args.metric], [6000, int(D/8), log2kstar_, args.metric], \
                #     # [5000, D, log2kstar_, args.metric], [5000, int(D/2), log2kstar_, args.metric], \
                #     [4000, D, log2kstar_, args.metric], [4000, int(D/2), log2kstar_, args.metric], [4000, int(D/4), log2kstar_, args.metric], [4000, int(D/8), log2kstar_, args.metric], \
                #     # [3500, D, log2kstar_, args.metric], [3500, int(D/2), log2kstar_, args.metric], \
                # ]
                if "tti1b" in args.dataset:
                    search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder]]
                else:
                    search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder]]
            else:
                # build_config = [[800, int(D/64), log2kstar_, args.metric], [800, int(D/50), log2kstar_, args.metric], [800, int(D/32), log2kstar_, args.metric], [800, int(D/25), log2kstar_, args.metric], [800, int(D/16), log2kstar_, args.metric], [800, int(D/10), log2kstar_, args.metric], [800, int(D/8), log2kstar_, args.metric], [800, int(D/5), log2kstar_, args.metric], [800, int(D/4), log2kstar_, args.metric], [800, int(D/3), log2kstar_, args.metric], [800, int(D/2), log2kstar_, args.metric], [800, D, log2kstar_, args.metric], \
                # [1000, int(D/64), log2kstar_, args.metric], [1000, int(D/50), log2kstar_, args.metric], [1000, int(D/32), log2kstar_, args.metric], [1000, int(D/25), log2kstar_, args.metric], [1000, int(D/16), log2kstar_, args.metric], [1000, int(D/10), log2kstar_, args.metric], [1000, int(D/8), log2kstar_, args.metric], [1000, int(D/5), log2kstar_, args.metric], [1000, int(D/4), log2kstar_, args.metric], [1000, int(D/3), log2kstar_, args.metric], [1000, int(D/2), log2kstar_, args.metric], [1000, D, log2kstar_, args.metric], \
                # [1500, int(D/64), log2kstar_, args.metric], [1500, int(D/50), log2kstar_, args.metric], [1500, int(D/32), log2kstar_, args.metric], [1500, int(D/25), log2kstar_, args.metric], [1500, int(D/16), log2kstar_, args.metric], [1500, int(D/10), log2kstar_, args.metric], [1500, int(D/8), log2kstar_, args.metric], [1500, int(D/5), log2kstar_, args.metric], [1500, int(D/4), log2kstar_, args.metric], [1500, int(D/3), log2kstar_, args.metric], [1500, int(D/2), log2kstar_, args.metric], [1500, D, log2kstar_, args.metric], \
                # [2000, int(D/64), log2kstar_, args.metric], [2000, int(D/50), log2kstar_, args.metric], [2000, int(D/32), log2kstar_, args.metric], [2000, int(D/25), log2kstar_, args.metric], [2000, int(D/16), log2kstar_, args.metric], [2000, int(D/10), log2kstar_, args.metric], [2000, int(D/8), log2kstar_, args.metric], [2000, int(D/5), log2kstar_, args.metric], [2000, int(D/4), log2kstar_, args.metric], [2000, int(D/3), log2kstar_, args.metric], [2000, int(D/2), log2kstar_, args.metric], [2000, D, log2kstar_, args.metric], \
                # [4000, int(D/64), log2kstar_, args.metric], [4000, int(D/50), log2kstar_, args.metric], [4000, int(D/32), log2kstar_, args.metric], [4000, int(D/25), log2kstar_, args.metric], [4000, int(D/16), log2kstar_, args.metric], [4000, int(D/10), log2kstar_, args.metric], [4000, int(D/8), log2kstar_, args.metric], [4000, int(D/5), log2kstar_, args.metric], [4000, int(D/4), log2kstar_, args.metric], [4000, int(D/3), log2kstar_, args.metric], [4000, int(D/2), log2kstar_, args.metric], [4000, D, log2kstar_, args.metric], \
                # [600, int(D/25), log2kstar_, args.metric], [600, int(D/16), log2kstar_, args.metric], [600, int(D/10), log2kstar_, args.metric], [600, int(D/8), log2kstar_, args.metric], [600, int(D/5), log2kstar_, args.metric], [600, int(D/4), log2kstar_, args.metric], [600, int(D/3), log2kstar_, args.metric], [600, int(D/2), log2kstar_, args.metric], [600, D, log2kstar_, args.metric], \
                # [500, int(D/25), log2kstar_, args.metric], [500, int(D/16), log2kstar_, args.metric], [500, int(D/10), log2kstar_, args.metric], [500, int(D/8), log2kstar_, args.metric], [500, int(D/5), log2kstar_, args.metric], [500, int(D/4), log2kstar_, args.metric], [500, int(D/3), log2kstar_, args.metric], [500, int(D/2), log2kstar_, args.metric], [500, D, log2kstar_, args.metric], \
                # [400, int(D/25), log2kstar_, args.metric], [400, int(D/16), log2kstar_, args.metric], [400, int(D/10), log2kstar_, args.metric], [400, int(D/8), log2kstar_, args.metric], [400, int(D/5), log2kstar_, args.metric], [400, int(D/4), log2kstar_, args.metric], [400, int(D/3), log2kstar_, args.metric], [400, int(D/2), log2kstar_, args.metric], [400, D, log2kstar_, args.metric]]
                build_config = [
                    [250, int(D/2), log2kstar_, args.metric],
                    [250, int(D/4), log2kstar_, args.metric],
                    [250, int(D/8), log2kstar_, args.metric],
                    [250, int(D/10), log2kstar_, args.metric]
                ]
                # build_config = [
                #     [4000, int(D/8), log2kstar_, args.metric], [4000, int(D/5), log2kstar_, args.metric], [4000, int(D/4), log2kstar_, args.metric], [4000, int(D/3), log2kstar_, args.metric], [4000, int(D/2), log2kstar_, args.metric], [4000, D, log2kstar_, args.metric], \
                #     [2000, int(D/8), log2kstar_, args.metric], [2000, int(D/5), log2kstar_, args.metric], [2000, int(D/4), log2kstar_, args.metric], [2000, int(D/3), log2kstar_, args.metric], [2000, int(D/2), log2kstar_, args.metric], [2000, D, log2kstar_, args.metric], \
                #     [1000, int(D/8), log2kstar_, args.metric], [1000, int(D/5), log2kstar_, args.metric], [1000, int(D/4), log2kstar_, args.metric], [1000, int(D/3), log2kstar_, args.metric], [1000, int(D/2), log2kstar_, args.metric], [1000, D, log2kstar_, args.metric], \
                #     [800, int(D/8), log2kstar_, args.metric], [800, int(D/5), log2kstar_, args.metric], [800, int(D/4), log2kstar_, args.metric], [800, int(D/3), log2kstar_, args.metric], [800, int(D/2), log2kstar_, args.metric], [800, D, log2kstar_, args.metric], \
                #     [500, int(D/8), log2kstar_, args.metric], [500, int(D/5), log2kstar_, args.metric], [500, int(D/4), log2kstar_, args.metric], [500, int(D/3), log2kstar_, args.metric], [500, int(D/2), log2kstar_, args.metric], [500, D, log2kstar_, args.metric], \
                #     [250, int(D/10), log2kstar_, args.metric], [250, int(D/8), log2kstar_, args.metric], [250, int(D/4), log2kstar_, args.metric], [250, int(D/3), log2kstar_, args.metric], [250, int(D/2), log2kstar_, args.metric], [250, D, log2kstar_, args.metric], \
                # ]
                # search_config = [[1, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], \
                # [256, args.reorder], [320, args.reorder], [384, args.reorder], [448, args.reorder], [512, args.reorder], [576, args.reorder], [640, args.reorder], [704, args.reorder], [768, args.reorder], \
                # [1024, args.reorder], [1280, args.reorder], [1536, args.reorder], [2048, args.reorder], [2560, args.reorder], [3072, args.reorder], [4096, args.reorder], [4608, args.reorder], \
                # [5120, args.reorder], [5632, args.reorder], [6144, args.reorder], [6656, args.reorder], [7168, args.reorder], [7680, args.reorder], \
                # [8192, args.reorder], [16384, args.reorder]]
                # search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder]]
                search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [160, args.reorder], [192, args.reorder], [256, args.reorder]]
        else:
            if "gist" in args.dataset:
                build_config = [[1000, int(D/2), 8, args.metric], [1000, int(D/3), 8, args.metric], [1000, int(D/4), 8, args.metric]] # L, m, log2(k*), metric
            elif "sift1b" in args.dataset or "deep1b" in args.dataset or "tti1b" in args.dataset:
                # build_config = [[7000, D, 4, args.metric], [7000, int(D/2), 4, args.metric], [7000, int(D/4), 4, args.metric], \
                # [7000, D, 6, args.metric], [7000, int(D/2), 6, args.metric], [7000, int(D/4), 6, args.metric], \
                # [7000, D, 8, args.metric], [7000, int(D/2), 8, args.metric], [7000, int(D/4), 8, args.metric], \
                # [8000, D, 4, args.metric], [8000, int(D/2), 4, args.metric], [8000, int(D/4), 4, args.metric], \
                # [8000, D, 6, args.metric], [8000, int(D/2), 6, args.metric], [8000, int(D/4), 6, args.metric], \
                # [8000, D, 8, args.metric], [8000, int(D/2), 8, args.metric], [8000, int(D/4), 8, args.metric], \
                # [6000, D, 4, args.metric], [6000, int(D/2), 4, args.metric], [6000, int(D/4), 4, args.metric], \
                # [6000, D, 6, args.metric], [6000, int(D/2), 6, args.metric], [6000, int(D/4), 6, args.metric], \
                # [6000, D, 8, args.metric], [6000, int(D/2), 8, args.metric], [6000, int(D/4), 8, args.metric]]
                build_config = [
                    [40000, D, 4, args.metric],
                    [40000, int(D/2), 4, args.metric],
                    [40000, int(D/4), 4, args.metric],
                    [40000, int(D/2), 8, args.metric],
                    [40000, int(D/4), 8, args.metric],
                    [40000, int(D/8), 8, args.metric]
                ]
                # build_config = [
                #     [8000, D, 8, args.metric], [8000, int(D/2), 8, args.metric], [8000, int(D/4), 8, args.metric], [8000, int(D/8), 8, args.metric], \
                #     # [7000, D, 8, args.metric], [7000, int(D/2), 8, args.metric], [7000, int(D/4), 8, args.metric], [7000, int(D/8), 8, args.metric], \
                #     [6000, D, 8, args.metric], [6000, int(D/2), 8, args.metric], [6000, int(D/4), 8, args.metric], [6000, int(D/8), 8, args.metric], \
                #     [4000, D, 8, args.metric], [4000, int(D/2), 8, args.metric], [4000, int(D/4), 8, args.metric], [4000, int(D/8), 8, args.metric], \
                # ]
                if "tti1b" in args.dataset:
                    search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder], [512, args.reorder], [1024, args.reorder]]
                else:
                    search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder]]
            else:
                # build_config = [[1000, int(D/32), 4, args.metric], [1000, int(D/16), 4, args.metric], [1000, int(D/8), 4, args.metric], [1000, int(D/4), 4, args.metric], [1000, int(D/3), 4, args.metric], [1000, int(D/2), 4, args.metric], [1000, D, 4, args.metric], \
                # [1000, int(D/32), 6, args.metric], [1000, int(D/16), 6, args.metric], [1000, int(D/8), 6, args.metric], [1000, int(D/4), 6, args.metric], [1000, int(D/3), 6, args.metric], [1000, int(D/2), 6, args.metric], [1000, D, 6, args.metric], \
                # [1000, int(D/32), 8, args.metric], [1000, int(D/16), 8, args.metric], [1000, int(D/8), 8, args.metric], [1000, int(D/4), 8, args.metric], [1000, int(D/3), 8, args.metric], [1000, int(D/2), 8, args.metric], [1000, D, 8, args.metric], \
                # [2000, int(D/32), 4, args.metric], [2000, int(D/16), 4, args.metric], [2000, int(D/8), 4, args.metric], [2000, int(D/4), 4, args.metric], [2000, int(D/3), 4, args.metric], [2000, int(D/2), 4, args.metric], [2000, D, 4, args.metric], \
                # [2000, int(D/32), 6, args.metric], [2000, int(D/16), 6, args.metric], [2000, int(D/8), 6, args.metric], [2000, int(D/4), 6, args.metric], [2000, int(D/3), 6, args.metric], [2000, int(D/2), 6, args.metric], [2000, D, 6, args.metric], \
                # [2000, int(D/32), 8, args.metric], [2000, int(D/16), 8, args.metric], [2000, int(D/8), 8, args.metric], [2000, int(D/4), 8, args.metric], [2000, int(D/3), 8, args.metric], [2000, int(D/2), 8, args.metric], [2000, D, 8, args.metric], \
                # [800, int(D/32), 4, args.metric], [800, int(D/16), 4, args.metric], [800, int(D/8), 4, args.metric], [800, int(D/4), 4, args.metric], [800, int(D/3), 4, args.metric], [800, int(D/2), 4, args.metric], [800, D, 4, args.metric], \
                # [800, int(D/32), 6, args.metric], [800, int(D/16), 6, args.metric], [800, int(D/8), 6, args.metric], [800, int(D/4), 6, args.metric], [800, int(D/3), 6, args.metric], [800, int(D/2), 6, args.metric], [800, D, 6, args.metric], \
                # [800, int(D/32), 8, args.metric], [800, int(D/16), 8, args.metric], [800, int(D/8), 8, args.metric], [800, int(D/4), 8, args.metric], [800, int(D/3), 8, args.metric], [800, int(D/2), 8, args.metric], [800, D, 8, args.metric], \
                # [400, int(D/8), 8, args.metric], [400, int(D/4), 8, args.metric], [400, int(D/3), 8, args.metric], [400, int(D/2), 8, args.metric], [400, D, 8, args.metric], \
                # [500, int(D/8), 8, args.metric], [500, int(D/4), 8, args.metric], [500, int(D/3), 8, args.metric], [500, int(D/2), 8, args.metric], [500, D, 8, args.metric], \
                # [600, int(D/8), 8, args.metric], [600, int(D/4), 8, args.metric], [600, int(D/3), 8, args.metric], [600, int(D/2), 8, args.metric], [600, D, 8, args.metric]] # L, m, log2(k*), metric
                build_config = [
                    [250, D, 4, args.metric],
                    [250, int(D/2), 4, args.metric],
                    [250, int(D/4), 4, args.metric],
                    [250, int(D/2), 8, args.metric],
                    [250, int(D/4), 8, args.metric],
                    [250, int(D/8), 8, args.metric],
                    [250, int(D/10), 8, args.metric],
                ]
                # build_config = [
                #     [4000, int(D/10), 4, args.metric], [4000, int(D/8), 4, args.metric], [4000, int(D/5), 4, args.metric], [4000, int(D/4), 4, args.metric], [4000, int(D/3), 4, args.metric], [4000, int(D/2), 4, args.metric], [4000, D, 4, args.metric], \
                #     [4000, int(D/10), 6, args.metric], [4000, int(D/8), 6, args.metric], [4000, int(D/5), 6, args.metric], [4000, int(D/4), 6, args.metric], [4000, int(D/3), 6, args.metric], [4000, int(D/2), 6, args.metric], [4000, D, 6, args.metric], \
                #     [4000, int(D/10), 8, args.metric], [4000, int(D/8), 8, args.metric], [4000, int(D/5), 8, args.metric], [4000, int(D/4), 8, args.metric], [4000, int(D/3), 8, args.metric], [4000, int(D/2), 8, args.metric], [4000, D, 8, args.metric], \
                #     [2000, int(D/10), 4, args.metric], [2000, int(D/8), 4, args.metric], [2000, int(D/5), 4, args.metric], [2000, int(D/4), 4, args.metric], [2000, int(D/3), 4, args.metric], [2000, int(D/2), 4, args.metric], [2000, D, 4, args.metric], \
                #     [2000, int(D/10), 6, args.metric], [2000, int(D/8), 6, args.metric], [2000, int(D/5), 6, args.metric], [2000, int(D/4), 6, args.metric], [2000, int(D/3), 6, args.metric], [2000, int(D/2), 6, args.metric], [2000, D, 6, args.metric], \
                #     [2000, int(D/10), 8, args.metric], [2000, int(D/8), 8, args.metric], [2000, int(D/5), 8, args.metric], [2000, int(D/4), 8, args.metric], [2000, int(D/3), 8, args.metric], [2000, int(D/2), 8, args.metric], [2000, D, 8, args.metric], \
                #     [1000, int(D/10), 4, args.metric], [1000, int(D/8), 4, args.metric], [1000, int(D/5), 4, args.metric], [1000, int(D/4), 4, args.metric], [1000, int(D/3), 4, args.metric], [1000, int(D/2), 4, args.metric], [1000, D, 4, args.metric], \
                #     [1000, int(D/10), 6, args.metric], [1000, int(D/8), 6, args.metric], [1000, int(D/5), 6, args.metric], [1000, int(D/4), 6, args.metric], [1000, int(D/3), 6, args.metric], [1000, int(D/2), 6, args.metric], [1000, D, 6, args.metric], \
                #     [1000, int(D/10), 8, args.metric], [1000, int(D/8), 8, args.metric], [1000, int(D/5), 8, args.metric], [1000, int(D/4), 8, args.metric], [1000, int(D/3), 8, args.metric], [1000, int(D/2), 8, args.metric], [1000, D, 8, args.metric], \
                #     [800, int(D/10), 4, args.metric], [800, int(D/8), 4, args.metric], [800, int(D/5), 4, args.metric], [800, int(D/4), 4, args.metric], [800, int(D/3), 4, args.metric], [800, int(D/2), 4, args.metric], [800, D, 4, args.metric], \
                #     [800, int(D/10), 6, args.metric], [800, int(D/8), 6, args.metric], [800, int(D/5), 6, args.metric], [800, int(D/4), 6, args.metric], [800, int(D/3), 6, args.metric], [800, int(D/2), 6, args.metric], [800, D, 6, args.metric], \
                #     [800, int(D/10), 8, args.metric], [800, int(D/8), 8, args.metric], [800, int(D/5), 8, args.metric], [800, int(D/4), 8, args.metric], [800, int(D/3), 8, args.metric], [800, int(D/2), 8, args.metric], [800, D, 8, args.metric], \
                #     [500, int(D/10), 4, args.metric], [500, int(D/8), 4, args.metric], [500, int(D/5), 4, args.metric], [500, int(D/4), 4, args.metric], [500, int(D/3), 4, args.metric], [500, int(D/2), 4, args.metric], [500, D, 4, args.metric], \
                #     [500, int(D/10), 6, args.metric], [500, int(D/8), 6, args.metric], [500, int(D/5), 6, args.metric], [500, int(D/4), 6, args.metric], [500, int(D/3), 6, args.metric], [500, int(D/2), 6, args.metric], [500, D, 6, args.metric], \
                #     [500, int(D/10), 8, args.metric], [500, int(D/8), 8, args.metric], [500, int(D/5), 8, args.metric], [500, int(D/4), 8, args.metric], [500, int(D/3), 8, args.metric], [500, int(D/2), 8, args.metric], [500, D, 8, args.metric], \
                #     [250, int(D/10), 4, args.metric], [250, int(D/8), 4, args.metric], [250, int(D/5), 4, args.metric], [250, int(D/4), 4, args.metric], [250, int(D/3), 4, args.metric], [250, int(D/2), 4, args.metric], [250, D, 4, args.metric], \
                #     [250, int(D/10), 6, args.metric], [250, int(D/8), 6, args.metric], [250, int(D/5), 6, args.metric], [250, int(D/4), 6, args.metric], [250, int(D/3), 6, args.metric], [250, int(D/2), 6, args.metric], [250, D, 6, args.metric], \
                #     [250, int(D/10), 8, args.metric], [250, int(D/8), 8, args.metric], [250, int(D/5), 8, args.metric], [250, int(D/4), 8, args.metric], [250, int(D/3), 8, args.metric], [250, int(D/2), 8, args.metric], [250, D, 8, args.metric], \
                # ] # L, m, log2(k*), metric
                # search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [25, args.reorder], [30, args.reorder], [35, args.reorder], [40, args.reorder], \
                # [45, args.reorder], [50, args.reorder], [55, args.reorder], [60, args.reorder], [65, args.reorder], [75, args.reorder], [90, args.reorder], [110, args.reorder], [130, args.reorder], [150, args.reorder], \
                # [170, args.reorder], [200, args.reorder], [220, args.reorder], [250, args.reorder], [310, args.reorder], [400, args.reorder], [500, args.reorder], [800, args.reorder], [1000, args.reorder], \
                # [1250, args.reorder], [1500, args.reorder], [1750, args.reorder], [1900, args.reorder], [2000, args.reorder], [2250, args.reorder], [2500, args.reorder], [2750, args.reorder], [3000, args.reorder], [3500, args.reorder], [4000, args.reorder]]
                # search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [192, args.reorder], [256, args.reorder], \
                # [384, args.reorder], [512, args.reorder], [1024, args.reorder]]
                search_config = [[1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [160, args.reorder], [192, args.reorder], [256, args.reorder]]
        '''
        if args.dataset == "sift1m" or args.dataset == "glove":
            build_config = [
                [250, D, 4, args.metric], [250, int(D/2), 4, args.metric], [250, int(D/4), 4, args.metric],
                # [250, int(D/2), 8, args.metric], [250, int(D/4), 8, args.metric], [250, int(D/8), 8, args.metric], [250, int(D/10), 8, args.metric],
                [500, int(D/2), 4, args.metric], [1000, int(D/2), 4, args.metric], [2000, int(D/2), 4, args.metric], [4000, int(D/2), 4, args.metric],
                [1000, D, 4, args.metric], [1000, int(D/4), 4, args.metric], [1000, int(D/8), 4, args.metric], [1000, int(D/10), 4, args.metric],
                # [1000, int(D/8), 5, args.metric], [1000, int(D/8), 6, args.metric], [1000, int(D/8), 7, args.metric], [1000, int(D/8), 8, args.metric],
                # [1000, int(D/10), 5, args.metric], [1000, int(D/10), 6, args.metric], [1000, int(D/10), 7, args.metric], [1000, int(D/10), 8, args.metric],
                # [1000, int(D/2), 8, args.metric], [1000, int(D/4), 8, args.metric], [1000, int(D/16), 8, args.metric], [1000, int(D/20), 8, args.metric],
            ]
            search_config = [
                [1, args.reorder], [2, args.reorder], [4, args.reorder], [8, args.reorder], [16, args.reorder], [32, args.reorder], [64, args.reorder], [128, args.reorder], [256, args.reorder],
                [6, args.reorder], [12, args.reorder], [18, args.reorder], [25, args.reorder],
                [38, args.reorder], [50, args.reorder],
                [75, args.reorder], [100, args.reorder],
                [150, args.reorder], [160, args.reorder],
                [192, args.reorder], [200, args.reorder],
                [300, args.reorder], [400, args.reorder],
            ]
        '''
        # Sweep results are written as TSV rows; the header describes the columns.
        f = open(sweep_result_path, "w")
        f.write("Program: " + args.program + ("GPU" if args.is_gpu else "") + " Topk: " + str(args.topk) + " Num_split: " + str(args.num_split)+ " Batch: "+str(args.batch)+" CSize: "+str(args.csize)+"\n")
        f.write("L\tm\tk_star\t|\tw\tReorder\tMetric\n")
    else:
        # Single-configuration mode: everything comes from the CLI arguments.
        if args.opq != -1:
            assert args.opq % args.m == 0 and args.sq == -1
        elif args.sq != -1:
            assert (args.sq == 4 or args.sq == 6 or args.sq == 8 or args.sq == 16) and args.opq == -1
        else:
            assert D% args.m == 0
        build_config = [[args.L, args.m, int(math.log(args.k_star,2)), args.metric]]
        search_config = [[args.w, args.reorder]]
    # One build config at a time; each is evaluated with every compatible
    # search config (filtered by check_available_search_config).
    for bc in build_config:
        L, m, log2kstar, metric = bc
        # assert (not args.is_gpu and log2kstar<=8) or (log2kstar == 8)
        sc_list = check_available_search_config(args.program, bc, search_config)
        print(bc)
        print(sc_list)
        if len(sc_list) > 0:
            # Per-search-config result buffers; neighbor/distance columns are
            # appended split by split along the last axis.
            neighbors=np.empty((len(sc_list), queries.shape[0],0), dtype=np.int32)
            distances=np.empty((len(sc_list), queries.shape[0],0), dtype=np.float32)
            base_idx = 0
            total_latency = np.zeros(len(sc_list))
            args.batch = min(args.batch, queries.shape[0])
            # Pad the dimension if faiss-gpu does not support this m directly.
            padded_D, faiss_m, is_padding = get_padded_info(m)
            if is_padding:
                padded_queries, padded_train_dataset = faiss_pad_trains_queries(padded_D, queries, train_dataset)
            else:
                padded_queries, padded_train_dataset = queries, train_dataset
            # Build the faiss index_factory key for this configuration.
            if args.opq == -1 and args.sq == -1:
                index_key_manual = "IVF"+str(L)+",PQ"+str(faiss_m)+"x"+str(log2kstar)
            elif args.sq != -1:
                index_key_manual = "IVF"+str(L)+",SQ"+str(args.sq)
            else:
                index_key_manual = "OPQ"+str(faiss_m)+"_"+str(args.opq)+",IVF"+str(L)+",PQ"+str(faiss_m)+"x"+str(log2kstar)
            if args.flat != -1:
                index_key_manual = "Flat"
            for split in range(args.num_split):
                # if split > 0:
                #     break
                print("Split ", split)
                # The last split takes the remainder so splits cover all N vectors.
                num_per_split = int(N/args.num_split) if split < args.num_split-1 else N-base_idx
                searcher_dir, searcher_path = get_searcher_path(split)
                if args.flat == -1:
                    is_cached = check_cached(searcher_dir, args, args.dataset, split, args.num_split, index_key_manual, log2kstar)
                else:
                    is_cached = False
                args.m = faiss_m
                if is_cached:
                    # Cached index: no dataset read needed to reconstruct it.
                    index, preproc = build_faiss(args, log2kstar, searcher_dir, coarse_dir, split, int(N/args.num_split), padded_D, index_key_manual, is_cached, padded_queries)
                    # continue
                else:
                    dataset = read_data(split_dataset_path + str(args.num_split) + "_" + str(split) if args.num_split>1 else dataset_basedir, base=False if args.num_split>1 else True, offset_=None if args.num_split>1 else 0, shape_=None)
                    if args.flat == -1:
                        if is_padding:
                            padded_dataset = faiss_pad_dataset(padded_D, dataset)
                        else:
                            padded_dataset = dataset
                    print("[YJ] reading done")
                    if args.flat == -1:
                        index, preproc = build_faiss(args, log2kstar, searcher_dir, coarse_dir, split, int(N/args.num_split), padded_D, index_key_manual, is_cached, padded_queries, padded_train_dataset, padded_dataset)
                        # continue
                n = list()
                d = list()
                for idx in range(len(sc_list)):
                    w, reorder = search_config[sc_list[idx]]
                    assert reorder == args.reorder
                    # Build Faiss index
                    print(str(L)+"\t"+str(m)+"\t"+str(2**log2kstar)+"\t|\t"+str(w)+"\t"+str(reorder)+"\t"+str(metric)+"\n") # faiss-gpu has no reorder
                    # Faiss search
                    # NOTE(review): the third return value is bound to the name
                    # `time`, shadowing the time module inside this function.
                    if args.flat == -1:
                        local_neighbors, local_distances, time = faiss_search(index, preproc, args, reorder, w, args.csize)
                    else:
                        local_neighbors, local_distances, time = faiss_search_flat(D, dataset, queries, args, reorder, w, args.csize)
                    total_latency[idx] = total_latency[idx] + time
                    # Shift local neighbor ids into the global id space of this split.
                    n.append((local_neighbors+base_idx).astype(np.int32))
                    d.append(local_distances.astype(np.float32))
                    del local_neighbors
                    del local_distances
                if args.flat == -1:
                    del index
                # if is_cached == False:
                #     dataset._mmap.close()
                base_idx = base_idx + num_per_split
                neighbors = np.append(neighbors, np.array(n, dtype=np.int32), axis=-1)
                distances = np.append(distances, np.array(d, dtype=np.float32), axis=-1)
            # Merge candidates from all splits by distance.
            # NOTE(review): sort_neighbors is applied twice here; the second
            # call re-sorts already-sorted rows — looks redundant, confirm.
            neighbors, distances = sort_neighbors(distances, neighbors)
            final_neighbors, _ = sort_neighbors(distances, neighbors)
            # continue
            for idx in range(len(sc_list)):
                if args.sweep:
                    w, reorder = search_config[sc_list[idx]]
                    f.write(str(L)+"\t"+str(m)+"\t"+str(2**log2kstar)+"\t|\t"+str(w)+"\t"+str(reorder)+"\t"+str(metric)+"\n") # faiss-gpu has no reorder
                top1_10, top1_100, top10_100, top1_1000, top10_1000, top100_1000 = print_more_recalls(final_neighbors[idx], gt)
                top1, top10, top100, top1000 = print_recall(final_neighbors[idx], gt)
                print("Top ", args.topk, " Total latency (ms): ", total_latency[idx])
                print("arcm::Latency written. End of File.\n");
                if args.sweep:
                    f.write(str(top1)+" %\t"+str(top10)+" %\t"+str(top100)+" %\t"+str(top1000)+" %\t|\t"+str(top1_10)+" %\t"+str(top1_100)+" %\t"+str(top10_100)+" %\t"+str(top1_1000)+" %\t"+str(top10_1000)+" %\t"+str(top100_1000)+" %\t"+str(total_latency[idx])+"\n")
    if args.sweep:
        f.close()
def run_annoy(D):
    """Build (or load cached) Annoy indices per dataset split and benchmark them.

    For every (metric, n_trees) build config and every num_search value, this
    searches each split (batched via a thread pool, or one query at a time),
    merges per-split candidates with sort_neighbors, and prints recall and
    latency. In sweep mode results are also written to sweep_result_path.
    """
    gt, queries = prepare_eval()
    assert args.metric!='angular', "[TODO] don't understand how angular works yet..."
    if args.sweep:
        # Sweep mode: grid over tree counts and search budgets.
        build_config = [(args.metric, 50), (args.metric, 100), (args.metric, 150), (args.metric, 200), (args.metric, 250), (args.metric, 300), (args.metric, 400)]
        search_config = [100, 200, 400, 1000, 2000, 4000, 10000, 20000, 40000, 100000, 200000, 400000]
        f = open(sweep_result_path, "w")
        f.write("Program: " + args.program + " Topk: " + str(args.topk) + " Num_split: " + str(args.num_split)+ " Batch: "+str(args.batch)+" CSize: "+str(args.csize)+"\n")
        f.write("Num trees\t|\tNum search\tReorder\tMetric\n")
    else:
        build_config = [(args.metric, args.n_trees)]
        search_config = [args.num_search]
    for bc in build_config:
        metric, n_trees = bc
        # Translate this script's metric names to Annoy's.
        # NOTE(review): no else branch — an unexpected metric would leave
        # annoy_metric unbound and raise NameError below; confirm intended.
        if metric == "dot_product":
            annoy_metric = "dot"
        elif metric == "squared_l2":
            annoy_metric = "euclidean"
        elif metric == "angular":
            annoy_metric = "angular"
        # Per-search-config result buffers; columns appended split by split.
        neighbors = np.empty((len(search_config), queries.shape[0],0), dtype=np.int32)
        distances = np.empty((len(search_config), queries.shape[0],0), dtype=np.float32)
        total_latency = np.zeros(len(search_config))
        base_idx = 0
        for split in range(args.num_split):
            # The last split takes the remainder so splits cover all N vectors.
            num_per_split = int(N/args.num_split) if split < args.num_split-1 else N-base_idx
            searcher_dir, searcher_path = get_searcher_path(split)
            searcher_path = searcher_path + '_' + str(n_trees) + '_' + metric
            print("Split ", split)
            # Create Annoy index
            searcher = annoy.AnnoyIndex(D, metric=annoy_metric)
            if os.path.isfile(searcher_path):
                print("Loading searcher from ", searcher_path)
                searcher.load(searcher_path)
            else:
                # Load splitted dataset
                dataset = read_data(split_dataset_path + str(args.num_split) + "_" + str(split) if args.num_split>1 else dataset_basedir, base=False if args.num_split>1 else True, offset_=None if args.num_split>1 else 0, shape_=None)
                print("Annoy, adding items")
                for i, x in enumerate(dataset):
                    searcher.add_item(i, x.tolist())
                print("Annoy, building trees")
                searcher.build(n_trees)
                print("Saving searcher to ", searcher_path)
                os.makedirs(searcher_dir, exist_ok=True)
                searcher.save(searcher_path)
            n = list()
            d = list()
            for idx, sc in enumerate(search_config):
                num_search = sc
                # if args.sweep:
                #     f.write(str(n_trees)+"\t"+str(num_search)+"\t"+str(annoy_metric)+"\n")
                print(str(n_trees)+"\t"+str(num_search)+"\t"+str(annoy_metric))
                print("Entering Annoy searcher")
                # Annoy batch version
                if args.batch > 1:
                    # Fan queries out over a thread pool; latency is wall time
                    # for the whole batch.
                    pool = ThreadPool(args.batch)
                    start = time.time()
                    result = pool.map(lambda q: searcher.get_nns_by_vector(q.tolist(), args.topk, num_search, include_distances=True), queries)
                    end = time.time()
                    ne = np.empty((0, args.topk))
                    di = np.empty((0, args.topk))
                    for nn, dd in result:
                        # Annoy can return fewer than topk results; pad with the
                        # out-of-range id N and a worst-possible distance.
                        if len(nn) < args.topk:
                            plus_dim = args.topk-len(nn)
                            ne = np.append(ne, np.array(nn+[N]*plus_dim).reshape(1, args.topk), axis=0)
                            di = np.append(di, np.array(dd+[math.inf if metric=="squared_l2" else -math.inf]*plus_dim).reshape(1, args.topk), axis=0)
                        else:
                            ne = np.append(ne, np.array(nn).reshape(1, args.topk), axis=0)
                            di = np.append(di, np.array(dd).reshape(1, args.topk), axis=0)
                    total_latency[idx] = total_latency[idx] + 1000*(end - start)
                    n.append(ne+base_idx)
                    d.append(di)
                else:
                    # Sequential path: time each query individually and sum.
                    def single_query(query, base_idx):
                        start = time.time()
                        local_neighbors, local_distances = searcher.get_nns_by_vector(query.tolist(), args.topk, num_search, include_distances=True)
                        # Same under-topk padding as the batched path above.
                        if len(local_neighbors) < args.topk:
                            plus_dim = args.topk-len(local_neighbors)
                            local_neighbors=np.concatenate((local_neighbors, np.full((plus_dim), N)), axis=-1)
                            local_distances=np.concatenate((local_distances, np.full((plus_dim), math.inf if metric=="squared_l2" else -math.inf)), axis=-1)
                        return (time.time() - start, (local_neighbors, local_distances))
                    local_results = [single_query(q, base_idx) for q in queries]
                    total_latency[idx] += (np.sum(np.array([time for time, _ in local_results]).reshape(queries.shape[0], 1)))*1000
                    nd = [nd for _, nd in local_results]
                    n.append(np.vstack([n for n,d in nd])+base_idx)
                    d.append(np.vstack([d for n,d in nd]))
            base_idx = base_idx + num_per_split
            neighbors = np.append(neighbors, np.array(n), axis=-1)
            distances = np.append(distances, np.array(d), axis=-1)
        # Merge candidates from all splits by distance.
        final_neighbors, _ = sort_neighbors(distances, neighbors)
        for idx in range(len(search_config)):
            top1, top10, top100, top1000 = print_recall(final_neighbors[idx], gt)
            print("Top ", args.topk, " Total latency (ms): ", total_latency[idx])
            print("arcm::Latency written. End of File.\n");
            if args.sweep:
                f.write(str(n_trees)+"\t|\t"+str(search_config[idx])+"\t-1\t"+str(annoy_metric)+"\n")
                f.write(str(top1)+" %\t"+str(top10)+" %\t"+str(top100)+" %\t"+str(top1000)+" %\t"+str(total_latency[idx])+"\n")
    if args.sweep:
        f.close()
# only for faiss
def get_train():
    """Return the training vectors for args.dataset (used only by faiss).

    Dispatch is by substring match against the dataset name, so the order of
    the checks matters (e.g. "clognormal1m" must be tested before
    "lognormal1m" and "normal1m", which it contains as substrings).
    """
    ds = args.dataset
    if "sift1m" in ds:
        return mmap_fvecs(dataset_basedir + 'sift_learn.fvecs')
    if "deep1b" in ds:
        # Only the first 1M learn vectors are used for the billion-scale sets.
        return mmap_fvecs(dataset_basedir + 'deep1b_learn.fvecs', 0, 1000000)
    if "tti1b" in ds:
        return mmap_fvecs(dataset_basedir + '/split_data/split_data_128D_new/tti1b_learn20_0', 0, 1000000)
    if "tti1m" in ds:
        return mmap_fvecs(dataset_basedir + '/split_data/tti1m_learn1_0')
    if "deep1m" in ds:
        return mmap_fvecs(dataset_basedir + 'deep1m_learn.fvecs')
    if "deepm96" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/deepm96_learn1_0')
    if "test" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/test_learn1_0')
    if "clognormal1m" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/clognormal1m_learn1_0')
    if "cnormal1m" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/cnormal1m_learn1_0')
    if "lognormal1m" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/lognormal1m_learn1_0')
    if "normal1m" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/normal1m_learn1_0')
    if "clognormal1b" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/clognormal1b_learn' + str(args.num_split) + '_0', 0, 1000000)
    if "cnormal1b" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/cnormal1b_learn' + str(args.num_split) + '_0', 0, 1000000)
    if "lognormal1b" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/lognormal1b_learn' + str(args.num_split) + '_0', 0, 1000000)
    if "normal1b" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/normal1b_learn' + str(args.num_split) + '_0', 0, 1000000)
    if "gist" in ds:
        return mmap_fvecs(dataset_basedir + 'gist_learn.fvecs')
    if "sift1b" in ds:
        # sift1b learn vectors are stored as bvecs, not fvecs.
        return bvecs_mmap(dataset_basedir + 'bigann_learn.bvecs', 0, 1000000)
    if "glove" in ds:
        return read_data(dataset_basedir + 'split_data/glove_' + args.metric + '_learn1_0', base=False)
    if "music1m" in ds:
        return mmap_fvecs(dataset_basedir + 'split_data/music1m_learn1_0')
    assert False
def get_groundtruth():
    """Load the ground-truth neighbors for the active dataset, building them first if missing.

    The on-disk format differs per dataset: glove uses the project's h5-based
    read_data, several large/synthetic sets are stored as .npy, and everything
    else is an ivecs file.
    """
    print("Reading grountruth from ", groundtruth_path)
    if not os.path.isfile(groundtruth_path):
        run_groundtruth()
    ds = args.dataset
    if "glove" in ds:
        return read_data(groundtruth_path, base=False)
    stored_as_npy = (
        "deep1b" in ds
        or ("sift1b" in ds and args.metric == "dot_product")
        or ds in ("clognormal1b", "cnormal1b", "lognormal1b", "normal1b")
        or "tti1m" in ds
        or "tti1b" in ds
    )
    if stored_as_npy:
        return np.load(groundtruth_path)
    return ivecs_read(groundtruth_path)
def get_queries():
    """Return the query vectors for the dataset selected via args.dataset.

    The substring checks must keep their order: e.g. "clognormal1m" would also
    match the later "lognormal1m"/"normal1m" tests, so more specific names are
    listed first, mirroring the original elif chain.
    """
    loaders = (
        ("sift1m", lambda: mmap_fvecs(dataset_basedir + 'sift_query.fvecs')),
        ("deep1m", lambda: mmap_fvecs(dataset_basedir + 'deep1m_query.fvecs')),
        ("deepm96", lambda: mmap_fvecs(dataset_basedir + 'deepm96_query.fvecs')),
        ("test", lambda: mmap_fvecs(dataset_basedir + 'test_query.fvecs')),
        ("clognormal1m", lambda: mmap_fvecs(dataset_basedir + 'clognormal1m_query.fvecs')),
        ("cnormal1m", lambda: mmap_fvecs(dataset_basedir + 'cnormal1m_query.fvecs')),
        ("lognormal1m", lambda: mmap_fvecs(dataset_basedir + 'lognormal1m_query.fvecs')),
        ("normal1m", lambda: mmap_fvecs(dataset_basedir + 'normal1m_query.fvecs')),
        ("clognormal1b", lambda: mmap_fvecs2(dataset_basedir + '1000000000_128_clognormal_query.txt')),
        ("cnormal1b", lambda: mmap_fvecs2(dataset_basedir + '1000000000_128_cnormal_query.txt')),
        ("lognormal1b", lambda: mmap_fvecs2(dataset_basedir + '1000000000_128_lognormal_query.txt')),
        ("normal1b", lambda: mmap_fvecs2(dataset_basedir + '1000000000_128_normal_query.txt')),
        # music1m queries live under the split-data prefix, not dataset_basedir.
        ("music1m", lambda: mmap_fvecs(split_dataset_path + 'query1_0')),
        ("gist", lambda: mmap_fvecs(dataset_basedir + 'gist_query.fvecs')),
        ("sift1b", lambda: bvecs_read(dataset_basedir + 'bigann_query.bvecs')),
        ("glove", lambda: np.array(h5py.File(dataset_basedir + "glove-100-angular.hdf5", "r")['test'], dtype='float32')),
        ("deep1b", lambda: mmap_fvecs(dataset_basedir + 'deep1B_queries.fvecs')),
        ("tti1b", lambda: mmap_fvecs(dataset_basedir + 'tti1b_128D_query.fvecs')),
        ("tti1m", lambda: mmap_fvecs(dataset_basedir + 'tti1m_128D_query.fvecs')),
    )
    for tag, load in loaders:
        if tag in args.dataset:
            return load()
    assert False
# Resolve the data root: prefer the shared cluster mount when present,
# otherwise fall back to the current directory.
if os.path.isdir("/arc-share"):
    basedir = "/arc-share/MICRO21_ANNA/"
else:
    basedir = "./"
os.makedirs("./result", exist_ok=True)
split_dataset_path = None
if args.sweep:
    # Sweep results are written under ./hpca_eval/ with every relevant
    # argument encoded in the file name.
    # NOTE(review): ./hpca_eval/ is never created here (unlike ./result) --
    # presumably it must exist beforehand; confirm.
    # sweep_result_path = "./result/"+args.program+("GPU_" if args.is_gpu else "_")+args.dataset+"_topk_"+str(args.topk)+"_num_split_"+str(args.num_split)+"_batch_"+str(args.batch)+"_"+args.metric+"_reorder_"+str(args.reorder)+"_sweep_result.txt"
    sweep_result_path = "./hpca_eval/"+args.program+("GPU_" if args.is_gpu else "_")+args.dataset+"_topk_"+str(args.topk)+"_num_split_"+str(args.num_split)+"_batch_"+str(args.batch)+"_"+args.metric+"_reorder_"+str(args.reorder)+"_csize_"+str(args.csize)+"_sweep_result.txt"
    if args.flat != -1:
        # Flat (brute-force) runs get a distinct "_flat_" marker in the name.
        sweep_result_path = "./hpca_eval/"+args.program+("GPU_" if args.is_gpu else "_")+args.dataset+"_flat_topk_"+str(args.topk)+"_num_split_"+str(args.num_split)+"_batch_"+str(args.batch)+"_"+args.metric+"_reorder_"+str(args.reorder)+"_csize_"+str(args.csize)+"_sweep_result.txt"
# Sentinel defaults; the dataset-configuration chain below overwrites them.
index_key = None
N = -1
D = -1
num_iter = -1
qN = -1
# ---------------------------------------------------------------------------
# Per-dataset configuration.  Each branch sets:
#   dataset_basedir    -- directory holding the raw dataset files
#   split_dataset_path -- filename prefix for the pre-split data chunks
#   groundtruth_path   -- only when not in split mode (args.split == False)
#   N, D               -- base-set size and vector dimensionality
#   num_iter           -- presumably the chunk count used when splitting the
#                         base set -- TODO confirm against mmap_split/split()
#   qN                 -- number of query vectors
#   index_key          -- the "<KEY>" values look like anonymized placeholders
#                         for index factory strings -- verify before use
# NOTE(review): the substring tests rely on their ordering; the synthetic 1M/1B
# sets use exact equality, so e.g. "clognormal1m" cannot be shadowed by the
# later "lognormal1m" branch.
if "sift1m" in args.dataset:
    dataset_basedir = basedir + "SIFT1M/"
    split_dataset_path = dataset_basedir+"split_data/sift1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "sift1m_"+args.metric+"_gt"
    N=1000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>"
elif "deep1m" in args.dataset:
    dataset_basedir = basedir + "DEEP1M/"
    split_dataset_path = dataset_basedir+"split_data/deep1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "deep1m_"+args.metric+"_gt"
    N=1000000
    D=256
    num_iter = 1
    qN = 1000
    index_key = "<KEY>" #arcm::FIXME
elif "deepm96" in args.dataset:
    dataset_basedir = basedir + "DEEPM96/"
    split_dataset_path = dataset_basedir+"split_data/deepm96_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "deepm96_"+args.metric+"_gt"
    N=1000000
    D=96
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "clognormal1m":
    dataset_basedir = basedir + "SYNTHETIC_1M_CLOGNORMAL/"
    split_dataset_path = dataset_basedir+"split_data/clognormal1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "clognormal1m_"+args.metric+"_gt"
    N=1000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "cnormal1m":
    dataset_basedir = basedir + "SYNTHETIC_1M_CNORMAL/"
    split_dataset_path = dataset_basedir+"split_data/cnormal1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "cnormal1m_"+args.metric+"_gt"
    N=1000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "lognormal1m":
    dataset_basedir = basedir + "SYNTHETIC_1M_LOGNORMAL/"
    split_dataset_path = dataset_basedir+"split_data/lognormal1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "lognormal1m_"+args.metric+"_gt"
    N=1000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "normal1m":
    dataset_basedir = basedir + "SYNTHETIC_1M_NORMAL/"
    split_dataset_path = dataset_basedir+"split_data/normal1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "normal1m_"+args.metric+"_gt"
    N=1000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "clognormal1b":
    dataset_basedir = basedir + "SYNTHETIC_1B_CLOGNORMAL/"
    split_dataset_path = dataset_basedir+"split_data/clognormal1b_"
    # split_dataset_path = dataset_basedir+"split_data_mmap_split/clognormal1b_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "1bclognormal_"+args.metric+"_gt.npy"
    N=1000000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "cnormal1b":
    dataset_basedir = basedir + "SYNTHETIC_1B_CNORMAL/"
    split_dataset_path = dataset_basedir+"split_data/cnormal1b_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "1bcnormal_"+args.metric+"_gt.npy"
    N=1000000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "lognormal1b":
    dataset_basedir = basedir + "SYNTHETIC_1B_LOGNORMAL/"
    split_dataset_path = dataset_basedir+"split_data/lognormal1b_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "1blognormal_"+args.metric+"_gt.npy"
    N=1000000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif args.dataset == "normal1b":
    dataset_basedir = basedir + "SYNTHETIC_1B_NORMAL/"
    split_dataset_path = dataset_basedir+"split_data/normal1b_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "1bnormal_"+args.metric+"_gt.npy"
    N=1000000000
    D=128
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif "music1m" in args.dataset:
    dataset_basedir = basedir + "MUSIC1M/"
    split_dataset_path = dataset_basedir+"split_data/music1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "music1m_"+args.metric+"_gt"
    N=1000000
    D=100
    num_iter = 1
    qN = 10000
    index_key = "<KEY>" #arcm::FIXME
elif "gist" in args.dataset:
    dataset_basedir = basedir + "GIST/"
    split_dataset_path =dataset_basedir+"split_data/gist_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "gist_"+args.metric+"_gt"
    N=1000000
    D=960
    num_iter = 1
    qN = 1000
elif "sift1b" in args.dataset:
    dataset_basedir = basedir + "SIFT1B/"
    split_dataset_path = dataset_basedir+"split_data/sift1b_"
    if args.split==False:
        # squared_l2 uses the official BIGANN ground truth; other metrics use
        # the locally generated file.
        groundtruth_path = dataset_basedir + 'gnd/idx_1000M.ivecs' if args.metric=="squared_l2" else dataset_basedir + "sift1b_"+args.metric+"_gt"
    N=1000000000
    D=128
    num_iter = 4
    qN = 10000
    index_key = "<KEY>"
elif "glove" in args.dataset:
    # glove has no default metric; the caller must choose one explicitly.
    assert args.metric != None
    dataset_basedir = basedir + "GLOVE/"
    split_dataset_path = dataset_basedir+"split_data/glove_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "glove_"+args.metric+"_gt"
    N=1183514
    D=100
    num_iter = 10
    qN = 10000
elif "deep1b" in args.dataset:
    dataset_basedir = basedir + "DEEP1B/"
    split_dataset_path = dataset_basedir+"split_data/deep1b_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "deep1b_"+args.metric+"_gt"
    N=1000000000
    D=96
    num_iter = 16
    qN = 10000
elif "tti1b" in args.dataset:
    dataset_basedir = basedir + "TTI1B/"
    split_dataset_path = dataset_basedir+"split_data/split_data_128D_new/tti1b_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "split_data/split_data_128D_new/tti1b_"+args.metric+"_gt.npy"
    N=1000000000
    D=128
    num_iter = 16
    qN = 10000
elif "tti1m" in args.dataset:
    dataset_basedir = basedir + "TTI1M/"
    split_dataset_path = dataset_basedir+"split_data/tti1m_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "tti1m_"+args.metric+"_gt.npy"
    N=1000000
    D=128
    num_iter = 1
    qN = 10000
elif "test" in args.dataset:
    dataset_basedir = basedir + "synthetic_test/"
    split_dataset_path = dataset_basedir+"split_data/test_"
    if args.split==False:
        groundtruth_path = dataset_basedir + "test_"+args.metric+"_gt"
    N=1000000
    D=128
    # NOTE(review): num_iter is not set in this branch and stays -1 -- confirm
    # the test dataset never reaches code that reads it.
    qN = 10000
# Create the cache directories for the searcher files (only needed when
# actually running a search, i.e. neither splitting nor regenerating the
# ground truth).  scann additionally keeps a fine-quantizer directory.
# NOTE(review): this assumes one of the dataset branches above matched;
# otherwise dataset_basedir is undefined here.
if args.split == False and args.groundtruth == False:
    coarse_dir = basedir + args.program + '_searcher_' + args.metric + '/' + args.dataset + '/coarse_dir/'
    os.makedirs(coarse_dir, exist_ok=True)
    if args.program == "scann":
        scann_fine_dir = basedir + args.program + '_searcher_' + args.metric + '/' + args.dataset + '/fine_dir/'
        os.makedirs(scann_fine_dir, exist_ok=True)
# if (args.num_split > 1 and args.eval_split) or args.split:
# remapping_file_path = split_dataset_path + 'remapping_index_' + str(args.num_split)
os.makedirs(dataset_basedir+"split_data/", exist_ok=True)
# main -- the three modes below are independent flags, checked in order:
# split the base set, run an evaluation/sweep, and/or (re)build ground truth.
if args.split:
    # split(args.dataset, num_iter, N, D)
    mmap_split(args.dataset, N)
    # random_split(args.dataset, num_iter, N, D)
if args.eval_split or args.sweep:
    # Dispatch to the selected ANN library; any other value is a usage error.
    if args.program == "scann":
        run_scann()
    elif args.program == "faiss":
        run_faiss(D)
    elif args.program == "annoy":
        run_annoy(D)
    else:
        assert False
if args.groundtruth:
    # base
    # arcm_base = read_data(dataset_basedir)
    # print("base.shape =", arcm_base.shape, ", \nbase =", arcm_base)
    # train
    # arcm_train = get_train()
    # print("train.shape =", arcm_train.shape, ", \ntrain =", arcm_train)
    # query
    # arcm_query = get_queries()
    # print("query.shape =", arcm_query.shape, ", \nquery =", arcm_query)
    # gt
    # arcm_gt = ivecs_read(dataset_basedir + 'groundtruth/sift_groundtruth.ivecs')
    # print("gt.shape =", arcm_gt.shape, ", \ngt =", arcm_gt)
    run_groundtruth()
# qry = read_fbin(dataset_basedir + 'query.public.100K.fbin', 0, 10000)
# fvecs_write(dataset_basedir + 'tti1m_query.fvecs', qry)
# fname = dataset_basedir + "1000000_128_normal_query.txt"
# txt_to_fvecs(fname)
# fname = dataset_basedir + "1000000_128_normal.txt"
# txt_to_fvecs(fname)
| [
"numpy.fromfile",
"runfaiss.faiss_search",
"math.log",
"numpy.argsort",
"numpy.array",
"ctypes.CDLL",
"numpy.linalg.norm",
"numpy.load",
"argparse.ArgumentParser",
"scann.scann_ops_pybind.builder",
"numpy.sort",
"numpy.memmap",
"multiprocessing.pool.ThreadPool",
"runfaiss.faiss_search_flat... | [((559, 605), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Options"""'}), "(description='Options')\n", (582, 605), False, 'import argparse\n'), ((84982, 85009), 'os.path.isdir', 'os.path.isdir', (['"""/arc-share"""'], {}), "('/arc-share')\n", (84995, 85009), False, 'import os\n'), ((85072, 85110), 'os.makedirs', 'os.makedirs', (['"""./result"""'], {'exist_ok': '(True)'}), "('./result', exist_ok=True)\n", (85083, 85110), False, 'import os\n'), ((92428, 92487), 'os.makedirs', 'os.makedirs', (["(dataset_basedir + 'split_data/')"], {'exist_ok': '(True)'}), "(dataset_basedir + 'split_data/', exist_ok=True)\n", (92439, 92487), False, 'import os\n'), ((4760, 4780), 'numpy.shape', 'np.shape', (['trimmed_gt'], {}), '(trimmed_gt)\n', (4768, 4780), True, 'import numpy as np\n'), ((5955, 5988), 'numpy.fromfile', 'np.fromfile', (['fname'], {'dtype': '"""int32"""'}), "(fname, dtype='int32')\n", (5966, 5988), True, 'import numpy as np\n'), ((6096, 6128), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {'dtype': 'np.int32'}), '((n, 1), dtype=np.int32)\n', (6104, 6128), True, 'import numpy as np\n'), ((6159, 6194), 'numpy.append', 'np.append', (['dimension_arr', 'm'], {'axis': '(1)'}), '(dimension_arr, m, axis=1)\n', (6168, 6194), True, 'import numpy as np\n'), ((6570, 6602), 'numpy.zeros', 'np.zeros', (['(n, 4)'], {'dtype': 'np.uint8'}), '((n, 4), dtype=np.uint8)\n', (6578, 6602), True, 'import numpy as np\n'), ((6633, 6668), 'numpy.append', 'np.append', (['dimension_arr', 'm'], {'axis': '(1)'}), '(dimension_arr, m, axis=1)\n', (6642, 6668), True, 'import numpy as np\n'), ((6715, 6749), 'numpy.fromfile', 'np.fromfile', (['fname'], {'dtype': 'np.uint8'}), '(fname, dtype=np.uint8)\n', (6726, 6749), True, 'import numpy as np\n'), ((7535, 7570), 'numpy.empty', 'np.empty', (['(n, d + 1)'], {'dtype': '"""int32"""'}), "((n, d + 1), dtype='int32')\n", (7543, 7570), True, 'import numpy as np\n'), ((7669, 7686), 'numpy.loadtxt', 
'np.loadtxt', (['fname'], {}), '(fname)\n', (7679, 7686), True, 'import numpy as np\n'), ((17374, 17450), 'numpy.empty', 'np.empty', (['(0, D)'], {'dtype': "(np.uint8 if 'sift1b' in args.dataset else np.float32)"}), "((0, D), dtype=np.uint8 if 'sift1b' in args.dataset else np.float32)\n", (17382, 17450), True, 'import numpy as np\n'), ((20914, 20990), 'numpy.empty', 'np.empty', (['(0, D)'], {'dtype': "(np.uint8 if 'sift1b' in args.dataset else np.float32)"}), "((0, D), dtype=np.uint8 if 'sift1b' in args.dataset else np.float32)\n", (20922, 20990), True, 'import numpy as np\n'), ((21218, 21234), 'random.seed', 'random.seed', (['(100)'], {}), '(100)\n', (21229, 21234), False, 'import random\n'), ((21263, 21287), 'random.shuffle', 'random.shuffle', (['data_ids'], {}), '(data_ids)\n', (21277, 21287), False, 'import random\n'), ((22083, 22119), 'numpy.empty', 'np.empty', (['[qN, 1000]'], {'dtype': 'np.int32'}), '([qN, 1000], dtype=np.int32)\n', (22091, 22119), True, 'import numpy as np\n'), ((22141, 22179), 'numpy.empty', 'np.empty', (['[qN, 1000]'], {'dtype': 'np.float32'}), '([qN, 1000], dtype=np.float32)\n', (22149, 22179), True, 'import numpy as np\n'), ((25556, 25596), 'os.makedirs', 'os.makedirs', (['searcher_dir'], {'exist_ok': '(True)'}), '(searcher_dir, exist_ok=True)\n', (25567, 25596), False, 'import os\n'), ((92060, 92098), 'os.makedirs', 'os.makedirs', (['coarse_dir'], {'exist_ok': '(True)'}), '(coarse_dir, exist_ok=True)\n', (92071, 92098), False, 'import os\n'), ((6305, 6394), 'numpy.memmap', 'np.memmap', (['fname'], {'dtype': 'np.uint8', 'mode': '"""r"""', 'offset': '(offset_ * 132)', 'shape': '(shape_ * 132)'}), "(fname, dtype=np.uint8, mode='r', offset=offset_ * 132, shape=\n shape_ * 132)\n", (6314, 6394), True, 'import numpy as np\n'), ((6401, 6443), 'numpy.memmap', 'np.memmap', (['fname'], {'dtype': 'np.uint8', 'mode': '"""r"""'}), "(fname, dtype=np.uint8, mode='r')\n", (6410, 6443), True, 'import numpy as np\n'), ((6905, 7004), 'numpy.memmap', 
'np.memmap', (['fname'], {'dtype': '"""int32"""', 'mode': '"""r"""', 'offset': '(offset_ * (D + 1) * 4)', 'shape': '(shape_ * (D + 1))'}), "(fname, dtype='int32', mode='r', offset=offset_ * (D + 1) * 4,\n shape=shape_ * (D + 1))\n", (6914, 7004), True, 'import numpy as np\n'), ((7008, 7049), 'numpy.memmap', 'np.memmap', (['fname'], {'dtype': '"""int32"""', 'mode': '"""r"""'}), "(fname, dtype='int32', mode='r')\n", (7017, 7049), True, 'import numpy as np\n'), ((7243, 7344), 'numpy.memmap', 'np.memmap', (['fname'], {'dtype': '"""float16"""', 'mode': '"""r"""', 'offset': '(offset_ * (D + 1) * 2)', 'shape': '(shape_ * (D + 1))'}), "(fname, dtype='float16', mode='r', offset=offset_ * (D + 1) * 2,\n shape=shape_ * (D + 1))\n", (7252, 7344), True, 'import numpy as np\n'), ((7348, 7391), 'numpy.memmap', 'np.memmap', (['fname'], {'dtype': '"""float16"""', 'mode': '"""r"""'}), "(fname, dtype='float16', mode='r')\n", (7357, 7391), True, 'import numpy as np\n'), ((8949, 8988), 'numpy.fromfile', 'np.fromfile', (['f'], {'count': '(2)', 'dtype': 'np.int32'}), '(f, count=2, dtype=np.int32)\n', (8960, 8988), True, 'import numpy as np\n'), ((9077, 9156), 'numpy.fromfile', 'np.fromfile', (['f'], {'count': '(nvecs * dim)', 'dtype': 'np.float32', 'offset': '(start_idx * 4 * dim)'}), '(f, count=nvecs * dim, dtype=np.float32, offset=start_idx * 4 * dim)\n', (9088, 9156), True, 'import numpy as np\n'), ((9702, 9741), 'numpy.fromfile', 'np.fromfile', (['f'], {'count': '(2)', 'dtype': 'np.int32'}), '(f, count=2, dtype=np.int32)\n', (9713, 9741), True, 'import numpy as np\n'), ((9830, 9907), 'numpy.fromfile', 'np.fromfile', (['f'], {'count': '(nvecs * dim)', 'dtype': 'np.int32', 'offset': '(start_idx * 4 * dim)'}), '(f, count=nvecs * dim, dtype=np.int32, offset=start_idx * 4 * dim)\n', (9841, 9907), True, 'import numpy as np\n'), ((21950, 21980), 'os.path.isdir', 'os.path.isdir', (['groundtruth_dir'], {}), '(groundtruth_dir)\n', (21963, 21980), False, 'import os\n'), ((21990, 22015), 
'os.mkdir', 'os.mkdir', (['groundtruth_dir'], {}), '(groundtruth_dir)\n', (21998, 22015), False, 'import os\n'), ((22196, 22223), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['row'], {}), '(row)\n', (22218, 22223), True, 'import numpy as np\n'), ((22260, 22287), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['row'], {}), '(row)\n', (22282, 22287), True, 'import numpy as np\n'), ((22329, 22356), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['row'], {}), '(row)\n', (22351, 22356), True, 'import numpy as np\n'), ((22817, 22848), 'ctypes.CDLL', 'ctypes.CDLL', (['"""./groundtruth.so"""'], {}), "('./groundtruth.so')\n", (22828, 22848), False, 'import ctypes\n'), ((24537, 24549), 'numpy.shape', 'np.shape', (['gt'], {}), '(gt)\n', (24545, 24549), True, 'import numpy as np\n'), ((24577, 24594), 'numpy.shape', 'np.shape', (['queries'], {}), '(queries)\n', (24585, 24594), True, 'import numpy as np\n'), ((82450, 82482), 'os.path.isfile', 'os.path.isfile', (['groundtruth_path'], {}), '(groundtruth_path)\n', (82464, 82482), False, 'import os\n'), ((92237, 92279), 'os.makedirs', 'os.makedirs', (['scann_fine_dir'], {'exist_ok': '(True)'}), '(scann_fine_dir, exist_ok=True)\n', (92248, 92279), False, 'import os\n'), ((16468, 16500), 'h5py.File', 'h5py.File', (['groundtruth_path', '"""w"""'], {}), "(groundtruth_path, 'w')\n", (16477, 16500), False, 'import h5py\n'), ((21695, 21730), 'numpy.array', 'np.array', (['data_ids'], {'dtype': 'np.uint32'}), '(data_ids, dtype=np.uint32)\n', (21703, 21730), True, 'import numpy as np\n'), ((22395, 22425), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (22409, 22425), False, 'import ctypes\n'), ((22454, 22482), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int'], {}), '(ctypes.c_int)\n', (22468, 22482), False, 'import ctypes\n'), ((22512, 22542), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (22526, 22542), False, 'import ctypes\n'), 
((22700, 22727), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['row'], {}), '(row)\n', (22722, 22727), True, 'import numpy as np\n'), ((23518, 23549), 'ctypes.CDLL', 'ctypes.CDLL', (['"""./groundtruth.so"""'], {}), "('./groundtruth.so')\n", (23529, 23549), False, 'import ctypes\n'), ((38897, 38935), 'os.makedirs', 'os.makedirs', (['coarse_dir'], {'exist_ok': '(True)'}), '(coarse_dir, exist_ok=True)\n', (38908, 38935), False, 'import os\n'), ((46745, 46802), 'numpy.full', 'np.full', (['(dataset.shape[0], plus_dim)', '(0)'], {'dtype': '"""float32"""'}), "((dataset.shape[0], plus_dim), 0, dtype='float32')\n", (46752, 46802), True, 'import numpy as np\n'), ((47027, 47067), 'numpy.full', 'np.full', (['(queries.shape[0], plus_dim)', '(0)'], {}), '((queries.shape[0], plus_dim), 0)\n', (47034, 47067), True, 'import numpy as np\n'), ((47127, 47173), 'numpy.full', 'np.full', (['(train_dataset.shape[0], plus_dim)', '(0)'], {}), '((train_dataset.shape[0], plus_dim), 0)\n', (47134, 47173), True, 'import numpy as np\n'), ((76165, 76205), 'annoy.AnnoyIndex', 'annoy.AnnoyIndex', (['D'], {'metric': 'annoy_metric'}), '(D, metric=annoy_metric)\n', (76181, 76205), False, 'import annoy\n'), ((76212, 76241), 'os.path.isfile', 'os.path.isfile', (['searcher_path'], {}), '(searcher_path)\n', (76226, 76241), False, 'import os\n'), ((82875, 82900), 'numpy.load', 'np.load', (['groundtruth_path'], {}), '(groundtruth_path)\n', (82882, 82900), True, 'import numpy as np\n'), ((4510, 4537), 'numpy.intersect1d', 'np.intersect1d', (['gt_row', 'row'], {}), '(gt_row, row)\n', (4524, 4537), True, 'import numpy as np\n'), ((15638, 15669), 'h5py.File', 'h5py.File', (['split_data_path', '"""w"""'], {}), "(split_data_path, 'w')\n", (15647, 15669), False, 'import h5py\n'), ((22757, 22787), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (22771, 22787), False, 'import ctypes\n'), ((23399, 23426), 'numpy.ctypeslib.as_ctypes', 'np.ctypeslib.as_ctypes', (['row'], 
{}), '(row)\n', (23421, 23426), True, 'import numpy as np\n'), ((76811, 76851), 'os.makedirs', 'os.makedirs', (['searcher_dir'], {'exist_ok': '(True)'}), '(searcher_dir, exist_ok=True)\n', (76822, 76851), False, 'import os\n'), ((79075, 79086), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (79083, 79086), True, 'import numpy as np\n'), ((79133, 79144), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (79141, 79144), True, 'import numpy as np\n'), ((23457, 23487), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (23471, 23487), False, 'import ctypes\n'), ((23924, 23955), 'numpy.argsort', 'np.argsort', (['(-distances)'], {'axis': '(-1)'}), '(-distances, axis=-1)\n', (23934, 23955), True, 'import numpy as np\n'), ((23979, 24007), 'numpy.sort', 'np.sort', (['(-distances)'], {'axis': '(-1)'}), '(-distances, axis=-1)\n', (23986, 24007), True, 'import numpy as np\n'), ((24151, 24178), 'numpy.sort', 'np.sort', (['distances'], {'axis': '(-1)'}), '(distances, axis=-1)\n', (24158, 24178), True, 'import numpy as np\n'), ((40219, 40247), 'os.path.isdir', 'os.path.isdir', (['searcher_path'], {}), '(searcher_path)\n', (40232, 40247), False, 'import os\n'), ((44988, 45015), 'numpy.array', 'np.array', (['n'], {'dtype': 'np.int32'}), '(n, dtype=np.int32)\n', (44996, 45015), True, 'import numpy as np\n'), ((45063, 45092), 'numpy.array', 'np.array', (['d'], {'dtype': 'np.float32'}), '(d, dtype=np.float32)\n', (45071, 45092), True, 'import numpy as np\n'), ((70068, 70092), 'math.log', 'math.log', (['args.k_star', '(2)'], {}), '(args.k_star, 2)\n', (70076, 70092), False, 'import math\n'), ((71563, 71665), 'runfaiss.check_cached', 'check_cached', (['searcher_dir', 'args', 'args.dataset', 'split', 'args.num_split', 'index_key_manual', 'log2kstar'], {}), '(searcher_dir, args, args.dataset, split, args.num_split,\n index_key_manual, log2kstar)\n', (71575, 71665), False, 'from runfaiss import build_faiss, faiss_search, check_cached, 
faiss_search_flat\n'), ((73544, 73571), 'numpy.array', 'np.array', (['n'], {'dtype': 'np.int32'}), '(n, dtype=np.int32)\n', (73552, 73571), True, 'import numpy as np\n'), ((73619, 73648), 'numpy.array', 'np.array', (['d'], {'dtype': 'np.float32'}), '(d, dtype=np.float32)\n', (73627, 73648), True, 'import numpy as np\n'), ((77242, 77264), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['args.batch'], {}), '(args.batch)\n', (77252, 77264), False, 'from multiprocessing.pool import ThreadPool\n'), ((77278, 77289), 'time.time', 'time.time', ([], {}), '()\n', (77287, 77289), False, 'import time\n'), ((77430, 77441), 'time.time', 'time.time', ([], {}), '()\n', (77439, 77441), False, 'import time\n'), ((77452, 77476), 'numpy.empty', 'np.empty', (['(0, args.topk)'], {}), '((0, args.topk))\n', (77460, 77476), True, 'import numpy as np\n'), ((77487, 77511), 'numpy.empty', 'np.empty', (['(0, args.topk)'], {}), '((0, args.topk))\n', (77495, 77511), True, 'import numpy as np\n'), ((24098, 24128), 'numpy.argsort', 'np.argsort', (['distances'], {'axis': '(-1)'}), '(distances, axis=-1)\n', (24108, 24128), True, 'import numpy as np\n'), ((39665, 39692), 'os.path.isfile', 'os.path.isfile', (['coarse_path'], {}), '(coarse_path)\n', (39679, 39692), False, 'import os\n'), ((39729, 39754), 'os.path.isfile', 'os.path.isfile', (['fine_path'], {}), '(fine_path)\n', (39743, 39754), False, 'import os\n'), ((40319, 40416), 'scann.scann_ops_pybind.load_searcher', 'scann.scann_ops_pybind.load_searcher', (['searcher_path', 'num_per_split', 'D', 'coarse_path', 'fine_path'], {}), '(searcher_path, num_per_split, D,\n coarse_path, fine_path)\n', (40355, 40416), False, 'import scann\n'), ((41822, 41863), 'os.makedirs', 'os.makedirs', (['searcher_path'], {'exist_ok': '(True)'}), '(searcher_path, exist_ok=True)\n', (41833, 41863), False, 'import os\n'), ((42558, 42569), 'time.time', 'time.time', ([], {}), '()\n', (42567, 42569), False, 'import time\n'), ((42789, 42800), 'time.time', 'time.time', ([], 
{}), '()\n', (42798, 42800), False, 'import time\n'), ((72973, 73031), 'runfaiss.faiss_search', 'faiss_search', (['index', 'preproc', 'args', 'reorder', 'w', 'args.csize'], {}), '(index, preproc, args, reorder, w, args.csize)\n', (72985, 73031), False, 'from runfaiss import build_faiss, faiss_search, check_cached, faiss_search_flat\n'), ((73090, 73158), 'runfaiss.faiss_search_flat', 'faiss_search_flat', (['D', 'dataset', 'queries', 'args', 'reorder', 'w', 'args.csize'], {}), '(D, dataset, queries, args, reorder, w, args.csize)\n', (73107, 73158), False, 'from runfaiss import build_faiss, faiss_search, check_cached, faiss_search_flat\n'), ((78144, 78155), 'time.time', 'time.time', ([], {}), '()\n', (78153, 78155), False, 'import time\n'), ((78970, 78999), 'numpy.vstack', 'np.vstack', (['[d for n, d in nd]'], {}), '([d for n, d in nd])\n', (78979, 78999), True, 'import numpy as np\n'), ((40556, 40583), 'os.path.isfile', 'os.path.isfile', (['coarse_path'], {}), '(coarse_path)\n', (40570, 40583), False, 'import os\n'), ((40621, 40646), 'os.path.isfile', 'os.path.isfile', (['fine_path'], {}), '(fine_path)\n', (40635, 40646), False, 'import os\n'), ((78917, 78946), 'numpy.vstack', 'np.vstack', (['[n for n, d in nd]'], {}), '([n for n, d in nd])\n', (78926, 78946), True, 'import numpy as np\n'), ((43015, 43026), 'time.time', 'time.time', ([], {}), '()\n', (43024, 43026), False, 'import time\n'), ((43250, 43261), 'time.time', 'time.time', ([], {}), '()\n', (43259, 43261), False, 'import time\n'), ((44001, 44030), 'numpy.vstack', 'np.vstack', (['[d for n, d in nd]'], {}), '([d for n, d in nd])\n', (44010, 44030), True, 'import numpy as np\n'), ((44123, 44134), 'time.time', 'time.time', ([], {}), '()\n', (44132, 44134), False, 'import time\n'), ((44881, 44910), 'numpy.vstack', 'np.vstack', (['[d for n, d in nd]'], {}), '([d for n, d in nd])\n', (44890, 44910), True, 'import numpy as np\n'), ((78619, 78630), 'time.time', 'time.time', ([], {}), '()\n', (78628, 78630), False, 
'import time\n'), ((40058, 40176), 'scann.scann_ops_pybind.builder', 'scann.scann_ops_pybind.builder', (['dataset', 'train_dataset', 'load_coarse', 'coarse_path', 'load_fine', 'fine_path', '(10)', 'metric'], {}), '(dataset, train_dataset, load_coarse,\n coarse_path, load_fine, fine_path, 10, metric)\n', (40088, 40176), False, 'import scann\n'), ((43946, 43975), 'numpy.vstack', 'np.vstack', (['[n for n, d in nd]'], {}), '([n for n, d in nd])\n', (43955, 43975), True, 'import numpy as np\n'), ((44826, 44855), 'numpy.vstack', 'np.vstack', (['[n for n, d in nd]'], {}), '([n for n, d in nd])\n', (44835, 44855), True, 'import numpy as np\n'), ((78435, 78455), 'numpy.full', 'np.full', (['plus_dim', 'N'], {}), '(plus_dim, N)\n', (78442, 78455), True, 'import numpy as np\n'), ((78525, 78593), 'numpy.full', 'np.full', (['plus_dim', "(math.inf if metric == 'squared_l2' else -math.inf)"], {}), "(plus_dim, math.inf if metric == 'squared_l2' else -math.inf)\n", (78532, 78593), True, 'import numpy as np\n'), ((78779, 78824), 'numpy.array', 'np.array', (['[time for time, _ in local_results]'], {}), '([time for time, _ in local_results])\n', (78787, 78824), True, 'import numpy as np\n'), ((44460, 44471), 'time.time', 'time.time', ([], {}), '()\n', (44469, 44471), False, 'import time\n'), ((77631, 77660), 'numpy.array', 'np.array', (['(nn + [N] * plus_dim)'], {}), '(nn + [N] * plus_dim)\n', (77639, 77660), True, 'import numpy as np\n'), ((77714, 77791), 'numpy.array', 'np.array', (["(dd + [math.inf if metric == 'squared_l2' else -math.inf] * plus_dim)"], {}), "(dd + [math.inf if metric == 'squared_l2' else -math.inf] * plus_dim)\n", (77722, 77791), True, 'import numpy as np\n'), ((77855, 77867), 'numpy.array', 'np.array', (['nn'], {}), '(nn)\n', (77863, 77867), True, 'import numpy as np\n'), ((77925, 77937), 'numpy.array', 'np.array', (['dd'], {}), '(dd)\n', (77933, 77937), True, 'import numpy as np\n'), ((43810, 43855), 'numpy.array', 'np.array', (['[time for time, _ in 
local_results]'], {}), '([time for time, _ in local_results])\n', (43818, 43855), True, 'import numpy as np\n'), ((44684, 44729), 'numpy.array', 'np.array', (['[time for time, _ in local_results]'], {}), '([time for time, _ in local_results])\n', (44692, 44729), True, 'import numpy as np\n'), ((41407, 41525), 'scann.scann_ops_pybind.builder', 'scann.scann_ops_pybind.builder', (['dataset', 'train_dataset', 'load_coarse', 'coarse_path', 'load_fine', 'fine_path', '(10)', 'metric'], {}), '(dataset, train_dataset, load_coarse,\n coarse_path, load_fine, fine_path, 10, metric)\n', (41437, 41525), False, 'import scann\n'), ((40998, 41116), 'scann.scann_ops_pybind.builder', 'scann.scann_ops_pybind.builder', (['dataset', 'train_dataset', 'load_coarse', 'coarse_path', 'load_fine', 'fine_path', '(10)', 'metric'], {}), '(dataset, train_dataset, load_coarse,\n coarse_path, load_fine, fine_path, 10, metric)\n', (41028, 41116), False, 'import scann\n'), ((84587, 84645), 'h5py.File', 'h5py.File', (["(dataset_basedir + 'glove-100-angular.hdf5')", '"""r"""'], {}), "(dataset_basedir + 'glove-100-angular.hdf5', 'r')\n", (84596, 84645), False, 'import h5py\n'), ((14501, 14521), 'h5py.File', 'h5py.File', (['file', '"""r"""'], {}), "(file, 'r')\n", (14510, 14521), False, 'import h5py\n'), ((14535, 14578), 'numpy.array', 'np.array', (["dataset['train']"], {'dtype': '"""float32"""'}), "(dataset['train'], dtype='float32')\n", (14543, 14578), True, 'import numpy as np\n'), ((14816, 14844), 'h5py.File', 'h5py.File', (['dataset_path', '"""r"""'], {}), "(dataset_path, 'r')\n", (14825, 14844), False, 'import h5py\n'), ((14855, 14900), 'numpy.array', 'np.array', (["dataset['dataset']"], {'dtype': '"""float32"""'}), "(dataset['dataset'], dtype='float32')\n", (14863, 14900), True, 'import numpy as np\n'), ((14639, 14670), 'numpy.linalg.norm', 'np.linalg.norm', (['dataset'], {'axis': '(1)'}), '(dataset, axis=1)\n', (14653, 14670), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
"""
graph_builder is used by negative_samples_generator.py to supply the co-occurrence graph structures from which the negative samples are built.
"""
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import corpus2graph.util as util
import corpus2graph.multi_processing
class NoGraph:
    """Dense adjacency-matrix representation of a word co-occurrence graph.

    Built straight from an encoded_edges_count file; random-walk helpers
    operate on the numpy matrix directly instead of going through networkx
    (see NXGraph for the networkx-backed equivalent).
    """

    def __init__(self, encoded_edges_count_file_path, valid_vocabulary_path):
        """
        Theoretically valid_vocabulary file is not necessary. We could build a graph_index2wordId dict by going through
        encoded_edges_count_file_path and getting all wordIds. But it's not efficient.
        valid_wordId's order is really important and should be static, because:
            1. graph_index2wordId is built on this
            2. graph_wordId2index (temp var), on which cooccurrence_matrix element's order is based, is built on this.
            3. graph_index2wordId represents cooccurrence_matrix element's order
        """
        self.name_prefix = corpus2graph.multi_processing.get_file_name(encoded_edges_count_file_path).split('.')[0]
        # NOTE(review): list(set(...)) ordering depends on the interpreter's hash
        # seed, so the index<->wordId mapping is only stable within one process;
        # the mapping is saved alongside results, but confirm this is intended.
        valid_wordId = list(set(util.read_valid_vocabulary(valid_vocabulary_path)))  # make sure no duplication
        # ATTENTION: graph_index2wordId should be a list of which the index order is from 0 to vocab_size-1
        # TODO LATER No need to make graph_index2wordId an int list. Find where graph_index2wordId is needed and changed them.
        self.graph_index2wordId = list(map(int, valid_wordId))
        vocab_size = len(valid_wordId)
        # ATTENTION: the index is of the type int, while the wordId is of the type str
        graph_wordId2index = dict(zip(valid_wordId, range(vocab_size)))
        # initialize numpy 2d array
        cooccurrence_matrix = np.zeros((vocab_size, vocab_size))
        # read encoded_edges_count_file
        for line in corpus2graph.util.read_file_line_yielder(encoded_edges_count_file_path):
            # ATTENTION: line e.g. '17' '57' '10' or '57' '17' '10' (only one of them will appear in the file.)
            (source, target, weight) = line.split("\t")
            cooccurrence_matrix[graph_wordId2index[source]][graph_wordId2index[target]] = weight
            # undirected graph: mirror the weight so the matrix stays symmetric
            cooccurrence_matrix[graph_wordId2index[target]][graph_wordId2index[source]] = weight
        self.cooccurrence_matrix = cooccurrence_matrix

    def get_stochastic_matrix(self, remove_self_loops, power=None):
        """Return a row-stochastic transition matrix derived from co-occurrence.

        A replacement of get_stochastic_matrix function NXGraph class.

        remove_self_loops -- zero out the diagonal before normalizing.
        power -- optional exponent applied element-wise before normalizing.
        NOTE(review): a row summing to 0 (isolated word) yields NaNs here.
        """
        vocab_size = self.cooccurrence_matrix.shape[0]
        stochastic_matrix = self.cooccurrence_matrix.copy()
        # power co-occurrence if needed.
        if power:
            stochastic_matrix = np.power(stochastic_matrix, power)
        # remove self loop
        if remove_self_loops:
            for i in range(vocab_size):
                stochastic_matrix[i][i] = 0
        # normalize each row so it sums to 1
        matrix_sum_row = np.sum(stochastic_matrix, axis=1, keepdims=True)  # sum of each row and preserve the dimension
        stochastic_matrix /= matrix_sum_row
        return stochastic_matrix

    def one_to_t_step_random_walk_stochastic_matrix_yielder(self, t, remove_self_loops):
        """
        Instead of getting a specific t step random walk result, this method gets a dict of result from 1 step random
        walk to t step random walk. This method should be used for grid search.
        Yields (matrix, step) for step = 1..t.
        """
        transition_matrix = self.get_stochastic_matrix(remove_self_loops)
        result = transition_matrix
        for step in range(1, t + 1):
            if step != 1:
                result = np.matmul(result, transition_matrix)
            yield result, step

    def get_t_step_random_walk_stochastic_matrix(self, t, remove_self_loops, output_folder=None):
        """Return (graph_index2wordId, T^t) and optionally save both to disk."""
        transition_matrix = self.get_stochastic_matrix(remove_self_loops)
        result = transition_matrix
        steps = t  # remember the requested step count; the loop below consumes t
        while t > 1:
            result = np.matmul(result, transition_matrix)
            t -= 1
        if output_folder:
            # BUG FIX: str(t) was used here, but the loop has already decremented
            # t down to 1, so every file was named '..._1_step_...' regardless of
            # the number of steps actually performed. Use the original count.
            file_prefix = output_folder + self.name_prefix + '_' + str(steps)
            np.save(file_prefix + '_step_rw_matrix.npy', result, fix_imports=False)
            corpus2graph.util.write_to_pickle(self.graph_index2wordId, file_prefix + '_step_rw_nodes.pickle')
        return self.graph_index2wordId, result
class NXGraph:
    """networkx-backed representation of a word co-occurrence graph.

    Wraps a weighted (un)directed graph and provides the same random-walk
    stochastic-matrix API as NoGraph.
    """

    def __init__(self, graph, name_prefix, directed):
        # name_prefix = encoded_edges_count file's name - '.txt' => encoded_edges_count file names must be unique.
        self.name_prefix = name_prefix
        self.graph = graph
        self.directed = directed

    @classmethod
    def from_gpickle(cls, path):
        """Load a previously pickled networkx graph from *path*."""
        name_prefix = corpus2graph.multi_processing.get_file_name(path).split('.')[0]
        graph = nx.read_gpickle(path)
        return cls(graph, name_prefix, nx.is_directed(graph))

    @classmethod
    def from_encoded_edges_count_file(cls, path, directed):
        """Build a graph from a 'source target weight' edge-list file."""
        name_prefix = corpus2graph.multi_processing.get_file_name(path).split('.')[0]
        if directed:
            graph = nx.read_weighted_edgelist(path, create_using=nx.DiGraph(), nodetype=int)
        else:
            graph = nx.read_weighted_edgelist(path, create_using=nx.Graph(), nodetype=int)
        # nx.write_gpickle(graph, output_folder + name_prefix + '.gpickle')
        return cls(graph, name_prefix, directed)

    def draw_graph(self):
        """Render the graph with matplotlib.

        Takes too much time with big data.
        """
        nx.draw(self.graph, with_labels=True)
        plt.show()

    def print_graph_information(self):
        """Print node/edge/self-loop counts and basic connectivity stats."""
        print('\n###################### Graph Information ######################')
        number_of_edges = self.graph.number_of_edges()
        number_of_selfloops = nx.number_of_selfloops(self.graph)
        number_of_nodes = self.graph.number_of_nodes()
        if nx.is_directed(self.graph):
            print('The graph is directed.')
            connected_edges_proportion = round(
                (number_of_edges - number_of_selfloops) / (number_of_nodes * (number_of_nodes - 1)) * 100, 2)
        else:
            print('The graph is undirected.')
            connected_edges_proportion = round(
                (number_of_edges - number_of_selfloops) / ((number_of_nodes * (number_of_nodes - 1)) / 2) * 100, 2)
        print("#nodes:", number_of_nodes, "#edges:", number_of_edges, "#selfloops:", number_of_selfloops)
        print(str(connected_edges_proportion) + '% of the node pairs are connected via edges.')
        # TODO LATER: Code below takes long time to calculate for big graphs.
        # print('Average shortest path length (weight=None):', str(round(nx.average_shortest_path_length(self.graph), 2)))
        # TODO LATER: average_clustering has not implemented for undirected graph yet.
        if not nx.is_directed(self.graph):
            # For unweighted graphs, the clustering of a node
            # is the fraction of possible triangles through that node that exist
            print('The clustering coefficient for the graph is ' + str(
                round(nx.average_clustering(self.graph, weight=None), 2)))
        print('###############################################################\n')

    def get_stochastic_matrix(self, remove_self_loops):
        """Return the row-stochastic transition matrix as a numpy matrix.

        ATTENTION: for a big graph, this method consumes too much memory and
        calculation time.
        """
        # ATTENTION: it's really important to copy the graph. If not, in remove_self_loops=True situation,
        # self-loops will be permanently deleted.
        graph_copy = self.graph.copy()
        if remove_self_loops:
            graph_copy.remove_edges_from(list(nx.selfloop_edges(graph_copy)))  # remove self loop
        if self.directed:
            directed_graph = graph_copy
        else:
            directed_graph = graph_copy.to_directed()
        # this function only works with directed graph
        stochastic_graph = nx.stochastic_graph(directed_graph, weight='weight')
        return nx.to_numpy_matrix(stochastic_graph)

    def get_t_step_random_walk_stochastic_matrix(self, t, remove_self_loops, output_folder=None):
        """Return (nodes, T^t) and optionally save both to *output_folder*."""
        transition_matrix = self.get_stochastic_matrix(remove_self_loops=remove_self_loops)
        result = transition_matrix
        steps = t  # remember the requested step count; the loop below consumes t
        while t > 1:
            result = np.matmul(result, transition_matrix)
            t -= 1
        if output_folder:
            # BUG FIX: str(t) was used here, but the loop has already decremented
            # t down to 1, so every file was named '..._1_step_...' regardless of
            # the number of steps actually performed. Use the original count.
            file_prefix = output_folder + self.name_prefix + '_' + str(steps)
            np.save(file_prefix + '_step_rw_matrix.npy', result, fix_imports=False)
            corpus2graph.util.write_to_pickle(self.graph.nodes(), file_prefix + '_step_rw_nodes.pickle')
        return self.graph.nodes(), result

    def one_to_t_step_random_walk_stochastic_matrix_yielder(self, t, remove_self_loops):
        """
        Instead of getting a specific t step random walk result, this method gets a dict of result from 1 step random
        walk to t step random walk. This method should be used for grid search.
        Yields (matrix, step) for step = 1..t.
        """
        transition_matrix = self.get_stochastic_matrix(remove_self_loops)
        result = transition_matrix
        for step in range(1, t + 1):
            if step != 1:
                result = np.matmul(result, transition_matrix)
            yield result, step

    # def get_shortest_path_lengths_between_all_nodes(self, output_folder):
    #     """
    #     From test, these three algorithms below take more than 20 hours (processes have been killed after 20 hours) to
    #     calculate.
    #     'floyd_warshall_numpy' takes around 100 minutes to get the result.
    #
    #     # length1 = dict(nx.all_pairs_dijkstra_path_length(g))
    #     # length2 = dict(nx.all_pairs_bellman_ford_path_length(g))
    #     # length3 = nx.johnson(g, weight='weight')
    #     # for node in [0, 1, 2, 3, 4]:
    #     #     print('1 - {}: {}'.format(node, length2[1][node]))
    #     """
    #     """ ATTENTION
    #     'floyd_warshall_numpy' has already considered situations below:
    #     1. If there's no path between source and target node, matrix will put 'inf'
    #     2. No matter how much the weight is between node and node itself(self loop), the shortest path will always be 0.
    #     """
    #     matrix = nx.floyd_warshall_numpy(self.graph)  # ATTENTION: return type is NumPy matrix not NumPy ndarray.
    #     # ATTENTION: after saving, NumPy matrix has been changed to 2darray.
    #     np.save(output_folder + self.name_prefix + '_matrix.npy', matrix, fix_imports=False)
    #     corpus2graph.util.write_to_pickle(self.graph.nodes(), output_folder + self.name_prefix + '_nodes.pickle')
    #     return self.graph.nodes(), matrix
| [
"networkx.stochastic_graph",
"networkx.number_of_selfloops",
"networkx.is_directed",
"numpy.power",
"networkx.selfloop_edges",
"networkx.DiGraph",
"corpus2graph.util.read_valid_vocabulary",
"networkx.Graph",
"networkx.average_clustering",
"numpy.sum",
"numpy.zeros",
"numpy.matmul",
"networkx... | [((1731, 1765), 'numpy.zeros', 'np.zeros', (['(vocab_size, vocab_size)'], {}), '((vocab_size, vocab_size))\n', (1739, 1765), True, 'import numpy as np\n'), ((2954, 3002), 'numpy.sum', 'np.sum', (['stochastic_matrix'], {'axis': '(1)', 'keepdims': '(True)'}), '(stochastic_matrix, axis=1, keepdims=True)\n', (2960, 3002), True, 'import numpy as np\n'), ((4840, 4861), 'networkx.read_gpickle', 'nx.read_gpickle', (['path'], {}), '(path)\n', (4855, 4861), True, 'import networkx as nx\n'), ((5534, 5571), 'networkx.draw', 'nx.draw', (['self.graph'], {'with_labels': '(True)'}), '(self.graph, with_labels=True)\n', (5541, 5571), True, 'import networkx as nx\n'), ((5580, 5590), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5588, 5590), True, 'import matplotlib.pyplot as plt\n'), ((5799, 5833), 'networkx.number_of_selfloops', 'nx.number_of_selfloops', (['self.graph'], {}), '(self.graph)\n', (5821, 5833), True, 'import networkx as nx\n'), ((5900, 5926), 'networkx.is_directed', 'nx.is_directed', (['self.graph'], {}), '(self.graph)\n', (5914, 5926), True, 'import networkx as nx\n'), ((7956, 8008), 'networkx.stochastic_graph', 'nx.stochastic_graph', (['directed_graph'], {'weight': '"""weight"""'}), "(directed_graph, weight='weight')\n", (7975, 8008), True, 'import networkx as nx\n'), ((8024, 8060), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['stochastic_graph'], {}), '(stochastic_graph)\n', (8042, 8060), True, 'import networkx as nx\n'), ((2722, 2756), 'numpy.power', 'np.power', (['stochastic_matrix', 'power'], {}), '(stochastic_matrix, power)\n', (2730, 2756), True, 'import numpy as np\n'), ((4005, 4041), 'numpy.matmul', 'np.matmul', (['result', 'transition_matrix'], {}), '(result, transition_matrix)\n', (4014, 4041), True, 'import numpy as np\n'), ((4173, 4244), 'numpy.save', 'np.save', (["(file_prefix + '_step_rw_matrix.npy')", 'result'], {'fix_imports': '(False)'}), "(file_prefix + '_step_rw_matrix.npy', result, fix_imports=False)\n", (4180, 
4244), True, 'import numpy as np\n'), ((4901, 4922), 'networkx.is_directed', 'nx.is_directed', (['graph'], {}), '(graph)\n', (4915, 4922), True, 'import networkx as nx\n'), ((6860, 6886), 'networkx.is_directed', 'nx.is_directed', (['self.graph'], {}), '(self.graph)\n', (6874, 6886), True, 'import networkx as nx\n'), ((8329, 8365), 'numpy.matmul', 'np.matmul', (['result', 'transition_matrix'], {}), '(result, transition_matrix)\n', (8338, 8365), True, 'import numpy as np\n'), ((8497, 8568), 'numpy.save', 'np.save', (["(file_prefix + '_step_rw_matrix.npy')", 'result'], {'fix_imports': '(False)'}), "(file_prefix + '_step_rw_matrix.npy', result, fix_imports=False)\n", (8504, 8568), True, 'import numpy as np\n'), ((1089, 1138), 'corpus2graph.util.read_valid_vocabulary', 'util.read_valid_vocabulary', (['valid_vocabulary_path'], {}), '(valid_vocabulary_path)\n', (1115, 1138), True, 'import corpus2graph.util as util\n'), ((3627, 3663), 'numpy.matmul', 'np.matmul', (['result', 'transition_matrix'], {}), '(result, transition_matrix)\n', (3636, 3663), True, 'import numpy as np\n'), ((9217, 9253), 'numpy.matmul', 'np.matmul', (['result', 'transition_matrix'], {}), '(result, transition_matrix)\n', (9226, 9253), True, 'import numpy as np\n'), ((5174, 5186), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (5184, 5186), True, 'import networkx as nx\n'), ((5281, 5291), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5289, 5291), True, 'import networkx as nx\n'), ((7688, 7717), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['graph_copy'], {}), '(graph_copy)\n', (7705, 7717), True, 'import networkx as nx\n'), ((7125, 7171), 'networkx.average_clustering', 'nx.average_clustering', (['self.graph'], {'weight': 'None'}), '(self.graph, weight=None)\n', (7146, 7171), True, 'import networkx as nx\n')] |
"""
==========================
Yet another Sankey diagram
==========================
This example showcases a more complex sankey diagram.
"""
from __future__ import print_function
__author__ = "<NAME> <<EMAIL>>"
__version__ = "Time-stamp: <10/02/2010 16:49 <EMAIL>>"
import numpy as np
def sankey(ax,
           outputs=(100.,), outlabels=None,
           inputs=(100.,), inlabels='',
           dx=40, dy=10, outangle=45, w=3, inangle=30, offset=2, **kwargs):
    """Draw a Sankey diagram on *ax*.

    outputs: array of outputs, should sum up to 100%
    outlabels: output labels (same length as outputs),
        or None (use default labels) or '' (no labels)
    inputs and inlabels: similar for inputs
    dx: horizontal elongation
    dy: vertical elongation
    outangle: output arrow angle [deg]
    w: output arrow shoulder
    inangle: input dip angle
    offset: text offset
    **kwargs: propagated to Patch (e.g., fill=False)

    Return (patch, [intexts, outtexts]).

    Note: the default flow arguments are immutable tuples (not lists) to
    avoid the shared-mutable-default-argument pitfall; any sequence of
    numbers is accepted.
    """
    import matplotlib.patches as mpatches
    from matplotlib.path import Path

    outs = np.absolute(outputs)
    outsigns = np.sign(outputs)
    outsigns[-1] = 0  # Last output is drawn horizontally
    ins = np.absolute(inputs)
    insigns = np.sign(inputs)
    insigns[0] = 0  # First input is drawn horizontally

    # NOTE(review): asserts disappear under `python -O`; exact float equality
    # is also fragile for computed percentages — confirm callers pass exact sums.
    assert sum(outs) == 100, "Outputs don't sum up to 100%"
    assert sum(ins) == 100, "Inputs don't sum up to 100%"

    def add_output(path, loss, sign=1):
        """Append one output arrow (horizontal if sign==0, else vertical)."""
        # Arrow tip height
        h = (loss/2 + w) * np.tan(np.radians(outangle))
        move, (x, y) = path[-1]  # Use last point as reference
        if sign == 0:  # Final loss (horizontal)
            path.extend([(Path.LINETO, [x + dx, y]),
                         (Path.LINETO, [x + dx, y + w]),
                         (Path.LINETO, [x + dx + h, y - loss/2]),  # Tip
                         (Path.LINETO, [x + dx, y - loss - w]),
                         (Path.LINETO, [x + dx, y - loss])])
            outtips.append((sign, path[-3][1]))
        else:  # Intermediate loss (vertical)
            path.extend([(Path.CURVE4, [x + dx/2, y]),
                         (Path.CURVE4, [x + dx, y]),
                         (Path.CURVE4, [x + dx, y + sign*dy]),
                         (Path.LINETO, [x + dx - w, y + sign*dy]),
                         # Tip
                         (Path.LINETO, [
                          x + dx + loss/2, y + sign*(dy + h)]),
                         (Path.LINETO, [x + dx + loss + w, y + sign*dy]),
                         (Path.LINETO, [x + dx + loss, y + sign*dy]),
                         (Path.CURVE3, [x + dx + loss, y - sign*loss]),
                         (Path.CURVE3, [x + dx/2 + loss, y - sign*loss])])
            outtips.append((sign, path[-5][1]))

    def add_input(path, gain, sign=1):
        """Append one input arrow (horizontal if sign==0, else vertical)."""
        h = (gain / 2) * np.tan(np.radians(inangle))  # Dip depth
        move, (x, y) = path[-1]  # Use last point as reference
        if sign == 0:  # First gain (horizontal)
            path.extend([(Path.LINETO, [x - dx, y]),
                         (Path.LINETO, [x - dx + h, y + gain/2]),  # Dip
                         (Path.LINETO, [x - dx, y + gain])])
            xd, yd = path[-2][1]  # Dip position
            indips.append((sign, [xd - h, yd]))
        else:  # Intermediate gain (vertical)
            path.extend([(Path.CURVE4, [x - dx/2, y]),
                         (Path.CURVE4, [x - dx, y]),
                         (Path.CURVE4, [x - dx, y + sign*dy]),
                         # Dip
                         (Path.LINETO, [
                          x - dx - gain / 2, y + sign*(dy - h)]),
                         (Path.LINETO, [x - dx - gain, y + sign*dy]),
                         (Path.CURVE3, [x - dx - gain, y - sign*gain]),
                         (Path.CURVE3, [x - dx/2 - gain, y - sign*gain])])
            xd, yd = path[-4][1]  # Dip position
            indips.append((sign, [xd, yd + sign*h]))

    outtips = []  # Output arrow tip dir. and positions
    urpath = [(Path.MOVETO, [0, 100])]  # 1st point of upper right path
    lrpath = [(Path.LINETO, [0, 0])]  # 1st point of lower right path
    for loss, sign in zip(outs, outsigns):
        add_output(sign >= 0 and urpath or lrpath, loss, sign=sign)

    indips = []  # Input arrow tip dir. and positions
    llpath = [(Path.LINETO, [0, 0])]  # 1st point of lower left path
    ulpath = [(Path.MOVETO, [0, 100])]  # 1st point of upper left path
    for gain, sign in reversed(list(zip(ins, insigns))):
        add_input(sign <= 0 and llpath or ulpath, gain, sign=sign)

    def revert(path):
        """A path is not just revertable by path[::-1] because of Bezier
        curves."""
        rpath = []
        nextmove = Path.LINETO
        for move, pos in path[::-1]:
            rpath.append((nextmove, pos))
            nextmove = move
        return rpath

    # Concatenate subpathes in correct order
    path = urpath + revert(lrpath) + llpath + revert(ulpath)
    codes, verts = zip(*path)
    verts = np.array(verts)

    # Path patch
    path = Path(verts, codes)
    patch = mpatches.PathPatch(path, **kwargs)
    ax.add_patch(patch)

    if False:  # DEBUG
        print("urpath", urpath)
        print("lrpath", revert(lrpath))
        print("llpath", llpath)
        print("ulpath", revert(ulpath))
        xs, ys = zip(*verts)
        ax.plot(xs, ys, 'go-')

    # Labels
    def set_labels(labels, values):
        """Set or check labels according to values."""
        if labels == '':  # No labels
            return labels
        elif labels is None:  # Default labels
            return ['%2d%%' % val for val in values]
        else:
            assert len(labels) == len(values)
            return labels

    def put_labels(labels, positions, output=True):
        """Put labels to positions."""
        texts = []
        lbls = output and labels or labels[::-1]
        for i, label in enumerate(lbls):
            s, (x, y) = positions[i]  # Label direction and position
            if s == 0:
                t = ax.text(x + offset, y, label,
                            ha=output and 'left' or 'right', va='center')
            elif s > 0:
                t = ax.text(x, y + offset, label, ha='center', va='bottom')
            else:
                t = ax.text(x, y - offset, label, ha='center', va='top')
            texts.append(t)
        return texts

    outlabels = set_labels(outlabels, outs)
    outtexts = put_labels(outlabels, outtips, output=True)

    inlabels = set_labels(inlabels, ins)
    intexts = put_labels(inlabels, indips, output=False)

    # Axes management
    ax.set_xlim(verts[:, 0].min() - dx, verts[:, 0].max() + dx)
    ax.set_ylim(verts[:, 1].min() - dy, verts[:, 1].max() + dy)
    ax.set_aspect('equal', adjustable='datalim')

    return patch, [intexts, outtexts]
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Demo data: six outgoing flows (percent) and three incoming ones.
    flow_values = [10., -20., 5., 15., -10., 40.]
    flow_names = ['First', 'Second', 'Third', 'Fourth', 'Fifth', 'Hurray!']
    flow_names = ['%s\n%d%%' % (name, abs(value))
                  for value, name in zip(flow_values, flow_names)]
    gains = [60., -25., 15.]

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Sankey diagram")
    patch, (intexts, outtexts) = sankey(ax, outputs=flow_values,
                                        outlabels=flow_names, inputs=gains,
                                        inlabels=None)
    # Highlight two of the output labels.
    outtexts[1].set_color('r')
    outtexts[-1].set_fontweight('bold')

    plt.show()
| [
"numpy.radians",
"matplotlib.path.Path",
"numpy.absolute",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.patches.PathPatch",
"numpy.sign",
"matplotlib.pyplot.show"
] | [((1071, 1091), 'numpy.absolute', 'np.absolute', (['outputs'], {}), '(outputs)\n', (1082, 1091), True, 'import numpy as np\n'), ((1107, 1123), 'numpy.sign', 'np.sign', (['outputs'], {}), '(outputs)\n', (1114, 1123), True, 'import numpy as np\n'), ((1171, 1190), 'numpy.absolute', 'np.absolute', (['inputs'], {}), '(inputs)\n', (1182, 1190), True, 'import numpy as np\n'), ((1205, 1220), 'numpy.sign', 'np.sign', (['inputs'], {}), '(inputs)\n', (1212, 1220), True, 'import numpy as np\n'), ((4973, 4988), 'numpy.array', 'np.array', (['verts'], {}), '(verts)\n', (4981, 4988), True, 'import numpy as np\n'), ((5018, 5036), 'matplotlib.path.Path', 'Path', (['verts', 'codes'], {}), '(verts, codes)\n', (5022, 5036), False, 'from matplotlib.path import Path\n'), ((5049, 5083), 'matplotlib.patches.PathPatch', 'mpatches.PathPatch', (['path'], {}), '(path, **kwargs)\n', (5067, 5083), True, 'import matplotlib.patches as mpatches\n'), ((7097, 7109), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7107, 7109), True, 'import matplotlib.pyplot as plt\n'), ((7459, 7469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7467, 7469), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1496), 'numpy.radians', 'np.radians', (['outangle'], {}), '(outangle)\n', (1486, 1496), True, 'import numpy as np\n'), ((2797, 2816), 'numpy.radians', 'np.radians', (['inangle'], {}), '(inangle)\n', (2807, 2816), True, 'import numpy as np\n')] |
import pickle
import keras
import uuid
#pytorch
import torch as t
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import msgpack
import random
import codecs
import numpy as np
import json
import msgpack_numpy
# https://github.com/lebedov/msgpack-numpy
import sys
import time
from flask import *
from flask_socketio import SocketIO
from flask_socketio import *
# https://flask-socketio.readthedocs.io/en/latest/
class GlobalModel(object):
    """Server-side base class for federated learning.

    Holds the authoritative model weights plus per-round train/valid
    loss-accuracy history. Subclasses must implement build_model().

    NOTE(review): the isinstance(..., unicode) checks below are Python-2
    only — confirm the target interpreter before porting.
    """
    def __init__(self):
        # Template-method: build_model() is supplied by the subclass.
        self.model = self.build_model()
        self.current_weights = self.model.get_weights()
        # for convergence check
        self.prev_train_loss = None
        # all rounds; losses[i] = [round#, timestamp, loss]
        # round# could be None if not applicable
        self.train_losses = []
        self.valid_losses = []
        self.train_accuracies = []
        self.valid_accuracies = []
        # wall-clock start (seconds), used to timestamp history entries
        self.training_start_time = int(round(time.time()))
        print("GlobalModel __init__")
    def build_model(self):
        # Must be overridden by a concrete subclass; returns a compiled model.
        print("GlobalModel build_model")
        raise NotImplementedError()
    # client_updates = [(w, n)..]
    def update_weights(self, client_weights, client_sizes, current_round):
        """Replace current_weights from the clients' reported weights.

        Instead of FedAvg's size-weighted mean, this selects the weights of
        client (current_round % 5) layer by layer; a layer reported as a
        unicode string (used here as an error marker from the client) is
        substituted from a neighbouring client: c-1, then c+1, then c-2.
        NOTE(review): those neighbour indices can fall outside the list or
        wrap via negative indexing for small client counts — confirm intended.
        """
        print("1114 update_weights")
        new_weights = [np.zeros(w.shape) for w in self.current_weights]
        total_size = np.sum(client_sizes)
        # total_size is not number
        # liuying change selection
        # 1: continue mode
        # 2: condidate the error weights
        for c in range(len(client_weights)):
            if (c == (current_round % 5)):
                print("c=", c, "round =", current_round)
                for i in range(len(new_weights)):
                    if (isinstance(client_weights[c][i], unicode)):
                        if (not isinstance(client_weights[c - 1][i], unicode)):
                            new_weights[i] = client_weights[c - 1][i]
                        else:
                            if (not isinstance(client_weights[c + 1][i], unicode)):
                                new_weights[i] = client_weights[c + 1][i]
                            else:
                                new_weights[i] = client_weights[c - 2][i]
                    else:
                        # new_weights[i] += client_weights[c][i] * client_sizes[c] / total_size
                        new_weights[i] = client_weights[c][i]
        self.current_weights = new_weights
        print("GlobalModel update_weights")
        # liuying if use break mode then should change the total_size
        # for c in range(len(client_weights)):
        #     for i in range(len(new_weights)):
        #         if(isinstance(client_weights[c][i],unicode)):
        #             total_size -= client_sizes[c]
        #             break;
        # for c in range(len(client_weights)):
        #     #if(c==current_round):
        #     if(1):
        #         print("c=",c,"round =",current_round)
        #         for i in range(len(new_weights)):
        #             if(isinstance(client_weights[c][i],unicode)):
        #                 print("continue")
        #                 continue
        #             else:
        #                 new_weights[i] += client_weights[c][i] * client_sizes[c] / total_size
        #         self.current_weights = new_weights
        #         print("GlobalModel update_weights")
    def aggregate_loss_accuracy(self, client_losses, client_accuracies, client_sizes):
        """Return (loss, accuracy) as size-weighted means over the clients."""
        total_size = np.sum(client_sizes)
        # weighted sum
        aggr_loss = np.sum(client_losses[i] / total_size * client_sizes[i]
                           for i in range(len(client_sizes)))
        aggr_accuraries = np.sum(client_accuracies[i] / total_size * client_sizes[i]
                               for i in range(len(client_sizes)))
        print("GlobalModel aggregate_loss_accuracy")
        return aggr_loss, aggr_accuraries
    # cur_round coule be None
    def aggregate_train_loss_accuracy(self, client_losses, client_accuracies, client_sizes, cur_round):
        """Aggregate training metrics, append to history, dump stats.txt."""
        cur_time = int(round(time.time())) - self.training_start_time
        aggr_loss, aggr_accuraries = self.aggregate_loss_accuracy(client_losses, client_accuracies, client_sizes)
        self.train_losses += [[cur_round, cur_time, aggr_loss]]
        self.train_accuracies += [[cur_round, cur_time, aggr_accuraries]]
        with open('stats.txt', 'w') as outfile:
            json.dump(self.get_stats(), outfile)
        print("GlobalModel aggregate_train_loss_accuracy")
        return aggr_loss, aggr_accuraries
    # cur_round coule be None
    def aggregate_valid_loss_accuracy(self, client_losses, client_accuracies, client_sizes, cur_round):
        """Aggregate validation metrics, append to history, dump stats.txt."""
        cur_time = int(round(time.time())) - self.training_start_time
        aggr_loss, aggr_accuraries = self.aggregate_loss_accuracy(client_losses, client_accuracies, client_sizes)
        self.valid_losses += [[cur_round, cur_time, aggr_loss]]
        self.valid_accuracies += [[cur_round, cur_time, aggr_accuraries]]
        with open('stats.txt', 'w') as outfile:
            json.dump(self.get_stats(), outfile)
        print("GlobalModel aggregate_valid_loss_accuracy")
        return aggr_loss, aggr_accuraries
    def get_stats(self):
        # Snapshot of the whole metric history, JSON-serializable.
        print("GlobalModel get_stats")
        return {
            "train_loss": self.train_losses,
            "valid_loss": self.valid_losses,
            "train_accuracy": self.train_accuracies,
            "valid_accuracy": self.valid_accuracies
        }
# class GlobalModel_MNIST_CNN(GlobalModel):
class GlobalModel_MNIST_CNN(nn.Module):
    """Small PyTorch CNN for MNIST: two conv blocks then a linear classifier.

    Input: (batch, 1, 28, 28) float tensor; output: (batch, 10) logits.
    """

    def __init__(self):
        super(GlobalModel_MNIST_CNN, self).__init__()
        # BUG FIX: build_model() was never called here, so self.conv1/conv2/fc
        # did not exist — forward() raised AttributeError and cnn.parameters()
        # was empty (the Adam optimizer below would reject an empty list).
        self.build_model()
        print("GlobalModel_MNIST_CNN __init__")

    def build_model(self):
        """Create the layer stack as attributes of this module."""
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),  # -> (16, 28, 28)
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))  # -> (16, 14, 14)
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),  # -> (32, 14, 14)
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))  # -> (32, 7, 7)
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Return class logits for a batch of 1x28x28 images."""
        out = self.conv1(x)
        out = self.conv2(out)
        out = out.view(out.size(0), -1)  # flatten to (batch, 32*7*7)
        out = self.fc(out)
        return out
# Instantiate the global CNN and move it to the GPU when one is available.
cnn = GlobalModel_MNIST_CNN()
if t.cuda.is_available():
    cnn = cnn.cuda()
# Choose the loss function and the optimization method.
loss_func = nn.CrossEntropyLoss()
# BUG FIX: torch has no 't.option' attribute — optimizers live in torch.optim,
# so the original line crashed with AttributeError at import time.
optimizer = t.optim.Adam(cnn.parameters(), lr=0.001)
######## Flask server with Socket IO ########
# Federated Averaging algorithm with the server pulling from clients
class FLServer(object):
MIN_NUM_WORKERS = 6
MAX_NUM_ROUNDS = 20
NUM_CLIENTS_CONTACTED_PER_ROUND = 6
ROUNDS_BETWEEN_VALIDATIONS = 1
def __init__(self, global_model, host, port):
self.global_model = global_model()
self.ready_client_sids = set()
self.app = Flask(__name__)
self.socketio = SocketIO(self.app)
self.host = host
self.port = port
self.model_id = str(uuid.uuid4())
#####
# training states
self.current_round = -1 # -1 for not yet started
self.current_round_client_updates = []
self.eval_client_updates = []
#####
# socket io messages
self.register_handles()
print("FLServer __init__")
@self.app.route('/')
def dashboard():
return render_template('dashboard.html')
@self.app.route('/stats')
def status_page():
return json.dumps(self.global_model.get_stats())
def register_handles(self):
# single-threaded async, no need to lock
print("FLServer register_handles")
@self.socketio.on('connect')
def handle_connect():
print(request.sid, "connected")
@self.socketio.on('reconnect')
def handle_reconnect():
print(request.sid, "reconnected")
@self.socketio.on('disconnect')
def handle_reconnect():
print(request.sid, "disconnected")
if request.sid in self.ready_client_sids:
self.ready_client_sids.remove(request.sid)
@self.socketio.on('client_wake_up')
def handle_wake_up():
print("client wake_up: ", request.sid)
emit('init', {
'model_json': self.global_model.model.to_json(),
'model_id': self.model_id,
'min_train_size': 1200,
'data_split': (0.6, 0.3, 0.1), # train, test, valid
'epoch_per_round': 1,
'batch_size': 10
})
@self.socketio.on('client_ready')
def handle_client_ready(data):
print("client ready for training", request.sid, data)
self.ready_client_sids.add(request.sid)
if len(self.ready_client_sids) >= FLServer.MIN_NUM_WORKERS and self.current_round == -1:
self.train_next_round()
@self.socketio.on('client_update')
def handle_client_update(data):
print("received client update of bytes: ", sys.getsizeof(data))
print("handle client_update", request.sid)
for x in data:
if x != 'weights':
print("error x != 'weights':")
print(x, data[x])
# data:
# weights
# train_size
# valid_size
# train_loss
# train_accuracy
# valid_loss?
# valid_accuracy?
# discard outdated update
# with open('data.txt', 'w') as outfile:
# json.dump(data, outfile)
if data['round_number'] == self.current_round:
self.current_round_client_updates += [data]
self.current_round_client_updates[-1]['weights'] = pickle_string_to_obj(data['weights'])
# tolerate 30% unresponsive clients
if len(self.current_round_client_updates) == FLServer.NUM_CLIENTS_CONTACTED_PER_ROUND * 1:
time.sleep(5)
self.global_model.update_weights(
[x['weights'] for x in self.current_round_client_updates],
[x['train_size'] for x in self.current_round_client_updates],
int(self.current_round)
)
aggr_train_loss, aggr_train_accuracy = self.global_model.aggregate_train_loss_accuracy(
[x['train_loss'] for x in self.current_round_client_updates],
[x['train_accuracy'] for x in self.current_round_client_updates],
[x['train_size'] for x in self.current_round_client_updates],
self.current_round
)
print("aggr_train_loss", aggr_train_loss)
print("aggr_train_accuracy", aggr_train_accuracy)
###error index out of range
if 'valid_loss' in self.current_round_client_updates[0]:
aggr_valid_loss, aggr_valid_accuracy = self.global_model.aggregate_valid_loss_accuracy(
# liuying x['valid_loss'] for x in self.current_round_client_updates
# error list index out of range
# message handler error
[x['valid_loss'] for x in self.current_round_client_updates],
[x['valid_accuracy'] for x in self.current_round_client_updates],
[x['valid_size'] for x in self.current_round_client_updates],
self.current_round
)
print("self.current_round", self.current_round)
print("aggr_valid_loss", aggr_valid_loss)
print("aggr_valid_accuracy", aggr_valid_accuracy)
# stop and eval based on loss
# avoid converge in low accuracy
if self.global_model.prev_train_loss is not None and \
(
self.global_model.prev_train_loss - aggr_train_loss) / self.global_model.prev_train_loss < 0.01 and \
aggr_valid_accuracy > 0.5:
# converges
print("converges! starting test phase..")
self.stop_and_eval()
return
self.global_model.prev_train_loss = aggr_train_loss
if self.current_round >= FLServer.MAX_NUM_ROUNDS or aggr_valid_accuracy > 0.98:
self.stop_and_eval()
else:
self.train_next_round()
@self.socketio.on('client_eval')
def handle_client_eval(data):
if self.eval_client_updates is None:
return
print("handle client_eval", request.sid)
print("eval_resp", data)
# TypeError: unsupported operand type(s) for +=: 'NoneType' and 'list'
self.eval_client_updates += [data]
# tolerate 30% unresponsive clients
if len(self.eval_client_updates) == FLServer.NUM_CLIENTS_CONTACTED_PER_ROUND * 1:
aggr_test_loss, aggr_test_accuracy = self.global_model.aggregate_loss_accuracy(
[x['test_loss'] for x in self.eval_client_updates],
[x['test_accuracy'] for x in self.eval_client_updates],
[x['test_size'] for x in self.eval_client_updates],
);
print("self.current_round", self.current_round)
print("\naggr_test_loss", aggr_test_loss)
print("aggr_test_accuracy", aggr_test_accuracy)
print("== done ==")
self.eval_client_updates = None # special value, forbid evaling again
# Note: we assume that during training the #workers will be >= MIN_NUM_WORKERS
def train_next_round(self):
"""Start one federated round: sample clients and request model updates."""
self.current_round += 1
# buffers all client updates
self.current_round_client_updates = []
print("### Round ", self.current_round, "###")
# randomly pick the subset of ready clients to contact this round
client_sids_selected = random.sample(list(self.ready_client_sids), FLServer.NUM_CLIENTS_CONTACTED_PER_ROUND)
print("request updates from", client_sids_selected)
# by default each client cnn is in its own "room"
for rid in client_sids_selected:
# push current global weights; clients run validation only every
# ROUNDS_BETWEEN_VALIDATIONS rounds
emit('request_update', {
'model_id': self.model_id,
'round_number': self.current_round,
'current_weights': obj_to_pickle_string(self.global_model.current_weights),
'weights_format': 'pickle',
'run_validation': self.current_round % FLServer.ROUNDS_BETWEEN_VALIDATIONS == 0,
}, room=rid)
print("FLServer train_next_round")
# end training, switch to evaluation
def stop_and_eval(self):
"""Broadcast final global weights to every ready client for test-set evaluation."""
# reset the buffer that handle_client_eval fills with test results
self.eval_client_updates = []
for rid in self.ready_client_sids:
emit('stop_and_eval', {
'model_id': self.model_id,
'current_weights': obj_to_pickle_string(self.global_model.current_weights),
'weights_format': 'pickle'
}, room=rid)
print("FLServer stop_and_eval")
def start(self):
"""Run the Socket.IO server; blocks until the server shuts down."""
self.socketio.run(self.app, host=self.host, port=self.port)
# reached only after socketio.run() returns, i.e. on shutdown
print("FLServer start")
# serializable
def obj_to_pickle_string(x):
    """Serialize any picklable object *x* into a base64 text string.

    Inverse of pickle_string_to_obj.
    """
    print("obj_to_pickle_string")
    pickled = pickle.dumps(x)
    # base64-encode so the payload is safe to ship as JSON/Socket.IO text
    return codecs.encode(pickled, "base64").decode()
    # TODO: compare pickle vs msgpack vs json for serialization;
    # tradeoff: computation vs network IO
# anti-serializable
def pickle_string_to_obj(s):
    """Rebuild an object from a base64 pickle string (see obj_to_pickle_string)."""
    print("pickle_string_to_obj")
    # SECURITY NOTE: unpickling is code execution - only feed this trusted data.
    raw = codecs.decode(s.encode(), "base64")
    return pickle.loads(raw)
    # return msgpack.unpackb(s, object_hook=msgpack_numpy.decode)
if __name__ == '__main__':
# When the application is in debug mode the Werkzeug development server is still used
# and configured properly inside socketio.run(). In production mode the eventlet web server
# is used if available, else the gevent web server is used.
print("fl_server __main__")
# NOTE(review): host/port are hard-coded (looks like a Docker bridge address);
# consider reading them from CLI arguments or environment variables.
server = FLServer(GlobalModel_MNIST_CNN, "172.17.0.2", 1111)
print("listening on 172.17.0.2:1111");
server.start()
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"pickle.dumps",
"sys.getsizeof",
"flask_socketio.SocketIO",
"torch.nn.Conv2d",
"uuid.uuid4",
"numpy.sum",
"numpy.zeros",
"torch.cuda.is_available",
"torch.nn.MaxPool2d",
"time.sleep",
"torch.nn.Linear",
"time.time"
] | [((7576, 7597), 'torch.cuda.is_available', 't.cuda.is_available', ([], {}), '()\n', (7595, 7597), True, 'import torch as t\n'), ((7646, 7667), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7665, 7667), True, 'import torch.nn as nn\n'), ((1616, 1636), 'numpy.sum', 'np.sum', (['client_sizes'], {}), '(client_sizes)\n', (1622, 1636), True, 'import numpy as np\n'), ((3820, 3840), 'numpy.sum', 'np.sum', (['client_sizes'], {}), '(client_sizes)\n', (3826, 3840), True, 'import numpy as np\n'), ((7335, 7360), 'torch.nn.Linear', 'nn.Linear', (['(7 * 7 * 32)', '(10)'], {}), '(7 * 7 * 32, 10)\n', (7344, 7360), True, 'import torch.nn as nn\n'), ((8188, 8206), 'flask_socketio.SocketIO', 'SocketIO', (['self.app'], {}), '(self.app)\n', (8196, 8206), False, 'from flask_socketio import SocketIO\n'), ((1546, 1563), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (1554, 1563), True, 'import numpy as np\n'), ((6930, 7007), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)'}), '(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2)\n', (6939, 7007), True, 'import torch.nn as nn\n'), ((7033, 7051), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (7047, 7051), True, 'import torch.nn as nn\n'), ((7065, 7074), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7072, 7074), True, 'import torch.nn as nn\n'), ((7088, 7103), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (7100, 7103), True, 'import torch.nn as nn\n'), ((7153, 7231), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(16)', 'out_channels': '(32)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(2)'}), '(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2)\n', (7162, 7231), True, 'import torch.nn as nn\n'), ((7245, 7263), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (7259, 7263), True, 'import torch.nn as nn\n'), 
((7277, 7286), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7284, 7286), True, 'import torch.nn as nn\n'), ((7300, 7315), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (7312, 7315), True, 'import torch.nn as nn\n'), ((8286, 8298), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8296, 8298), False, 'import uuid\n'), ((1217, 1228), 'time.time', 'time.time', ([], {}), '()\n', (1226, 1228), False, 'import time\n'), ((10350, 10369), 'sys.getsizeof', 'sys.getsizeof', (['data'], {}), '(data)\n', (10363, 10369), False, 'import sys\n'), ((16914, 16929), 'pickle.dumps', 'pickle.dumps', (['x'], {}), '(x)\n', (16926, 16929), False, 'import pickle\n'), ((4413, 4424), 'time.time', 'time.time', ([], {}), '()\n', (4422, 4424), False, 'import time\n'), ((5068, 5079), 'time.time', 'time.time', ([], {}), '()\n', (5077, 5079), False, 'import time\n'), ((11356, 11369), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (11366, 11369), False, 'import time\n')] |
import numpy as np
#from cvxopt import matrix
import pickle
from l1ls import l1ls
import copy
import pdb
import os
# Learn, for each class, an overcomplete dictionary by alternating:
#   (1) sparse coding: l1-regularized least squares (l1ls) per sample;
#   (2) dictionary update: re-fit each atom to the residual that excludes
#       its own contribution, then renormalize the atom.
# Input pickle: presumably a dict/list mapping class -> feature matrix of
# shape (feature_dim, num_samples) - TODO confirm against the producer of
# gt_feats.pkl (feat.shape[0]/[1] usage below implies this layout).
feats_file = 'gt_feats.pkl'
mode = 'gt'
with open(feats_file, 'rb') as f:
feats = pickle.load(f)
num_classes = len(feats)
dicts_list = []
# number of dictionary atoms (columns) learned per class
dicts_num = 192
max_iters = 25
# stop when the objective changes by less than this between iterations
min_tol = 1e-2
# l1 sparsity weight
lamda = 1e-3
learn_dicts_list = []
learn_alpha_list = []
# NOTE(review): error_pre/error_now are not reset per class, so the first
# convergence check of each class compares against the previous class's
# final objective - verify this is intended.
error_pre = 0
error_now = 0
error_list = []
for i in range(num_classes):
error = []
feat = feats[i]
# random initial dictionary with unit-l2-norm columns
init_dict = np.random.randn(feat.shape[0], dicts_num)
learn_dict = None
norm = np.linalg.norm(init_dict, axis=0, keepdims=True)
init_dict = init_dict / norm
print('Begin learn class {} \n'.format(i))
num_sample = feat.shape[1]
for k in range(max_iters):
alpha = []
# NOTE: `dict` shadows the builtin; it holds the current dictionary
if k == 0:
dict = init_dict
else:
dict = learn_dict
# sparse-coding step: min ||D a - x||^2 + lamda * |a|_1, per sample
for j in range(feat.shape[1]):
[x, status, hist] = l1ls(dict, feat[:,j], lamda, quiet=True)
if 'Failed' in status:
print('L1 normalization not solved!')
alpha.append(x.reshape(-1,1))
alpha = np.concatenate(alpha, axis=1)
recon_feat = np.matmul(dict, alpha)
# dictionary-update step: per atom j, remove its contribution from the
# reconstruction, fit the atom to the remaining residual, renormalize
learn_dict = []
for j in range(dict.shape[1]):
y = feat - (recon_feat - dict[:,[j]].reshape(-1,1) * alpha[[j],:].reshape(1,-1))
d_j = np.matmul(y, alpha[j, :].reshape(-1, 1))
norm_d_j = d_j / np.linalg.norm(d_j)
learn_dict.append(norm_d_j.reshape(-1, 1))
learn_dict = np.concatenate(learn_dict, axis=1)
# per-sample objective: reconstruction error + weighted sparsity
recon_error = ((feat - np.matmul(learn_dict, alpha))**2).sum() / num_sample
co_error = np.abs(alpha).sum() * lamda / num_sample
error.append([recon_error, co_error])
error_pre = error_now
error_now = recon_error + co_error
print('iter: {} error: {} {} \n'.format(k, recon_error, co_error))
# converged: objective no longer improving
if abs(error_now - error_pre) < min_tol:
break
learn_dicts_list.append(learn_dict)
learn_alpha_list.append(alpha)
error_list.append(error)
# save learned dictionaries, sparse codes, and error history next to input
dict_file = os.path.join(os.path.dirname(feats_file), mode + '_learn_dicts_'+ str(lamda) +'.pkl')
alpha_file = os.path.join(os.path.dirname(feats_file), mode +'_alpha_' + str(lamda) +'.pkl')
error_file = os.path.join(os.path.dirname(feats_file), mode +'_error_' + str(lamda) +'.pkl')
with open(dict_file, 'wb') as f:
pickle.dump(learn_dicts_list, f)
with open(alpha_file, 'wb') as f:
pickle.dump(learn_alpha_list, f)
with open(error_file, 'wb') as f:
pickle.dump(error_list, f)
"numpy.abs",
"pickle.dump",
"pickle.load",
"l1ls.l1ls",
"os.path.dirname",
"numpy.matmul",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.random.randn"
] | [((203, 217), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (214, 217), False, 'import pickle\n'), ((489, 530), 'numpy.random.randn', 'np.random.randn', (['feat.shape[0]', 'dicts_num'], {}), '(feat.shape[0], dicts_num)\n', (504, 530), True, 'import numpy as np\n'), ((564, 612), 'numpy.linalg.norm', 'np.linalg.norm', (['init_dict'], {'axis': '(0)', 'keepdims': '(True)'}), '(init_dict, axis=0, keepdims=True)\n', (578, 612), True, 'import numpy as np\n'), ((2110, 2137), 'os.path.dirname', 'os.path.dirname', (['feats_file'], {}), '(feats_file)\n', (2125, 2137), False, 'import os\n'), ((2209, 2236), 'os.path.dirname', 'os.path.dirname', (['feats_file'], {}), '(feats_file)\n', (2224, 2236), False, 'import os\n'), ((2303, 2330), 'os.path.dirname', 'os.path.dirname', (['feats_file'], {}), '(feats_file)\n', (2318, 2330), False, 'import os\n'), ((2407, 2439), 'pickle.dump', 'pickle.dump', (['learn_dicts_list', 'f'], {}), '(learn_dicts_list, f)\n', (2418, 2439), False, 'import pickle\n'), ((2478, 2510), 'pickle.dump', 'pickle.dump', (['learn_alpha_list', 'f'], {}), '(learn_alpha_list, f)\n', (2489, 2510), False, 'import pickle\n'), ((2549, 2575), 'pickle.dump', 'pickle.dump', (['error_list', 'f'], {}), '(error_list, f)\n', (2560, 2575), False, 'import pickle\n'), ((1125, 1154), 'numpy.concatenate', 'np.concatenate', (['alpha'], {'axis': '(1)'}), '(alpha, axis=1)\n', (1139, 1154), True, 'import numpy as np\n'), ((1176, 1198), 'numpy.matmul', 'np.matmul', (['dict', 'alpha'], {}), '(dict, alpha)\n', (1185, 1198), True, 'import numpy as np\n'), ((1539, 1573), 'numpy.concatenate', 'np.concatenate', (['learn_dict'], {'axis': '(1)'}), '(learn_dict, axis=1)\n', (1553, 1573), True, 'import numpy as np\n'), ((937, 978), 'l1ls.l1ls', 'l1ls', (['dict', 'feat[:, j]', 'lamda'], {'quiet': '(True)'}), '(dict, feat[:, j], lamda, quiet=True)\n', (941, 978), False, 'from l1ls import l1ls\n'), ((1443, 1462), 'numpy.linalg.norm', 'np.linalg.norm', (['d_j'], {}), '(d_j)\n', (1457, 1462), 
True, 'import numpy as np\n'), ((1677, 1690), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (1683, 1690), True, 'import numpy as np\n'), ((1605, 1633), 'numpy.matmul', 'np.matmul', (['learn_dict', 'alpha'], {}), '(learn_dict, alpha)\n', (1614, 1633), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np

# Interactive camera calibration from an MJPEG stream using an asymmetric
# circle grid. Press SPACE to capture a calibration frame, 'q' to finish.
#
# BUG FIXES vs previous version:
#  - worldPoints was built for a 7x6 (42-point) chessboard while detection
#    uses a (4, 11) = 44-point asymmetric circle grid, so calibrateCamera
#    received mismatched point counts; object points now match the grid.
#  - a possibly-undefined `corners2` was appended every frame inside a bare
#    try/except; points are now stored only for frames with a detection.
#  - drawChessboardCorners was called with the wrong pattern size (7, 6)
#    and the wrong status flag (`ret` from cap.read instead of retCorner).

# scale factor applied to each captured frame before processing
resizeValue = 1

# pattern geometry for cv.findCirclesGrid with CALIB_CB_ASYMMETRIC_GRID
pattern_size = (4, 11)  # circles per row, number of rows

# Object points of the asymmetric circle grid in its own plane (z = 0).
# Every other row is shifted by half a column spacing, hence
# x = 2 * column + (row % 2). Units are arbitrary (grid spacings).
worldPoints = np.zeros((pattern_size[0] * pattern_size[1], 3), np.float32)
for row in range(pattern_size[1]):
    for col in range(pattern_size[0]):
        worldPoints[row * pattern_size[0] + col] = (2 * col + row % 2, row, 0)

# termination criteria for sub-pixel refinement
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

objPoints = []  # 3-D pattern points, one entry per accepted frame
imgPoints = []  # matching 2-D detections, one entry per accepted frame
numberOfFramesUsed = 0

cap = cv.VideoCapture("http://192.168.0.102:8000/stream.mjpg")  # or: highres.mjpg
# cap = cv.VideoCapture(0)

imgWidth = 0
imgHeight = 0
key = -1
photosTaken = 0

while True:
    ret, frame = cap.read()
    imgHeight, imgWidth, layers = frame.shape
    resizeIMG = cv.resize(frame, (int(imgWidth * resizeValue), int(imgHeight * resizeValue)))
    gray = cv.cvtColor(resizeIMG, cv.COLOR_BGR2GRAY)
    blurGaussian = cv.GaussianBlur(gray, (5, 5), 0)

    retCorner, corners = cv.findCirclesGrid(
        blurGaussian, pattern_size, flags=cv.CALIB_CB_ASYMMETRIC_GRID)

    # `key` holds the key pressed during the previous frame's waitKey
    if key & 0xFF == ord('q'):
        print('quit loop')
        break
    elif key & 0xFF == ord(' '):
        print('will take photo')
        photosTaken += 1
        if retCorner:
            numberOfFramesUsed += 1
            corners = corners.reshape(-1, 2)
            # NOTE(review): cornerSubPix refines checkerboard corners; for
            # circle-grid blob centers it is of questionable benefit -
            # kept for continuity, confirm it does not degrade accuracy.
            corners2 = cv.cornerSubPix(blurGaussian, corners, (11, 11), (-1, -1), criteria)
            # store the pair together so obj/img points stay in sync
            objPoints.append(worldPoints)
            imgPoints.append(corners2)
            frame_vis = blurGaussian.copy()
            cv.drawChessboardCorners(frame_vis, pattern_size, corners2, retCorner)
            cv.imshow('Recognised: ' + str(numberOfFramesUsed), frame_vis)
        else:
            cv.imshow(str(photosTaken), blurGaussian)
            print("photosTaken: ", photosTaken, "Recognised: ", numberOfFramesUsed)
    else:
        cv.imshow('preview', frame)
    key = cv.waitKey(1)

if objPoints:
    print('calibrating...')
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
        objPoints, imgPoints, blurGaussian.shape[::-1], None, None)
    print("calibrateCamera done, getting optimalNewCameraMatrix")
    newCameraMTX, roi = cv.getOptimalNewCameraMatrix(
        mtx, dist, (imgWidth, imgHeight), 0, (imgWidth, imgHeight))
    calibrationfile = cv.FileStorage("calibrationValuesVideo.xml", cv.FILE_STORAGE_WRITE)
    calibrationfile.write("mtx", mtx)
    calibrationfile.write("dist", dist)
    calibrationfile.write("newCameraMTX", newCameraMTX)
    calibrationfile.release()
    print("Camera matrix xml file released")
else:
    print('no corners found (yet)')

cap.release()
cv.destroyAllWindows()
| [
"cv2.findCirclesGrid",
"cv2.drawChessboardCorners",
"cv2.FileStorage",
"cv2.imshow",
"cv2.cornerSubPix",
"cv2.getOptimalNewCameraMatrix",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.GaussianBlur",
"cv2.waitKey"
] | [((322, 378), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""http://192.168.0.102:8000/stream.mjpg"""'], {}), "('http://192.168.0.102:8000/stream.mjpg')\n", (337, 378), True, 'import cv2 as cv\n'), ((2714, 2736), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2734, 2736), True, 'import cv2 as cv\n'), ((666, 707), 'cv2.cvtColor', 'cv.cvtColor', (['resizeIMG', 'cv.COLOR_BGR2GRAY'], {}), '(resizeIMG, cv.COLOR_BGR2GRAY)\n', (677, 707), True, 'import cv2 as cv\n'), ((727, 759), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (742, 759), True, 'import cv2 as cv\n'), ((908, 984), 'cv2.findCirclesGrid', 'cv.findCirclesGrid', (['blurGaussian', '(4, 11)'], {'flags': 'cv.CALIB_CB_ASYMMETRIC_GRID'}), '(blurGaussian, (4, 11), flags=cv.CALIB_CB_ASYMMETRIC_GRID)\n', (926, 984), True, 'import cv2 as cv\n'), ((1989, 2002), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1999, 2002), True, 'import cv2 as cv\n'), ((2083, 2161), 'cv2.calibrateCamera', 'cv.calibrateCamera', (['objPoints', 'imgPoints', 'blurGaussian.shape[::-1]', 'None', 'None'], {}), '(objPoints, imgPoints, blurGaussian.shape[::-1], None, None)\n', (2101, 2161), True, 'import cv2 as cv\n'), ((2274, 2366), 'cv2.getOptimalNewCameraMatrix', 'cv.getOptimalNewCameraMatrix', (['mtx', 'dist', '(imgWidth, imgHeight)', '(0)', '(imgWidth, imgHeight)'], {}), '(mtx, dist, (imgWidth, imgHeight), 0, (imgWidth,\n imgHeight))\n', (2302, 2366), True, 'import cv2 as cv\n'), ((2380, 2447), 'cv2.FileStorage', 'cv.FileStorage', (['"""calibrationValuesVideo.xml"""', 'cv.FILE_STORAGE_WRITE'], {}), "('calibrationValuesVideo.xml', cv.FILE_STORAGE_WRITE)\n", (2394, 2447), True, 'import cv2 as cv\n'), ((1950, 1977), 'cv2.imshow', 'cv.imshow', (['"""preview"""', 'frame'], {}), "('preview', frame)\n", (1959, 1977), True, 'import cv2 as cv\n'), ((149, 166), 'numpy.zeros', 'np.zeros', (['(42, 1)'], {}), '((42, 1))\n', (157, 166), True, 'import numpy as np\n'), ((1275, 1343), 
'cv2.cornerSubPix', 'cv.cornerSubPix', (['blurGaussian', 'corners', '(11, 11)', '(-1, -1)', 'criteria'], {}), '(blurGaussian, corners, (11, 11), (-1, -1), criteria)\n', (1290, 1343), True, 'import cv2 as cv\n'), ((1395, 1453), 'cv2.drawChessboardCorners', 'cv.drawChessboardCorners', (['frame_vis', '(7, 6)', 'corners2', 'ret'], {}), '(frame_vis, (7, 6), corners2, ret)\n', (1419, 1453), True, 'import cv2 as cv\n')] |
'''
Utility functions to analyze particle data.
@author: <NAME> <<EMAIL>>
Units: unless otherwise noted, all quantities are in (combinations of):
mass [M_sun]
position [kpc comoving]
distance, radius [kpc physical]
velocity [km / s]
time [Gyr]
'''
# system ----
from __future__ import absolute_import, division, print_function # python 2 compatability
import numpy as np
from numpy import Inf
# local ----
from . import basic as ut
from . import halo_property
from . import orbit
from . import catalog
#===================================================================================================
# utilities - parsing input arguments
#===================================================================================================
def parse_species(part, species):
    '''
    Normalize input species name[s] into a list of species that exist in the
    particle catalog, warning about and dropping any that are missing.

    Parameters
    ----------
    part : dict : catalog of particles
    species : str or list : name[s] of particle species to analyze

    Returns
    -------
    species : list : name[s] of particle species
    '''
    Say = ut.io.SayClass(parse_species)

    if np.isscalar(species):
        species = [species]

    # expand the shorthand species lists
    if species in (['all'], ['total']):
        species = list(part.keys())
    elif species == ['baryon']:
        species = ['gas', 'star']

    # iterate over a copy so removing entries mid-loop is safe
    for spec_name in list(species):
        if spec_name not in part:
            species.remove(spec_name)
            Say.say('! {} not in particle catalog'.format(spec_name))

    return species
def parse_indices(part_spec, part_indices):
    '''
    Parse input list of particle indices.
    If none given, generate indices for all particles of the species.

    Parameters
    ----------
    part_spec : dict : catalog of particles of given species
    part_indices : array-like : indices of particles

    Returns
    -------
    part_indices : array : indices of particles
    '''
    # caller supplied a non-empty selection - use it as-is
    if part_indices is not None and len(part_indices):
        return part_indices

    # otherwise derive the full index range from whichever array is present
    if 'position' in part_spec:
        part_indices = ut.array.get_arange(part_spec['position'].shape[0])
    elif 'id' in part_spec:
        part_indices = ut.array.get_arange(part_spec['id'].size)
    elif 'mass' in part_spec:
        part_indices = ut.array.get_arange(part_spec['mass'].size)

    return part_indices
def parse_property(parts_or_species, property_name, property_values=None, single_host=True):
    '''
    Get property values, either input or stored in particle catalog.
    List-ify as necessary to match input particle catalog.

    Parameters
    ----------
    parts_or_species : dict or string or list thereof :
        catalog[s] of particles or string[s] of species
    property_name : str : options: 'center_position', 'center_velocity', 'indices'
    property_values : float/array or list thereof : property values to assign
    single_host : bool : use only the primary host (if not input any property_values)

    Returns
    -------
    property_values : float or list
    '''
    assert property_name in ['center_position', 'center_velocity', 'indices']

    def _parse_single(part_or_spec, values):
        # for center properties, fall back to the host value[s] stored on the
        # catalog when the caller supplied none
        if property_name in ('center_position', 'center_velocity'):
            if values is None or not len(values):
                if property_name == 'center_position':
                    values = part_or_spec.host_positions
                else:
                    values = part_or_spec.host_velocities

                if values is None or not len(values):
                    raise ValueError('no input {} and no {} in input catalog'.format(
                        property_name, property_name))

                if single_host:
                    # use only the primary host
                    values = values[0]

        if isinstance(values, list):
            raise ValueError('input list of {}s but input single catalog'.format(property_name))

        return values

    if not isinstance(parts_or_species, list):
        # single particle catalog (or single species name)
        return _parse_single(parts_or_species, property_values)

    # list of catalogs: broadcast a single value across all of them
    if (property_values is None or not len(property_values) or
            not isinstance(property_values, list)):
        property_values = [property_values for _ in parts_or_species]
    if len(property_values) != len(parts_or_species):
        raise ValueError('number of input {}s not match number of input catalogs'.format(
            property_name))

    for i, part_or_spec in enumerate(parts_or_species):
        property_values[i] = _parse_single(part_or_spec, property_values[i])

    return property_values
#===================================================================================================
# id <-> index conversion
#===================================================================================================
def assign_id_to_index(
part, species=['all'], id_name='id', id_min=0, store_as_dict=False, print_diagnostic=True):
'''
Assign, to particle dictionary, arrays that points from object id to species kind and index in
species array.
This is useful for analyses multi-species catalogs with intermixed ids.
Do not assign pointers for ids below id_min.
Parameters
----------
part : dict : catalog of particles of various species
species : str or list : name[s] of species to use: 'all' = use all in particle dictionary
id_name : str : key name for particle id
id_min : int : minimum id in catalog
store_as_dict : bool : whether to store id-to-index pointer as dict instead of array
print_diagnostic : bool : whether to print diagnostic information
'''
Say = ut.io.SayClass(assign_id_to_index)
# get list of species that have valid id key
species = parse_species(part, species)
for spec in species:
assert id_name in part[spec]
# get list of all ids
ids_all = []
for spec in species:
ids_all.extend(part[spec][id_name])
ids_all = np.array(ids_all, dtype=part[spec][id_name].dtype)
if print_diagnostic:
# check if duplicate ids within species
for spec in species:
masks = (part[spec][id_name] >= id_min)
total_number = np.sum(masks)
unique_number = np.unique(part[spec][id_name][masks]).size
if total_number != unique_number:
Say.say('species {} has {} ids that are repeated'.format(
spec, total_number - unique_number))
# check if duplicate ids across species
if len(species) > 1:
masks = (ids_all >= id_min)
total_number = np.sum(masks)
unique_number = np.unique(ids_all[masks]).size
if total_number != unique_number:
Say.say('across all species, {} ids are repeated'.format(
total_number - unique_number))
Say.say('maximum id = {}'.format(ids_all.max()))
part.id_to_index = {}
if store_as_dict:
# store pointers as a dictionary
# store overall dictionary (across all species) and dictionary within each species
# NOTE(review): unlike the array branch below, this branch does not apply
# the id_min cut - pointers are stored for every id. Confirm intended.
for spec in species:
part[spec].id_to_index = {}
for part_i, part_id in enumerate(part[spec][id_name]):
if part_id in part.id_to_index:
# redundant ids - add to existing entry as list
if isinstance(part.id_to_index[part_id], tuple):
part.id_to_index[part_id] = [part.id_to_index[part_id]]
part.id_to_index[part_id].append((spec, part_i))
if part_id in part[spec].id_to_index:
if np.isscalar(part[spec].id_to_index[part_id]):
part[spec].id_to_index[part_id] = [part[spec].id_to_index[part_id]]
part[spec].id_to_index[part_id].append(part_i)
else:
# new id - add as new entry
part.id_to_index[part_id] = (spec, part_i)
part[spec].id_to_index[part_id] = part_i
# convert lists to arrays
dtype = part[spec][id_name].dtype
for part_id in part[spec].id_to_index:
if isinstance(part[spec].id_to_index[part_id], list):
part[spec].id_to_index[part_id] = np.array(
part[spec].id_to_index[part_id], dtype=dtype)
else:
# store pointers as arrays
# one array maps id -> species name, the other id -> index within species;
# '|S6' limits stored species names to 6 bytes
part.id_to_index['species'] = np.zeros(ids_all.max() + 1, dtype='|S6')
dtype = ut.array.parse_data_type(ids_all.max() + 1)
part.id_to_index['index'] = ut.array.get_array_null(ids_all.max() + 1, dtype=dtype)
for spec in species:
masks = (part[spec][id_name] >= id_min)
part.id_to_index['species'][part[spec][id_name][masks]] = spec
part.id_to_index['index'][part[spec][id_name][masks]] = ut.array.get_arange(
part[spec][id_name], dtype=dtype)[masks]
#===================================================================================================
# position, velocity
#===================================================================================================
def get_center_positions(
part, species=['star', 'dark', 'gas'], part_indicess=None, method='center-of-mass',
center_number=1, exclusion_distance=200, center_positions=None, distance_max=Inf,
compare_centers=False, return_array=True):
'''
Get position[s] of center of mass [kpc comoving] using iterative zoom-in on input species.
Parameters
----------
part : dict : dictionary of particles
species : str or list : name[s] of species to use: 'all' = use all in particle dictionary
part_indicess : array or list of arrays : indices of particle to use to define center
use this to include only particles that you know are relevant
method : str : method of centering: 'center-of-mass', 'potential'
center_number : int : number of centers to compute
exclusion_distance : float :
radius around previous center to cut before finding next center [kpc comoving]
center_position : array-like : initial center position[s] to use
distance_max : float : maximum radius to consider initially
compare_centers : bool : whether to run sanity check to compare centers via zoom v potential
return_array : bool :
whether to return single array instead of array of arrays, if center_number = 1
Returns
-------
center_positions : array or array of arrays: position[s] of center[s] [kpc comoving]
'''
Say = ut.io.SayClass(get_center_positions)
assert method in ['center-of-mass', 'potential']
species = parse_species(part, species)
part_indicess = parse_property(species, 'indices', part_indicess)
if center_positions is None or np.ndim(center_positions) == 1:
# list-ify center_positions
center_positions = [center_positions for _ in range(center_number)]
if np.shape(center_positions)[0] != center_number:
raise ValueError('! input center_positions = {} but also input center_number = {}'.format(
center_positions, center_number))
# fall back to center-of-mass zoom when potential is unavailable
if method == 'potential':
if len(species) > 1:
Say.say('! using only first species = {} for centering via potential'.format(
species[0]))
if 'potential' not in part[species[0]]:
Say.say('! {} does not have potential, using center-of-mass zoom instead'.format(
species[0]))
method = 'center-of-mass'
if method == 'potential':
# use single (first) species
spec_i = 0
spec_name = species[spec_i]
# NOTE(review): this passes the species NAME (a string) to parse_indices,
# whereas the branch below passes part[spec_name] - looks inconsistent;
# with a string, parse_indices cannot generate default indices. Verify.
part_indices = parse_indices(spec_name, part_indicess[spec_i])
for center_i, center_position in enumerate(center_positions):
if center_i > 0:
# cull out particles near previous center
distances = get_distances_wrt_center(
part, spec_name, part_indices, center_positions[center_i - 1],
total_distance=True, return_array=True)
# exclusion distance in [kpc comoving]
part_indices = part_indices[
distances > (exclusion_distance * part.info['scalefactor'])]
if center_position is not None and distance_max > 0 and distance_max < Inf:
# impose distance cut around input center
part_indices = get_indices_within_coordinates(
part, spec_name, [0, distance_max], center_position, part_indicess=part_indices,
return_array=True)
# center = position of particle with minimum potential
part_index = np.nanargmin(part[spec_name]['potential'][part_indices])
center_positions[center_i] = part[spec_name]['position'][part_index]
else:
# pool positions and masses across all requested species
for spec_i, spec_name in enumerate(species):
part_indices = parse_indices(part[spec_name], part_indicess[spec_i])
if spec_i == 0:
positions = part[spec_name]['position'][part_indices]
masses = part[spec_name]['mass'][part_indices]
else:
positions = np.concatenate(
[positions, part[spec_name]['position'][part_indices]])
masses = np.concatenate([masses, part[spec_name]['mass'][part_indices]])
for center_i, center_position in enumerate(center_positions):
if center_i > 0:
# remove particles near previous center
distances = ut.coordinate.get_distances(
positions, center_positions[center_i - 1], part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
masks = (distances > (exclusion_distance * part.info['scalefactor']))
positions = positions[masks]
masses = masses[masks]
center_positions[center_i] = ut.coordinate.get_center_position_zoom(
positions, masses, part.info['box.length'], center_position=center_position,
distance_max=distance_max)
center_positions = np.array(center_positions)
if compare_centers:
# sanity check: recompute with the other method and report large offsets
position_dif_max = 1 # [kpc comoving]
if 'potential' not in part[species[0]]:
Say.say('! {} not have potential, cannot compare against zoom center-of-mass'.format(
species[0]))
return center_positions
if method == 'potential':
method_other = 'center-of-mass'
else:
method_other = 'potential'
center_positions_other = get_center_positions(
part, species, part_indicess, method_other, center_number, exclusion_distance,
center_positions, distance_max, compare_centers=False, return_array=False)
position_difs = np.abs(center_positions - center_positions_other)
for pi, position_dif in enumerate(position_difs):
if np.max(position_dif) > position_dif_max:
Say.say('! offset center positions')
Say.say('center position via {}: '.format(method), end='')
ut.io.print_array(center_positions[pi], '{:.3f}')
Say.say('center position via {}: '.format(method_other), end='')
ut.io.print_array(center_positions_other[pi], '{:.3f}')
Say.say('position difference: ', end='')
ut.io.print_array(position_dif, '{:.3f}')
if return_array and center_number == 1:
center_positions = center_positions[0]
return center_positions
def get_center_velocities(
part, species_name='star', part_indices=None, distance_max=15, center_positions=None,
return_array=True):
    '''
    Get velocity[s] [km / s] of center of mass of input species.

    Parameters
    ----------
    part : dict : dictionary of particles
    species_name : str : name of particle species to use
    part_indices : array : indices of particle to use to define center
        use this to exclude particles that you know are not relevant
    distance_max : float : maximum radius to consider [kpc physical]
    center_positions : array or list of arrays: center position[s] [kpc comoving]
        if None, will use default center position[s] in catalog
    return_array : bool :
        whether to return single array instead of array of arrays, if input single center position

    Returns
    -------
    center_velocities : array or array of arrays : velocity[s] of center of mass [km / s]
    '''
    center_positions = parse_property(part, 'center_position', center_positions, single_host=False)
    part_spec = part[species_name]
    part_indices = parse_indices(part_spec, part_indices)

    # convert to [kpc comoving] to match particle positions
    distance_max /= part.snapshot['scalefactor']

    center_velocities = np.zeros(center_positions.shape, part_spec['velocity'].dtype)
    for c_i, c_position in enumerate(center_positions):
        center_velocities[c_i] = ut.coordinate.get_center_velocity(
            part_spec['velocity'][part_indices], part_spec['mass'][part_indices],
            part_spec['position'][part_indices], c_position, distance_max,
            part.info['box.length'])

    if return_array and len(center_velocities) == 1:
        return center_velocities[0]
    return center_velocities
def get_distances_wrt_center(
part, species=['star'], part_indicess=None, center_position=None, rotation=None,
coordinate_system='cartesian', total_distance=False, return_array=True):
'''
Get distances (scalar or vector) between input particles and center_position (input or stored
in particle catalog).
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species to compute
part_indicess : array or list : indices[s] of particles to compute, one array per input species
center_position : array : position of center [kpc comoving]
if None, will use default center position in particle catalog
rotation : bool or array : whether to rotate particles
two options:
(a) if input array of eigen-vectors, will define rotation axes for all species
(b) if True, will rotate to align with principal axes defined by input species
coordinate_system : str : which coordinates to get distances in:
'cartesian' (default), 'cylindrical', 'spherical'
total_distance : bool : whether to compute total/scalar distance
return_array : bool : whether to return single array instead of dict if input single species
Returns
-------
dist : array (object number x dimension number) or dict thereof : [kpc physical]
3-D distance vectors aligned with default x,y,z axes OR
3-D distance vectors aligned with major, medium, minor axis OR
2-D distance vectors along major axes and along minor axis OR
1-D scalar distances
OR
dictionary of above for each species
'''
assert coordinate_system in ('cartesian', 'cylindrical', 'spherical')
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
dist = {}
for spec_i, spec in enumerate(species):
part_indices = parse_indices(part[spec], part_indicess[spec_i])
dist[spec] = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance) # [kpc physical]
# rotation and coordinate conversion apply only to vector distances
if not total_distance:
if rotation is not None:
if rotation is True:
# get principal axes stored in particle dictionary
if (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('! cannot find principal_axes_tensor in species dict')
elif len(rotation):
# use input rotation vectors
rotation_tensor = rotation
dist[spec] = ut.coordinate.get_coordinates_rotated(dist[spec], rotation_tensor)
if coordinate_system in ['cylindrical', 'spherical']:
# convert rotated cartesian vectors into the requested system
dist[spec] = ut.coordinate.get_positions_in_coordinate_system(
dist[spec], 'cartesian', coordinate_system)
if return_array and len(species) == 1:
dist = dist[species[0]]
return dist
def get_velocities_wrt_center(
    part, species=['star'], part_indicess=None, center_velocity=None, center_position=None,
    rotation=False, coordinate_system='cartesian', total_velocity=False, return_array=True):
    '''
    Get velocities (either scalar or vector) between input particles and center_velocity
    (input or stored in particle catalog).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to get
    part_indicess : array or list : indices[s] of particles to select, one array per input species
    center_velocity : array : center velocity [km / s]
        if None, will use default center velocity in catalog
    center_position : array : center position [kpc comoving], to use in computing Hubble flow
        if None, will use default center position in catalog
    rotation : bool or array : whether to rotate particle velocities
        three options:
        (a) if input array of eigen-vectors, will define rotation axes for all species
        (b) if True, will rotate to align with principal axes defined by input species
        (c) if False or None (default), will not rotate
    coordinate_system : str : which coordinates to get positions in:
        'cartesian' (default), 'cylindrical', 'spherical'
    total_velocity : bool : whether to compute total/scalar velocity
    return_array : bool : whether to return array (instead of dict) if input single species

    Returns
    -------
    vel : array or dict thereof :
        velocities (object number x dimension number, or object number) [km / s]
    '''
    assert coordinate_system in ('cartesian', 'cylindrical', 'spherical')

    species = parse_species(part, species)
    center_velocity = parse_property(part, 'center_velocity', center_velocity)
    center_position = parse_property(part, 'center_position', center_position)
    part_indicess = parse_property(species, 'indices', part_indicess)

    vel = {}
    for spec_i, spec in enumerate(species):
        part_indices = parse_indices(part[spec], part_indicess[spec_i])

        vel[spec] = ut.coordinate.get_velocity_differences(
            part[spec]['velocity'][part_indices], center_velocity,
            part[spec]['position'][part_indices], center_position, part.info['box.length'],
            part.snapshot['scalefactor'], part.snapshot['time.hubble'], total_velocity)

        if not total_velocity:
            # resolve the rotation tensor once, then apply it where needed
            # BUG FIX: original tested only `rotation is not None`, so the default
            # rotation=False fell through to len(rotation) and raised TypeError;
            # an empty rotation array also left rotation_tensor unbound (NameError)
            rotation_tensor = None
            if rotation is True:
                # get principal axes stored in particle dictionary
                if (len(part[spec].host_rotation_tensors) and
                        len(part[spec].host_rotation_tensors[0])):
                    rotation_tensor = part[spec].host_rotation_tensors[0]
                else:
                    raise ValueError('! cannot find principal_axes_tensor in species dict')
            elif rotation is not None and rotation is not False and len(rotation):
                # use input rotation vectors
                rotation_tensor = rotation

            if rotation_tensor is not None:
                vel[spec] = ut.coordinate.get_coordinates_rotated(vel[spec], rotation_tensor)

            if coordinate_system in ('cylindrical', 'spherical'):
                # need distance vectors to convert velocities out of cartesian
                distances = ut.coordinate.get_distances(
                    part[spec]['position'][part_indices], center_position,
                    part.info['box.length'], part.snapshot['scalefactor'])  # [kpc physical]
                if rotation_tensor is not None:
                    # rotate distances into the same frame as the velocities
                    distances = ut.coordinate.get_coordinates_rotated(distances, rotation_tensor)
                vel[spec] = ut.coordinate.get_velocities_in_coordinate_system(
                    vel[spec], distances, 'cartesian', coordinate_system)

    if return_array and len(species) == 1:
        vel = vel[species[0]]

    return vel
def get_orbit_dictionary(
    part, species=['star'], part_indicess=None, center_position=None, center_velocity=None,
    return_single=True):
    '''
    Get dictionary of orbital parameters for each input particle species.

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to compute
    part_indicess : array or list : indices[s] of particles to select, one array per input species
    center_position : array : center (reference) position
    center_velocity : array : center (reference) velocity
    return_single : bool :
        whether to return single dict instead of dict of dicts, if single species

    Returns
    -------
    orb : dict : dictionary of orbital properties, one per species (unless return_single)
    '''
    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)
    center_velocity = parse_property(part, 'center_velocity', center_velocity)
    part_indicess = parse_property(species, 'indices', part_indicess)

    orb = {}
    for species_i, species_name in enumerate(species):
        indices = parse_indices(part[species_name], part_indicess[species_i])

        # positions and velocities of the selected particles
        positions = part[species_name]['position'][indices]
        velocities = part[species_name]['velocity'][indices]

        # 3-D separation vectors from center [kpc physical]
        distance_vectors = ut.coordinate.get_distances(
            positions, center_position, part.info['box.length'], part.snapshot['scalefactor'])

        # 3-D velocity differences wrt center, including Hubble flow [km / s]
        velocity_vectors = ut.coordinate.get_velocity_differences(
            velocities, center_velocity, positions, center_position,
            part.info['box.length'], part.snapshot['scalefactor'], part.snapshot['time.hubble'])

        orb[species_name] = orbit.get_orbit_dictionary(distance_vectors, velocity_vectors)

    if return_single and len(species) == 1:
        orb = orb[species[0]]

    return orb
#===================================================================================================
# subsample
#===================================================================================================
def get_indices_within_coordinates(
    part, species=['star'],
    distance_limitss=[], center_position=None,
    velocity_limitss=[], center_velocity=None,
    rotation=None, coordinate_system='cartesian',
    part_indicess=None, return_array=True):
    '''
    Get indices of particles that are within distance and/or velocity coordinate limits from
    center (either input or stored in particle catalog).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to use
    distance_limitss : list or list of lists :
        min and max distance[s], relative to center, to get particles [kpc physical]
        default is 1-D list, but can be 2-D list of 2 or 3 [min, max) pairs to select
        separately along dimensions; if 2-D, need to input *signed* limits
    center_position : array : center position [kpc comoving]
        if None, will use default center position in particle catalog
    velocity_limitss : list or list of lists :
        min and max velocities, relative to center, to get particles [km / s]
        default is 1-D list, but can be 2-D list of 2 or 3 [min, max) pairs to select
        separately along dimensions; if 2-D, need to input *signed* limits
    center_velocity : array : center velocity [km / s]
        if None, will use default center velocity in particle catalog
    rotation : bool or array : whether to rotate particle coordinates
        two options:
        (a) if input array of eigen-vectors, will use to define rotation axes for all species
        (b) if True, will rotate to align with principal axes defined by each input species
    coordinate_system : str : which coordinates to get positions in:
        'cartesian' (default), 'cylindrical', 'spherical'
    part_indicess : array : prior indices[s] of particles to select, one array per input species
    return_array : bool : whether to return single array instead of dict, if input single species

    Returns
    -------
    part_index : dict or array : array or dict of arrays of indices of particles in region
    '''
    def _get_masks_within_limits(coordss, limitss, limits_dimen):
        '''
        Utility: get boolean masks selecting coordinates within limits.
        If limits_dimen == 1, coordss are absolute (scalar) values and limitss is [min, max).
        If limits_dimen == 2, limitss is a list of 2 or 3 signed [min, max) pairs, applied to
        coordss[0], coordss[1], (coordss[2]) in turn.
        '''
        if limits_dimen == 1:
            return (coordss >= np.min(limitss)) * (coordss < np.max(limitss))
        # BUG FIX: the original had a dead `elif distance_limits_dimen == 3` branch (it compared
        # the dimensionality, not len(limitss)) and was missing `*` between two mask terms in
        # the 3-limit cases, which made one boolean array *call* the next (TypeError)
        masks = (coordss[0] >= np.min(limitss[0])) * (coordss[0] < np.max(limitss[0]))
        for dimen_i in range(1, len(limitss)):
            masks = masks * (
                (coordss[dimen_i] >= np.min(limitss[dimen_i])) *
                (coordss[dimen_i] < np.max(limitss[dimen_i])))
        return masks

    assert coordinate_system in ['cartesian', 'cylindrical', 'spherical']
    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)
    if velocity_limitss is not None and len(velocity_limitss):
        center_velocity = parse_property(part, 'center_velocity', center_velocity)
    part_indicess = parse_property(species, 'indices', part_indicess)

    part_index = {}
    for spec_i, spec in enumerate(species):
        part_indices = parse_indices(part[spec], part_indicess[spec_i])

        if len(part_indices) and distance_limitss is not None and len(distance_limitss):
            distance_limits_dimen = np.ndim(distance_limitss)
            if distance_limits_dimen == 1:
                total_distance = True
            elif distance_limits_dimen == 2:
                total_distance = False
                assert len(distance_limitss) in [2, 3]
            else:
                raise ValueError('! cannot parse distance_limitss = {}'.format(distance_limitss))

            if (distance_limits_dimen == 1 and distance_limitss[0] <= 0 and
                    distance_limitss[1] >= Inf):
                pass  # null case, no actual limits imposed, so skip rest
            else:
                distancess = get_distances_wrt_center(
                    part, spec, part_indices, center_position, rotation, coordinate_system,
                    total_distance)
                masks = _get_masks_within_limits(
                    distancess, distance_limitss, distance_limits_dimen)
                part_indices = part_indices[masks]

        if len(part_indices) and velocity_limitss is not None and len(velocity_limitss):
            velocity_limits_dimen = np.ndim(velocity_limitss)
            if velocity_limits_dimen == 1:
                return_total_velocity = True
            elif velocity_limits_dimen == 2:
                return_total_velocity = False
                assert len(velocity_limitss) in [2, 3]
            else:
                raise ValueError('! cannot parse velocity_limitss = {}'.format(velocity_limitss))

            if (velocity_limits_dimen == 1 and velocity_limitss[0] <= 0 and
                    velocity_limitss[1] >= Inf):
                pass  # null case, no actual limits imposed, so skip rest
            else:
                velocitiess = get_velocities_wrt_center(
                    part, spec, part_indices, center_velocity, center_position, rotation,
                    coordinate_system, return_total_velocity)
                masks = _get_masks_within_limits(
                    velocitiess, velocity_limitss, velocity_limits_dimen)
                part_indices = part_indices[masks]

        part_index[spec] = part_indices

    if return_array and len(species) == 1:
        part_index = part_index[species[0]]

    return part_index
def get_indices_id_kind(
    part, species=['star'], id_kind='unique', part_indicess=None, return_array=True):
    '''
    Get indices of particles that either are unique (no other particle of the same species has
    the same id) or multiple (another particle of the same species shares the same id).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species
    id_kind : str : id kind of particles to get: 'unique', 'multiple'
    part_indicess : array : prior indices[s] of particles to select, one array per input species
    return_array : bool : whether to return single array instead of dict, if input single species

    Returns
    -------
    part_index : dict or array : array or dict of arrays of indices of particles of given id kind
    '''
    species = parse_species(part, species)
    part_indicess = parse_property(species, 'indices', part_indicess)

    assert id_kind in ['unique', 'multiple']

    part_index = {}
    for species_i, species_name in enumerate(species):
        indices = parse_indices(part[species_name], part_indicess[species_i])
        ids = part[species_name]['id'][indices]

        # first-occurrence positions of each distinct id, and its occurrence count
        _unique_ids, first_iis, id_counts = np.unique(
            ids, return_index=True, return_counts=True)
        # particle indices whose id occurs exactly once
        indices_unique = np.sort(indices[first_iis[id_counts == 1]])

        if id_kind == 'unique':
            part_index[species_name] = indices_unique
        elif id_kind == 'multiple':
            # everything not unique shares its id with another particle
            part_index[species_name] = np.setdiff1d(indices, indices_unique)
        else:
            raise ValueError('! not recognize id_kind = {}'.format(id_kind))

    if return_array and len(species) == 1:
        part_index = part_index[species[0]]

    return part_index
#===================================================================================================
# halo/galaxy major/minor axes
#===================================================================================================
def get_principal_axes(
    part, species_name='star', distance_max=Inf, mass_percent=None, age_percent=None, age_limits=[],
    center_positions=None, center_velocities=None, part_indices=None, return_array=True,
    print_results=True):
    '''
    Get reverse-sorted eigen-vectors, eigen-values, and axis ratios of principal axes of
    each host galaxy/halo.
    Ensure that principal axes are oriented so median v_phi > 0.

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species_name : str : name of particle species to use
    distance_max : float : maximum distance to select particles [kpc physical]
    mass_percent : float : keep particles within the distance that encloses mass percent [0, 100]
        of all particles within distance_max
    age_percent : float : use the youngest age_percent of particles within distance cut
    age_limits : float : use only particles within age limits
    center_positions : array or array of arrays : position[s] of center[s] [kpc comoving]
    center_velocities : array or array of arrays : velocity[s] of center[s] [km / s]
    part_indices : array : indices[s] of particles to select
    return_array : bool :
        whether to return single array for each property, instead of array of arrays, if single host
    print_results : bool : whether to print axis ratios

    Returns
    -------
    principal_axes = {
        'rotation.tensor': array : rotation vectors that define max, med, min axes
        'eigen.values': array : eigen-values of max, med, min axes
        'axis.ratios': array : ratios of principal axes
    }
    '''
    Say = ut.io.SayClass(get_principal_axes)

    # one center per host; single_host=False keeps the array-of-arrays shape
    center_positions = parse_property(part, 'center_position', center_positions, single_host=False)
    center_velocities = parse_property(
        part, 'center_velocity', center_velocities, single_host=False)
    part_indices = parse_indices(part[species_name], part_indices)

    principal_axes = {
        'rotation.tensor': [],
        'eigen.values': [],
        'axis.ratios': [],
    }

    # compute principal axes independently for each host center
    for center_i, center_position in enumerate(center_positions):
        distance_vectors = ut.coordinate.get_distances(
            part[species_name]['position'][part_indices], center_position, part.info['box.length'],
            part.snapshot['scalefactor'])  # [kpc physical]
        distances = np.sqrt(np.sum(distance_vectors ** 2, 1))  # scalar 3-D distances

        # base selection: particles within distance_max
        masks = (distances < distance_max)

        if mass_percent:
            # tighten cut to the radius enclosing mass_percent of the (weighted) mass
            distance_percent = ut.math.percentile_weighted(
                distances[masks], mass_percent,
                part[species_name].prop('mass', part_indices[masks]))
            masks *= (distances < distance_percent)

        if age_percent or (age_limits is not None and len(age_limits)):
            # age cuts require formation times in the catalog
            if 'form.scalefactor' not in part[species_name]:
                raise ValueError('! input age constraints but age not in {} catalog'.format(
                    species_name))

            if age_percent and (age_limits is not None and len(age_limits)):
                # age_percent takes precedence when both are supplied
                Say.say('input both age_percent and age_limits, using only age_percent')

            if age_percent:
                # select the youngest age_percent of particles (mass-weighted percentile)
                age_max = ut.math.percentile_weighted(
                    part[species_name].prop('age', part_indices[masks]), age_percent,
                    part[species_name].prop('mass', part_indices[masks]))
                age_limits_use = [0, age_max]
            else:
                age_limits_use = age_limits

            Say.say('using {} particles with age = {} Gyr'.format(
                species_name, ut.array.get_limits_string(age_limits_use)))

            # half-open age interval [min, max)
            masks *= ((part[species_name].prop('age', part_indices) >= min(age_limits_use)) *
                      (part[species_name].prop('age', part_indices) < max(age_limits_use)))

        # principal axes of the mass distribution of the selected particles
        rotation_tensor, eigen_values, axis_ratios = ut.coordinate.get_principal_axes(
            distance_vectors[masks], part[species_name].prop('mass', part_indices[masks]),
            print_results)

        # test if need to flip a principal axis to ensure that net v_phi > 0
        velocity_vectors = ut.coordinate.get_velocity_differences(
            part[species_name].prop('velocity', part_indices[masks]), center_velocities[center_i])
        velocity_vectors_rot = ut.coordinate.get_coordinates_rotated(
            velocity_vectors, rotation_tensor)
        distance_vectors_rot = ut.coordinate.get_coordinates_rotated(
            distance_vectors[masks], rotation_tensor)
        velocity_vectors_cyl = ut.coordinate.get_velocities_in_coordinate_system(
            velocity_vectors_rot, distance_vectors_rot, 'cartesian', 'cylindrical')
        # NOTE(review): assumes cylindrical velocity column 2 is v_phi and that flipping
        # rotation_tensor[1] (medium axis) reverses its sign — confirm against ut.coordinate
        if np.median(velocity_vectors_cyl[:, 2]) < 0:
            rotation_tensor[1] *= -1  # flip so net v_phi is positive

        principal_axes['rotation.tensor'].append(rotation_tensor)
        principal_axes['eigen.values'].append(eigen_values)
        principal_axes['axis.ratios'].append(axis_ratios)

    # convert per-host lists to arrays
    for k in principal_axes:
        principal_axes[k] = np.array(principal_axes[k])

    if return_array and np.shape(center_positions)[0] == 1:
        # single host: return bare arrays instead of arrays of arrays
        for k in principal_axes:
            principal_axes[k] = principal_axes[k][0]

    return principal_axes
#===================================================================================================
# halo/galaxy radius
#===================================================================================================
def get_halo_properties(
    part, species=['dark', 'star', 'gas'], virial_kind='200m',
    distance_limits=[10, 600], distance_bin_width=0.02, distance_scaling='log',
    center_position=None, return_array=True, print_results=True):
    '''
    Compute halo radius according to virial_kind.
    Return this radius, the mass from each species within this radius, and particle indices
    within this radius (if get_part_indices).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species : str or list : name[s] of particle species to use: 'all' = use all in dictionary
    virial_kind : str : virial overdensity definition
        '200m' -> average density is 200 x matter
        '200c' -> average density is 200 x critical
        'vir' -> average density is Bryan & Norman
        'fof.100m' -> edge density is 100 x matter, for FoF(ll=0.168)
        'fof.60m' -> edge density is 60 x matter, for FoF(ll=0.2)
    distance_limits : list : min and max distance to consider [kpc physical]
    distance_bin_width : float : width of distance bin
    distance_scaling : str : scaling of distance: 'log', 'linear'
    center_position : array : center position to use
        if None, will use default center position in catalog
    return_array : bool : whether to return array (instead of dict) if input single species
    print_results : bool : whether to print radius and mass

    Returns
    -------
    halo_prop : dict : dictionary of halo properties:
        radius : float : halo radius [kpc physical]
        mass : float : mass within radius [M_sun]
        indices : array : indices of particles within radius (if get_part_indices)
    '''
    distance_limits = np.asarray(distance_limits)

    Say = ut.io.SayClass(get_halo_properties)

    species = parse_species(part, species)
    center_position = parse_property(part, 'center_position', center_position)

    HaloProperty = halo_property.HaloPropertyClass(part.Cosmology, part.snapshot['redshift'])

    DistanceBin = ut.binning.DistanceBinClass(
        distance_scaling, distance_limits, width=distance_bin_width, dimension_number=3)

    # target mean interior density for this virial definition
    overdensity, reference_density = HaloProperty.get_overdensity(virial_kind, units='kpc physical')
    virial_density = overdensity * reference_density

    # accumulate cumulative mass profile, summed over all input species
    mass_cum_in_bins = np.zeros(DistanceBin.number)
    distancess = []
    for spec_i, spec in enumerate(species):
        distances = ut.coordinate.get_distances(
            part[spec]['position'], center_position, part.info['box.length'],
            part.snapshot['scalefactor'], total_distance=True)  # [kpc physical]
        distancess.append(distances)
        mass_in_bins = DistanceBin.get_histogram(distancess[spec_i], False, part[spec]['mass'])

        # get mass within distance minimum, for computing cumulative values
        distance_indices = np.where(distancess[spec_i] < np.min(distance_limits))[0]
        mass_cum_in_bins += (np.sum(part[spec]['mass'][distance_indices]) +
                             np.cumsum(mass_in_bins))

    if part.info['baryonic'] and len(species) == 1 and species[0] == 'dark':
        # correct for baryonic mass if analyzing only dark matter in baryonic simulation
        Say.say('! using only dark particles, so correcting for baryonic mass')
        mass_factor = 1 + part.Cosmology['omega_baryon'] / part.Cosmology['omega_matter']
        mass_cum_in_bins *= mass_factor

    # cumulative densities in bins
    density_cum_in_bins = mass_cum_in_bins / DistanceBin.volumes_cum

    # get smallest radius that satisfies virial density
    for d_bin_i in range(DistanceBin.number - 1):
        if (density_cum_in_bins[d_bin_i] >= virial_density and
                density_cum_in_bins[d_bin_i + 1] < virial_density):
            # interpolate in log space
            log_halo_radius = np.interp(
                np.log10(virial_density), np.log10(density_cum_in_bins[[d_bin_i + 1, d_bin_i]]),
                DistanceBin.log_maxs[[d_bin_i + 1, d_bin_i]])
            halo_radius = 10 ** log_halo_radius
            break
    else:
        # for/else: loop finished without break, so no density crossing was found
        Say.say('! could not determine halo R_{}'.format(virial_kind))
        if density_cum_in_bins[0] < virial_density:
            Say.say('distance min = {:.1f} kpc already is below virial density = {}'.format(
                distance_limits.min(), virial_density))
            Say.say('decrease distance_limits')
        elif density_cum_in_bins[-1] > virial_density:
            Say.say('distance max = {:.1f} kpc still is above virial density = {}'.format(
                distance_limits.max(), virial_density))
            Say.say('increase distance_limits')
        else:
            Say.say('not sure why!')
        return

    # get maximum of V_circ = sqrt(G M(< r) / r)
    vel_circ_in_bins = ut.constant.km_per_kpc * np.sqrt(
        ut.constant.grav_kpc_msun_sec * mass_cum_in_bins / DistanceBin.maxs)
    vel_circ_max = np.max(vel_circ_in_bins)
    vel_circ_max_radius = DistanceBin.maxs[np.argmax(vel_circ_in_bins)]

    # total mass and particle indices within the halo radius
    halo_mass = 0
    part_indices = {}
    for spec_i, spec in enumerate(species):
        masks = (distancess[spec_i] < halo_radius)
        halo_mass += np.sum(part[spec]['mass'][masks])
        part_indices[spec] = ut.array.get_arange(part[spec]['mass'])[masks]

    if print_results:
        Say.say(
            'R_{} = {:.1f} kpc\n M_{} = {} M_sun, log = {}\n V_max = {:.1f} km/s'.format(
                virial_kind, halo_radius, virial_kind,
                ut.io.get_string_from_numbers(halo_mass, 2),
                ut.io.get_string_from_numbers(np.log10(halo_mass), 2),
                vel_circ_max)
        )

    halo_prop = {}
    halo_prop['radius'] = halo_radius
    halo_prop['mass'] = halo_mass
    halo_prop['vel.circ.max'] = vel_circ_max
    halo_prop['vel.circ.max.radius'] = vel_circ_max_radius
    if return_array and len(species) == 1:
        part_indices = part_indices[species[0]]
    halo_prop['indices'] = part_indices

    return halo_prop
def get_galaxy_properties(
    part, species_name='star', edge_kind='mass.percent', edge_value=90,
    distance_max=20, distance_bin_width=0.02, distance_scaling='log', center_position=None,
    axis_kind='', rotation_tensor=None, rotation_distance_max=20,
    other_axis_distance_limits=None, part_indices=None, print_results=True):
    '''
    Compute galaxy radius according to edge_kind.
    Return this radius, the mass from species within this radius, particle indices within this
    radius, and rotation vectors (if applicable).

    Parameters
    ----------
    part : dict : catalog of particles at snapshot
    species_name : str : name of particle species to use
    edge_kind : str : method to define galaxy radius
        'mass.percent' = radius at which edge_value (percent) of stellar mass within distance_max
        'density' = radius at which density is edge_value [log(M_sun / kpc^3)]
    edge_value : float : value to use to define galaxy radius
    distance_max : float : maximum distance to consider [kpc physical]
    distance_bin_width : float : width of distance bin
    distance_scaling : str : distance bin scaling: 'log', 'linear'
    axis_kind : str : 'major', 'minor', 'both'
    rotation_tensor : array : rotation vectors that define principal axes
    rotation_distance_max : float :
        maximum distance to use in defining rotation vectors of principal axes [kpc physical]
    other_axis_distance_limits : float :
        min and max distances along other axis[s] to keep particles [kpc physical]
    center_position : array : center position [kpc comoving]
        if None, will use default center position in catalog
    part_indices : array : star particle indices (if already know which ones are close)
    print_results : bool : whether to print radius and mass of galaxy

    Returns
    -------
    gal_prop : dict : dictionary of galaxy properties:
        radius or radius.major & radius.minor : float : galaxy radius[s] [kpc physical]
        mass : float : mass within radius[s] [M_sun]
        indices : array : indices of particles within radius[s] (if get_part_indices)
        rotation.tensor : array : eigen-vectors that defined rotation (if axis_kind == 'both')
    '''
    def get_radius_mass_indices(
        masses, distances, distance_scaling, distance_limits, distance_bin_width, dimension_number,
        edge_kind, edge_value):
        '''
        Utility function: get the radius at which edge_kind reaches edge_value, the mass within
        that radius, and the indices (into masses/distances) of particles within it.
        Returns None if no such radius can be found within distance_limits.
        '''
        Say = ut.io.SayClass(get_radius_mass_indices)

        DistanceBin = ut.binning.DistanceBinClass(
            distance_scaling, distance_limits, width=distance_bin_width,
            dimension_number=dimension_number)

        # get masses in distance bins
        mass_in_bins = DistanceBin.get_histogram(distances, False, masses)

        if edge_kind == 'mass.percent':
            # get mass within distance minimum, for computing cumulative values
            d_indices = np.where(distances < np.min(distance_limits))[0]
            log_masses_cum = ut.math.get_log(np.sum(masses[d_indices]) + np.cumsum(mass_in_bins))
            # target cumulative log mass = edge_value percent of the total
            log_mass = np.log10(edge_value / 100) + log_masses_cum.max()

            try:
                # interpolate in log space
                log_radius = np.interp(log_mass, log_masses_cum, DistanceBin.log_maxs)
            except ValueError:
                Say.say('! could not find object radius - increase distance_max')
                return

        elif edge_kind == 'density':
            log_density_in_bins = ut.math.get_log(mass_in_bins / DistanceBin.volumes)
            # use only bins with defined density (has particles)
            d_bin_indices = np.arange(DistanceBin.number)[np.isfinite(log_density_in_bins)]
            # get smallest radius that satisfies density threshold
            # NOTE(review): assumes the threshold crossing occurs before the last populated bin;
            # d_bin_indices[d_bin_ii + 1] would raise IndexError on the final bin - confirm
            for d_bin_ii, d_bin_i in enumerate(d_bin_indices):
                d_bin_i_plus_1 = d_bin_indices[d_bin_ii + 1]
                if (log_density_in_bins[d_bin_i] >= edge_value and
                        log_density_in_bins[d_bin_i_plus_1] < edge_value):
                    # interpolate in log space
                    log_radius = np.interp(
                        edge_value, log_density_in_bins[[d_bin_i_plus_1, d_bin_i]],
                        DistanceBin.log_maxs[[d_bin_i_plus_1, d_bin_i]])
                    break
            else:
                Say.say('! could not find object radius - increase distance_max')
                return

        radius = 10 ** log_radius

        masks = (distances < radius)
        mass = np.sum(masses[masks])
        indices = ut.array.get_arange(masses)[masks]

        return radius, mass, indices

    # start function
    Say = ut.io.SayClass(get_galaxy_properties)

    distance_min = 0.001  # [kpc physical]
    distance_limits = [distance_min, distance_max]

    if edge_kind == 'mass.percent':
        # dealing with cumulative value - stable enough to decrease bin width
        distance_bin_width *= 0.1

    center_position = parse_property(part, 'center_position', center_position)

    if part_indices is None or not len(part_indices):
        # default to all particles of this species
        part_indices = ut.array.get_arange(part[species_name]['position'].shape[0])

    distance_vectors = ut.coordinate.get_distances(
        part[species_name]['position'][part_indices], center_position,
        part.info['box.length'], part.snapshot['scalefactor'])  # [kpc physical]
    distances = np.sqrt(np.sum(distance_vectors ** 2, 1))  # 3-D distance

    masses = part[species_name].prop('mass', part_indices)

    if axis_kind:
        # radius along 2-D major axes (projected radius) or along 1-D minor axis (height)
        assert axis_kind in ['major', 'minor', 'both']
        if rotation_tensor is None or not len(rotation_tensor):
            if (len(part[species_name].host_rotation_tensors) and
                    len(part[species_name].host_rotation_tensors[0])):
                # use only the primary host
                rotation_tensor = part[species_name].host_rotation_tensors[0]
            else:
                # compute principal axes from particles within rotation_distance_max
                masks = (distances < rotation_distance_max)
                rotation_tensor = ut.coordinate.get_principal_axes(
                    distance_vectors[masks], masses[masks])[0]

        distance_vectors = ut.coordinate.get_coordinates_rotated(
            distance_vectors, rotation_tensor=rotation_tensor)
        distances_cyl = ut.coordinate.get_positions_in_coordinate_system(
            distance_vectors, 'cartesian', 'cylindrical')
        # NOTE(review): assumes cylindrical columns are (R, Z, phi), so column 0 is the
        # in-plane (major) distance and column 1 the vertical (minor) - confirm in ut.coordinate
        major_distances, minor_distances = distances_cyl[:, 0], distances_cyl[:, 1]
        minor_distances = np.abs(minor_distances)  # need only absolute distances

        if axis_kind in ['major', 'minor']:
            if axis_kind == 'minor':
                dimension_number = 1
                distances = minor_distances
                other_distances = major_distances
            elif axis_kind == 'major':
                dimension_number = 2
                distances = major_distances
                other_distances = minor_distances

            if (other_axis_distance_limits is not None and
                    (min(other_axis_distance_limits) > 0 or max(other_axis_distance_limits) < Inf)):
                # restrict to particles within limits along the other axis
                masks = ((other_distances >= min(other_axis_distance_limits)) *
                         (other_distances < max(other_axis_distance_limits)))
                distances = distances[masks]
                masses = masses[masks]
    else:
        # spherical average
        dimension_number = 3

    gal_prop = {}

    if axis_kind == 'both':
        # first get 3-D radius
        galaxy_radius_3d, _galaxy_mass_3d, indices = get_radius_mass_indices(
            masses, distances, distance_scaling, distance_limits, distance_bin_width, 3,
            edge_kind, edge_value)

        galaxy_radius_major = galaxy_radius_3d
        axes_mass_dif = 1

        # then iterate to get both major and minor axes, until the enclosed masses from the
        # two fits agree to within 0.5%
        while axes_mass_dif > 0.005:
            # get 1-D radius along minor axis
            masks = (major_distances < galaxy_radius_major)
            galaxy_radius_minor, galaxy_mass_minor, indices = get_radius_mass_indices(
                masses[masks], minor_distances[masks], distance_scaling, distance_limits,
                distance_bin_width, 1, edge_kind, edge_value)

            # get 2-D radius along major axes
            masks = (minor_distances < galaxy_radius_minor)
            galaxy_radius_major, galaxy_mass_major, indices = get_radius_mass_indices(
                masses[masks], major_distances[masks], distance_scaling, distance_limits,
                distance_bin_width, 2, edge_kind, edge_value)

            axes_mass_dif = (abs(galaxy_mass_major - galaxy_mass_minor) /
                             (0.5 * (galaxy_mass_major + galaxy_mass_minor)))

        # final selection: inside both the major and minor radii
        indices = (major_distances < galaxy_radius_major) * (minor_distances < galaxy_radius_minor)

        gal_prop['radius.major'] = galaxy_radius_major
        gal_prop['radius.minor'] = galaxy_radius_minor
        gal_prop['mass'] = galaxy_mass_major
        gal_prop['log mass'] = np.log10(galaxy_mass_major)
        gal_prop['rotation.tensor'] = rotation_tensor
        gal_prop['indices'] = part_indices[indices]

        if print_results:
            Say.say('R_{:.0f} along major, minor axes = {:.2f}, {:.2f} kpc physical'.format(
                edge_value, galaxy_radius_major, galaxy_radius_minor))
    else:
        galaxy_radius, galaxy_mass, indices = get_radius_mass_indices(
            masses, distances, distance_scaling, distance_limits, distance_bin_width,
            dimension_number, edge_kind, edge_value)

        gal_prop['radius'] = galaxy_radius
        gal_prop['mass'] = galaxy_mass
        gal_prop['log mass'] = np.log10(galaxy_mass)
        gal_prop['indices'] = part_indices[indices]

        if print_results:
            Say.say('R_{:.0f} = {:.2f} kpc physical'.format(edge_value, galaxy_radius))

    if print_results:
        Say.say('M_star = {:.2e} M_sun, log = {:.2f}'.format(
            gal_prop['mass'], gal_prop['log mass']))

    return gal_prop
#===================================================================================================
# profiles of properties
#===================================================================================================
class SpeciesProfileClass(ut.binning.DistanceBinClass):
'''
    Get profiles of either histogram/sum or statistics (such as average, median) of given
property for given particle species.
__init__ is defined via ut.binning.DistanceBinClass
'''
def get_profiles(
self, part, species=['all'],
property_name='', property_statistic='sum', weight_by_mass=False,
center_position=None, center_velocity=None, rotation=None,
other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Parse inputs into either get_sum_profiles() or get_statistics_profiles().
If know what you want, can skip this and jump to those functions.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
property_statistic : str : statistic to get profile of:
'sum', 'sum.cum', 'density', 'density.cum', 'vel.circ'
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if ('sum' in property_statistic or 'vel.circ' in property_statistic or
'density' in property_statistic):
pros = self.get_sum_profiles(
part, species, property_name, center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
else:
pros = self.get_statistics_profiles(
part, species, property_name, weight_by_mass, center_position, center_velocity,
rotation, other_axis_distance_limits, property_select, part_indicess)
for k in pros:
if '.cum' in property_statistic or 'vel.circ' in property_statistic:
pros[k]['distance'] = pros[k]['distance.cum']
pros[k]['log distance'] = pros[k]['log distance.cum']
else:
pros[k]['distance'] = pros[k]['distance.mid']
pros[k]['log distance'] = pros[k]['log distance.mid']
return pros
def get_sum_profiles(
self, part, species=['all'], property_name='mass', center_position=None,
rotation=None, other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Get profiles of summed quantity (such as mass or density) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : property to get sum of
center_position : list : center position
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if 'gas' in species and 'consume.time' in property_name:
pros_mass = self.get_sum_profiles(
part, species, 'mass', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros_sfr = self.get_sum_profiles(
part, species, 'sfr', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros = pros_sfr
for k in pros_sfr['gas']:
if 'distance' not in k:
pros['gas'][k] = pros_mass['gas'][k] / pros_sfr['gas'][k] / 1e9
return pros
pros = {}
Fraction = ut.math.FractionClass()
if np.isscalar(species):
species = [species]
if species == ['baryon']:
# treat this case specially for baryon fraction
species = ['gas', 'star', 'dark', 'dark2']
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
assert 0 < self.dimension_number <= 3
for spec_i, spec in enumerate(species):
part_indices = part_indicess[spec_i]
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[spec].prop(property_name))
if property_select:
part_indices = catalog.get_indices_catalog(
part[spec], property_select, part_indices)
prop_values = part[spec].prop(property_name, part_indices)
if self.dimension_number == 3:
# simple case: profile using scalar distance
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
elif self.dimension_number in [1, 2]:
# other cases: profile along R (2 major axes) or Z (minor axis)
if rotation is not None and not isinstance(rotation, bool) and len(rotation):
rotation_tensor = rotation
elif (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('want 2-D or 1-D profile but no means to define rotation')
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation_tensor,
coordinate_system='cylindrical')
# ensure all distances are positive definite
distancess = np.abs(distancess)
if self.dimension_number == 1:
# compute profile along minor axis (Z)
distances = distancess[:, 1]
other_distances = distancess[:, 0]
elif self.dimension_number == 2:
# compute profile along major axes (R)
distances = distancess[:, 0]
other_distances = distancess[:, 1]
if (other_axis_distance_limits is not None and
(min(other_axis_distance_limits) > 0 or
max(other_axis_distance_limits) < Inf)):
masks = ((other_distances >= min(other_axis_distance_limits)) *
(other_distances < max(other_axis_distance_limits)))
distances = distances[masks]
prop_values = prop_values[masks]
pros[spec] = self.get_sum_profile(distances, prop_values) # defined in DistanceBinClass
props = [pro_prop for pro_prop in pros[species[0]] if 'distance' not in pro_prop]
props_dist = [pro_prop for pro_prop in pros[species[0]] if 'distance' in pro_prop]
if property_name == 'mass':
# create dictionary for baryonic mass
if 'star' in species or 'gas' in species:
spec_new = 'baryon'
pros[spec_new] = {}
for spec in np.intersect1d(species, ['star', 'gas']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = np.array(pros[spec][pro_prop])
elif 'log' in pro_prop:
pros[spec_new][pro_prop] = ut.math.get_log(
10 ** pros[spec_new][pro_prop] +
10 ** pros[spec][pro_prop])
else:
pros[spec_new][pro_prop] += pros[spec][pro_prop]
for pro_prop in props_dist:
pros[spec_new][pro_prop] = pros[species[0]][pro_prop]
species.append(spec_new)
if len(species) > 1:
# create dictionary for total mass
spec_new = 'total'
pros[spec_new] = {}
for spec in np.setdiff1d(species, ['baryon', 'total']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = np.array(pros[spec][pro_prop])
elif 'log' in pro_prop:
pros[spec_new][pro_prop] = ut.math.get_log(
10 ** pros[spec_new][pro_prop] +
10 ** pros[spec][pro_prop])
else:
pros[spec_new][pro_prop] += pros[spec][pro_prop]
for pro_prop in props_dist:
pros[spec_new][pro_prop] = pros[species[0]][pro_prop]
species.append(spec_new)
# create mass fraction wrt total mass
for spec in np.setdiff1d(species, ['total']):
for pro_prop in ['sum', 'sum.cum']:
pros[spec][pro_prop + '.fraction'] = Fraction.get_fraction(
pros[spec][pro_prop], pros['total'][pro_prop])
if spec == 'baryon':
# units of cosmic baryon fraction
pros[spec][pro_prop + '.fraction'] /= (
part.Cosmology['omega_baryon'] / part.Cosmology['omega_matter'])
# create circular velocity = sqrt (G m(< r) / r)
for spec in species:
pros[spec]['vel.circ'] = halo_property.get_circular_velocity(
pros[spec]['sum.cum'], pros[spec]['distance.cum'])
return pros
def get_statistics_profiles(
self, part, species=['all'], property_name='', weight_by_mass=True,
center_position=None, center_velocity=None, rotation=None, other_axis_distance_limits=None,
property_select={}, part_indicess=None):
'''
Get profiles of statistics (such as median, average) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array or list : indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
pros = {}
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
if 'velocity' in property_name:
center_velocity = parse_property(part, 'center_velocity', center_velocity)
part_indicess = parse_property(species, 'indices', part_indicess)
assert 0 < self.dimension_number <= 3
for spec_i, spec in enumerate(species):
prop_test = property_name
if 'velocity' in prop_test:
prop_test = 'velocity' # treat velocity specially because compile below
assert part[spec].prop(prop_test) is not None
part_indices = part_indicess[spec_i]
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[spec].prop(property_name))
if property_select:
part_indices = catalog.get_indices_catalog(
part[spec], property_select, part_indices)
masses = None
if weight_by_mass:
masses = part[spec].prop('mass', part_indices)
if 'velocity' in property_name:
distance_vectors = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor']) # [kpc physical]
velocity_vectors = ut.coordinate.get_velocity_differences(
part[spec]['velocity'][part_indices], center_velocity,
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], part.snapshot['time.hubble'])
# defined in DistanceBinClass
pro = self.get_velocity_profile(distance_vectors, velocity_vectors, masses)
pros[spec] = pro[property_name.replace('host.', '')]
for prop in pro:
if 'velocity' not in prop:
pros[spec][prop] = pro[prop]
else:
prop_values = part[spec].prop(property_name, part_indices)
if self.dimension_number == 3:
# simple case: profile using total distance [kpc physical]
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor'], total_distance=True)
elif self.dimension_number in [1, 2]:
# other cases: profile along R (2 major axes) or Z (minor axis)
if rotation is not None and not isinstance(rotation, bool) and len(rotation):
rotation_tensor = rotation
elif (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('want 2-D or 1-D profile but no means to define rotation')
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation_tensor, 'cylindrical')
distancess = np.abs(distancess)
if self.dimension_number == 1:
# compute profile alongminor axis (Z)
distances = distancess[:, 1]
other_distances = distancess[:, 0]
elif self.dimension_number == 2:
# compute profile along 2 major axes (R)
distances = distancess[:, 0]
other_distances = distancess[:, 1]
if (other_axis_distance_limits is not None and
(min(other_axis_distance_limits) >= 0 or
max(other_axis_distance_limits) < Inf)):
masks = ((other_distances >= min(other_axis_distance_limits)) *
(other_distances < max(other_axis_distance_limits)))
distances = distances[masks]
masses = masses[masks]
prop_values = prop_values[masks]
# defined in DistanceBinClass
pros[spec] = self.get_statistics_profile(distances, prop_values, masses)
return pros
| [
"numpy.log10",
"numpy.sqrt",
"numpy.array",
"numpy.isfinite",
"numpy.arange",
"numpy.nanargmin",
"numpy.isscalar",
"numpy.sort",
"numpy.asarray",
"numpy.ndim",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"numpy.abs",
"numpy.argmax",
"numpy.interp",
"numpy.shape",
"numpy.intersec... | [((1139, 1159), 'numpy.isscalar', 'np.isscalar', (['species'], {}), '(species)\n', (1150, 1159), True, 'import numpy as np\n'), ((6332, 6382), 'numpy.array', 'np.array', (['ids_all'], {'dtype': 'part[spec][id_name].dtype'}), '(ids_all, dtype=part[spec][id_name].dtype)\n', (6340, 6382), True, 'import numpy as np\n'), ((14522, 14548), 'numpy.array', 'np.array', (['center_positions'], {}), '(center_positions)\n', (14530, 14548), True, 'import numpy as np\n'), ((17215, 17285), 'numpy.zeros', 'np.zeros', (['center_positions.shape', "part[species_name]['velocity'].dtype"], {}), "(center_positions.shape, part[species_name]['velocity'].dtype)\n", (17223, 17285), True, 'import numpy as np\n'), ((44475, 44502), 'numpy.asarray', 'np.asarray', (['distance_limits'], {}), '(distance_limits)\n', (44485, 44502), True, 'import numpy as np\n'), ((45084, 45112), 'numpy.zeros', 'np.zeros', (['DistanceBin.number'], {}), '(DistanceBin.number)\n', (45092, 45112), True, 'import numpy as np\n'), ((47686, 47710), 'numpy.max', 'np.max', (['vel_circ_in_bins'], {}), '(vel_circ_in_bins)\n', (47692, 47710), True, 'import numpy as np\n'), ((15228, 15277), 'numpy.abs', 'np.abs', (['(center_positions - center_positions_other)'], {}), '(center_positions - center_positions_other)\n', (15234, 15277), True, 'import numpy as np\n'), ((36523, 36608), 'numpy.unique', 'np.unique', (["part[spec]['id'][part_indices]"], {'return_index': '(True)', 'return_counts': '(True)'}), "(part[spec]['id'][part_indices], return_index=True, return_counts=True\n )\n", (36532, 36608), True, 'import numpy as np\n'), ((36640, 36680), 'numpy.sort', 'np.sort', (['part_indices[piis[counts == 1]]'], {}), '(part_indices[piis[counts == 1]])\n', (36647, 36680), True, 'import numpy as np\n'), ((42352, 42379), 'numpy.array', 'np.array', (['principal_axes[k]'], {}), '(principal_axes[k])\n', (42360, 42379), True, 'import numpy as np\n'), ((47581, 47657), 'numpy.sqrt', 'np.sqrt', (['(ut.constant.grav_kpc_msun_sec * 
mass_cum_in_bins / DistanceBin.maxs)'], {}), '(ut.constant.grav_kpc_msun_sec * mass_cum_in_bins / DistanceBin.maxs)\n', (47588, 47657), True, 'import numpy as np\n'), ((47754, 47781), 'numpy.argmax', 'np.argmax', (['vel_circ_in_bins'], {}), '(vel_circ_in_bins)\n', (47763, 47781), True, 'import numpy as np\n'), ((47940, 47973), 'numpy.sum', 'np.sum', (["part[spec]['mass'][masks]"], {}), "(part[spec]['mass'][masks])\n", (47946, 47973), True, 'import numpy as np\n'), ((53348, 53369), 'numpy.sum', 'np.sum', (['masses[masks]'], {}), '(masses[masks])\n', (53354, 53369), True, 'import numpy as np\n'), ((54222, 54254), 'numpy.sum', 'np.sum', (['(distance_vectors ** 2)', '(1)'], {}), '(distance_vectors ** 2, 1)\n', (54228, 54254), True, 'import numpy as np\n'), ((55402, 55425), 'numpy.abs', 'np.abs', (['minor_distances'], {}), '(minor_distances)\n', (55408, 55425), True, 'import numpy as np\n'), ((57892, 57919), 'numpy.log10', 'np.log10', (['galaxy_mass_major'], {}), '(galaxy_mass_major)\n', (57900, 57919), True, 'import numpy as np\n'), ((58556, 58577), 'numpy.log10', 'np.log10', (['galaxy_mass'], {}), '(galaxy_mass)\n', (58564, 58577), True, 'import numpy as np\n'), ((64127, 64147), 'numpy.isscalar', 'np.isscalar', (['species'], {}), '(species)\n', (64138, 64147), True, 'import numpy as np\n'), ((6565, 6578), 'numpy.sum', 'np.sum', (['masks'], {}), '(masks)\n', (6571, 6578), True, 'import numpy as np\n'), ((6976, 6989), 'numpy.sum', 'np.sum', (['masks'], {}), '(masks)\n', (6982, 6989), True, 'import numpy as np\n'), ((11211, 11236), 'numpy.ndim', 'np.ndim', (['center_positions'], {}), '(center_positions)\n', (11218, 11236), True, 'import numpy as np\n'), ((11362, 11388), 'numpy.shape', 'np.shape', (['center_positions'], {}), '(center_positions)\n', (11370, 11388), True, 'import numpy as np\n'), ((13049, 13105), 'numpy.nanargmin', 'np.nanargmin', (["part[spec_name]['potential'][part_indices]"], {}), "(part[spec_name]['potential'][part_indices])\n", (13061, 13105), True, 
'import numpy as np\n'), ((29826, 29851), 'numpy.ndim', 'np.ndim', (['distance_limitss'], {}), '(distance_limitss)\n', (29833, 29851), True, 'import numpy as np\n'), ((32946, 32971), 'numpy.ndim', 'np.ndim', (['velocity_limitss'], {}), '(velocity_limitss)\n', (32953, 32971), True, 'import numpy as np\n'), ((39685, 39717), 'numpy.sum', 'np.sum', (['(distance_vectors ** 2)', '(1)'], {}), '(distance_vectors ** 2, 1)\n', (39691, 39717), True, 'import numpy as np\n'), ((41996, 42033), 'numpy.median', 'np.median', (['velocity_vectors_cyl[:, 2]'], {}), '(velocity_vectors_cyl[:, 2])\n', (42005, 42033), True, 'import numpy as np\n'), ((45710, 45754), 'numpy.sum', 'np.sum', (["part[spec]['mass'][distance_indices]"], {}), "(part[spec]['mass'][distance_indices])\n", (45716, 45754), True, 'import numpy as np\n'), ((45786, 45809), 'numpy.cumsum', 'np.cumsum', (['mass_in_bins'], {}), '(mass_in_bins)\n', (45795, 45809), True, 'import numpy as np\n'), ((6607, 6644), 'numpy.unique', 'np.unique', (['part[spec][id_name][masks]'], {}), '(part[spec][id_name][masks])\n', (6616, 6644), True, 'import numpy as np\n'), ((7018, 7043), 'numpy.unique', 'np.unique', (['ids_all[masks]'], {}), '(ids_all[masks])\n', (7027, 7043), True, 'import numpy as np\n'), ((13539, 13609), 'numpy.concatenate', 'np.concatenate', (["[positions, part[spec_name]['position'][part_indices]]"], {}), "([positions, part[spec_name]['position'][part_indices]])\n", (13553, 13609), True, 'import numpy as np\n'), ((13656, 13719), 'numpy.concatenate', 'np.concatenate', (["[masses, part[spec_name]['mass'][part_indices]]"], {}), "([masses, part[spec_name]['mass'][part_indices]])\n", (13670, 13719), True, 'import numpy as np\n'), ((15352, 15372), 'numpy.max', 'np.max', (['position_dif'], {}), '(position_dif)\n', (15358, 15372), True, 'import numpy as np\n'), ((36824, 36863), 'numpy.setdiff1d', 'np.setdiff1d', (['part_indices', 'pis_unsplit'], {}), '(part_indices, pis_unsplit)\n', (36836, 36863), True, 'import numpy as np\n'), 
((42405, 42431), 'numpy.shape', 'np.shape', (['center_positions'], {}), '(center_positions)\n', (42413, 42431), True, 'import numpy as np\n'), ((46627, 46651), 'numpy.log10', 'np.log10', (['virial_density'], {}), '(virial_density)\n', (46635, 46651), True, 'import numpy as np\n'), ((46653, 46706), 'numpy.log10', 'np.log10', (['density_cum_in_bins[[d_bin_i + 1, d_bin_i]]'], {}), '(density_cum_in_bins[[d_bin_i + 1, d_bin_i]])\n', (46661, 46706), True, 'import numpy as np\n'), ((51915, 51941), 'numpy.log10', 'np.log10', (['(edge_value / 100)'], {}), '(edge_value / 100)\n', (51923, 51941), True, 'import numpy as np\n'), ((52055, 52112), 'numpy.interp', 'np.interp', (['log_mass', 'log_masses_cum', 'DistanceBin.log_maxs'], {}), '(log_mass, log_masses_cum, DistanceBin.log_maxs)\n', (52064, 52112), True, 'import numpy as np\n'), ((67707, 67747), 'numpy.intersect1d', 'np.intersect1d', (['species', "['star', 'gas']"], {}), "(species, ['star', 'gas'])\n", (67721, 67747), True, 'import numpy as np\n'), ((68633, 68675), 'numpy.setdiff1d', 'np.setdiff1d', (['species', "['baryon', 'total']"], {}), "(species, ['baryon', 'total'])\n", (68645, 68675), True, 'import numpy as np\n'), ((69460, 69492), 'numpy.setdiff1d', 'np.setdiff1d', (['species', "['total']"], {}), "(species, ['total'])\n", (69472, 69492), True, 'import numpy as np\n'), ((8688, 8742), 'numpy.array', 'np.array', (['part[spec].id_to_index[part_id]'], {'dtype': 'dtype'}), '(part[spec].id_to_index[part_id], dtype=dtype)\n', (8696, 8742), True, 'import numpy as np\n'), ((45653, 45676), 'numpy.min', 'np.min', (['distance_limits'], {}), '(distance_limits)\n', (45659, 45676), True, 'import numpy as np\n'), ((48344, 48363), 'numpy.log10', 'np.log10', (['halo_mass'], {}), '(halo_mass)\n', (48352, 48363), True, 'import numpy as np\n'), ((51838, 51863), 'numpy.sum', 'np.sum', (['masses[d_indices]'], {}), '(masses[d_indices])\n', (51844, 51863), True, 'import numpy as np\n'), ((51866, 51889), 'numpy.cumsum', 'np.cumsum', 
(['mass_in_bins'], {}), '(mass_in_bins)\n', (51875, 51889), True, 'import numpy as np\n'), ((52466, 52495), 'numpy.arange', 'np.arange', (['DistanceBin.number'], {}), '(DistanceBin.number)\n', (52475, 52495), True, 'import numpy as np\n'), ((52496, 52528), 'numpy.isfinite', 'np.isfinite', (['log_density_in_bins'], {}), '(log_density_in_bins)\n', (52507, 52528), True, 'import numpy as np\n'), ((66278, 66296), 'numpy.abs', 'np.abs', (['distancess'], {}), '(distancess)\n', (66284, 66296), True, 'import numpy as np\n'), ((8020, 8064), 'numpy.isscalar', 'np.isscalar', (['part[spec].id_to_index[part_id]'], {}), '(part[spec].id_to_index[part_id])\n', (8031, 8064), True, 'import numpy as np\n'), ((51765, 51788), 'numpy.min', 'np.min', (['distance_limits'], {}), '(distance_limits)\n', (51771, 51788), True, 'import numpy as np\n'), ((52943, 53065), 'numpy.interp', 'np.interp', (['edge_value', 'log_density_in_bins[[d_bin_i_plus_1, d_bin_i]]', 'DistanceBin.log_maxs[[d_bin_i_plus_1, d_bin_i]]'], {}), '(edge_value, log_density_in_bins[[d_bin_i_plus_1, d_bin_i]],\n DistanceBin.log_maxs[[d_bin_i_plus_1, d_bin_i]])\n', (52952, 53065), True, 'import numpy as np\n'), ((75096, 75114), 'numpy.abs', 'np.abs', (['distancess'], {}), '(distancess)\n', (75102, 75114), True, 'import numpy as np\n'), ((31528, 31552), 'numpy.min', 'np.min', (['distance_limitss'], {}), '(distance_limitss)\n', (31534, 31552), True, 'import numpy as np\n'), ((31594, 31618), 'numpy.max', 'np.max', (['distance_limitss'], {}), '(distance_limitss)\n', (31600, 31618), True, 'import numpy as np\n'), ((33914, 33938), 'numpy.min', 'np.min', (['velocity_limitss'], {}), '(velocity_limitss)\n', (33920, 33938), True, 'import numpy as np\n'), ((33981, 34005), 'numpy.max', 'np.max', (['velocity_limitss'], {}), '(velocity_limitss)\n', (33987, 34005), True, 'import numpy as np\n'), ((67906, 67936), 'numpy.array', 'np.array', (['pros[spec][pro_prop]'], {}), '(pros[spec][pro_prop])\n', (67914, 67936), True, 'import numpy as 
np\n'), ((68834, 68864), 'numpy.array', 'np.array', (['pros[spec][pro_prop]'], {}), '(pros[spec][pro_prop])\n', (68842, 68864), True, 'import numpy as np\n'), ((32098, 32125), 'numpy.max', 'np.max', (['distance_limitss[1]'], {}), '(distance_limitss[1])\n', (32104, 32125), True, 'import numpy as np\n'), ((34490, 34517), 'numpy.max', 'np.max', (['velocity_limitss[1]'], {}), '(velocity_limitss[1])\n', (34496, 34517), True, 'import numpy as np\n'), ((32022, 32049), 'numpy.min', 'np.min', (['distance_limitss[1]'], {}), '(distance_limitss[1])\n', (32028, 32049), True, 'import numpy as np\n'), ((32713, 32740), 'numpy.max', 'np.max', (['distance_limitss[2]'], {}), '(distance_limitss[2])\n', (32719, 32740), True, 'import numpy as np\n'), ((34413, 34440), 'numpy.min', 'np.min', (['velocity_limitss[1]'], {}), '(velocity_limitss[1])\n', (34419, 34440), True, 'import numpy as np\n'), ((35112, 35139), 'numpy.max', 'np.max', (['velocity_limitss[2]'], {}), '(velocity_limitss[2])\n', (35118, 35139), True, 'import numpy as np\n'), ((31869, 31896), 'numpy.min', 'np.min', (['distance_limitss[0]'], {}), '(distance_limitss[0])\n', (31875, 31896), True, 'import numpy as np\n'), ((31945, 31972), 'numpy.max', 'np.max', (['distance_limitss[0]'], {}), '(distance_limitss[0])\n', (31951, 31972), True, 'import numpy as np\n'), ((34258, 34285), 'numpy.min', 'np.min', (['velocity_limitss[0]'], {}), '(velocity_limitss[0])\n', (34264, 34285), True, 'import numpy as np\n'), ((34335, 34362), 'numpy.max', 'np.max', (['velocity_limitss[0]'], {}), '(velocity_limitss[0])\n', (34341, 34362), True, 'import numpy as np\n'), ((32486, 32513), 'numpy.min', 'np.min', (['distance_limitss[1]'], {}), '(distance_limitss[1])\n', (32492, 32513), True, 'import numpy as np\n'), ((32562, 32589), 'numpy.max', 'np.max', (['distance_limitss[1]'], {}), '(distance_limitss[1])\n', (32568, 32589), True, 'import numpy as np\n'), ((32637, 32664), 'numpy.min', 'np.min', (['distance_limitss[2]'], {}), '(distance_limitss[2])\n', 
(32643, 32664), True, 'import numpy as np\n'), ((34882, 34909), 'numpy.min', 'np.min', (['velocity_limitss[1]'], {}), '(velocity_limitss[1])\n', (34888, 34909), True, 'import numpy as np\n'), ((34959, 34986), 'numpy.max', 'np.max', (['velocity_limitss[1]'], {}), '(velocity_limitss[1])\n', (34965, 34986), True, 'import numpy as np\n'), ((35035, 35062), 'numpy.min', 'np.min', (['velocity_limitss[2]'], {}), '(velocity_limitss[2])\n', (35041, 35062), True, 'import numpy as np\n'), ((32333, 32360), 'numpy.min', 'np.min', (['distance_limitss[0]'], {}), '(distance_limitss[0])\n', (32339, 32360), True, 'import numpy as np\n'), ((32409, 32436), 'numpy.max', 'np.max', (['distance_limitss[0]'], {}), '(distance_limitss[0])\n', (32415, 32436), True, 'import numpy as np\n'), ((34727, 34754), 'numpy.min', 'np.min', (['velocity_limitss[0]'], {}), '(velocity_limitss[0])\n', (34733, 34754), True, 'import numpy as np\n'), ((34804, 34831), 'numpy.max', 'np.max', (['velocity_limitss[0]'], {}), '(velocity_limitss[0])\n', (34810, 34831), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Ensure environmental variable i.e. paths are set to used the modules
from xf_fintech_python import DeviceManager, HJM
import numpy as np
import argparse
def zcbAnalytical(rawData, maturity, tau=0.5):
    """Price a zero-coupon bond analytically from the most recent forward curve.

    Takes the last row of ``rawData`` (historical rates quoted in percent),
    rescales it to decimals, and discounts over ``maturity`` years in steps
    of ``tau``.

    Parameters
    ----------
    rawData : 2-D array of historical rates; rows are observation dates
    maturity : bond maturity in years
    tau : tenor step in years (default 0.5)

    Returns
    -------
    discounted price exp(-tau * sum of forward rates over the horizon)
    """
    latest_curve = rawData[-1] * 0.01            # last observation, percent -> decimal
    n_periods = int(maturity / tau)
    total_rate = sum(latest_curve[:n_periods])   # accumulate forward rates over the horizon
    return np.exp(-tau * total_rate)
def zcbExample(hjm, hist_data, maturity, paths):
    """Price a zero-coupon bond on the FPGA and compare with the analytical value.

    Draws random seeds for the Monte Carlo engine, runs the HJM kernel on the
    device, then prints the FPGA price, the CPU analytical price, the kernel
    runtime, and their relative difference in percent.

    Relies on the module-level N_FACTORS, MC_UN, tenors and curves globals.
    """
    rng_seeds = (np.random.rand(N_FACTORS * MC_UN) * 1000).astype(int)
    print("Using seeds " + str(rng_seeds))

    fpga_prices = []
    hjm.run(list(hist_data.flat), list(rng_seeds.flat), fpga_prices, tenors, curves,
            paths, maturity, maturity)
    elapsed = hjm.lastruntime()

    reference = zcbAnalytical(hist_data, maturity)
    print("[CPU] ZCB calculated analytically: %10.6f" % reference)
    print("[FPGA] ZCB calculated with HJM framework: %10.6f" % fpga_prices[0])
    print("[FPGA] Runtime = %d" % elapsed, "us")

    rel_error_pct = (fpga_prices[0] - reference) / reference * 100
    print(" Diff = %.4f" % rel_error_pct, "%")
# Parse command-line arguments: the historical-rates CSV and the FPGA bitstream to load.
parser = argparse.ArgumentParser(description='Example of Heath-Jarrow-Morton framework running on a FPGA')
parser.add_argument('data_in', type=str, help='Path to csv with historical rates data')
parser.add_argument('load', type=str, help='filename of xlcbin load, e.g. hjm.xclbin')
args = parser.parse_args()

# Model dimensions: number of HJM factors and the Monte Carlo unroll factor
# (together they determine how many RNG seeds zcbExample draws).
N_FACTORS = 3
MC_UN = 4

# Historical rates matrix: rows = observation dates (curves), columns = tenors.
hist_data = np.loadtxt(args.data_in, delimiter=',')
tenors = hist_data.shape[1]
curves = hist_data.shape[0]
xclbin_load = (args.load)

print("\nThe Heath-Jarrow-Morton model\n=================================\n")

# Program variables
deviceList = DeviceManager.getDeviceList("u200")
lastruntime = 0
runtime = 0

# Identify which cards installed and choose the first available U200 card
print("Found these {0} device(s):".format(len(deviceList)))
for x in deviceList:
    print(x.getName())
chosenDevice = deviceList[0]
print("Choosing the first, ", str(chosenDevice), "\n")

# Select the chosen card and load the financial model bitstream onto its FPGA
hjm = HJM(xclbin_load)
hjm.claimDevice(chosenDevice)

# Examples of possible operations, showing MC convergence of prices
# (same maturity, increasing Monte Carlo path counts).
print("Example 1) Pricing ZCB of maturity 10Y with 50 MonteCarlo paths")
zcbExample(hjm, hist_data, 10.0, 50)
print("Example 2) Pricing ZCB of maturity 10Y with 100 MonteCarlo paths")
zcbExample(hjm, hist_data, 10.0, 100)
print("Example 3) Pricing ZCB of maturity 10Y with 200 MonteCarlo paths")
zcbExample(hjm, hist_data, 10.0, 200)
print("Example 4) Pricing ZCB of maturity 10Y with 400 MonteCarlo paths")
zcbExample(hjm, hist_data, 10.0, 400)

# Free the device before exiting.
hjm.releaseDevice()
print("End of example/test.\n")
| [
"numpy.copy",
"numpy.random.rand",
"argparse.ArgumentParser",
"xf_fintech_python.DeviceManager.getDeviceList",
"numpy.exp",
"xf_fintech_python.HJM",
"numpy.loadtxt"
] | [((1152, 1254), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Example of Heath-Jarrow-Morton framework running on a FPGA"""'}), "(description=\n 'Example of Heath-Jarrow-Morton framework running on a FPGA')\n", (1175, 1254), False, 'import argparse\n'), ((1500, 1539), 'numpy.loadtxt', 'np.loadtxt', (['args.data_in'], {'delimiter': '""","""'}), "(args.data_in, delimiter=',')\n", (1510, 1539), True, 'import numpy as np\n'), ((1745, 1780), 'xf_fintech_python.DeviceManager.getDeviceList', 'DeviceManager.getDeviceList', (['"""u200"""'], {}), "('u200')\n", (1772, 1780), False, 'from xf_fintech_python import DeviceManager, HJM\n'), ((2174, 2190), 'xf_fintech_python.HJM', 'HJM', (['xclbin_load'], {}), '(xclbin_load)\n', (2177, 2190), False, 'from xf_fintech_python import DeviceManager, HJM\n'), ((268, 306), 'numpy.copy', 'np.copy', (['rawData[rawData.shape[0] - 1]'], {}), '(rawData[rawData.shape[0] - 1])\n', (275, 306), True, 'import numpy as np\n'), ((418, 438), 'numpy.exp', 'np.exp', (['(-tau * accum)'], {}), '(-tau * accum)\n', (424, 438), True, 'import numpy as np\n'), ((507, 540), 'numpy.random.rand', 'np.random.rand', (['(N_FACTORS * MC_UN)'], {}), '(N_FACTORS * MC_UN)\n', (521, 540), True, 'import numpy as np\n')] |
#
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import numpy as np
def getFPType(X):
    """Map the dtype of *X* onto a DAAL floating-point type name.

    Returns "double" for float64 input and "float" for float32 input.
    Raises ValueError for any other dtype, including objects that have
    no ``dtype`` attribute at all.
    """
    dtype = getattr(X, 'dtype', None)
    if dtype == np.double:
        return "double"
    if dtype == np.single:
        return "float"
    raise ValueError("Input array has unexpected dtype = {}".format(dtype))
def make2d(X):
    """Coerce *X* into a 2-D layout where a cheap conversion exists.

    Scalars become a 1x1 array; 1-D ndarrays become a column vector of
    shape (n, 1). Anything else (2-D arrays, lists, ...) is returned
    unchanged.
    """
    if np.isscalar(X):
        return np.asarray(X).reshape(1, 1)
    if isinstance(X, np.ndarray) and X.ndim == 1:
        return X.reshape((X.size, 1))
    return X
| [
"numpy.asarray",
"numpy.isscalar"
] | [((1031, 1045), 'numpy.isscalar', 'np.isscalar', (['X'], {}), '(X)\n', (1042, 1045), True, 'import numpy as np\n'), ((1059, 1072), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (1069, 1072), True, 'import numpy as np\n')] |
#El promedio histórico del salario de los egresados de una universidad
#es salario_historico, la desviación estándar es
#de std_salario_historico.
#Calcule con un método Monte Carlo el salario promedio
#salario_promedio que una muestra aleatoria de n_egresados debe tener
#para considerar que cualquier otro grupo de n_egresados tiene una
#probabilidad p_excede de tener un salario promedio que excede
#salario_promedio.
#Esta probabilidad se debe calcular con una función llamada
#probabilidad_excede. Las variables de entrada de las función son (en ese orden):
#salario_historico, std_salario_historico, n_egresados, p_excede, donde
#todas las variables de entrada son de tipo entero, excepto p_excede
#que es de tipo float. Suponga que el valor de p_excede es un múltiplo
#entero de 0.001. La función solamente debe devolver el valor pedido
#del salario (un número de tipo entero).
#La solución debe estar en un archivo llamado
#"ApellidoNombre_Ejercicio14.py" donde Apellido y Nombre debe
#reemplazarlos con su apellido y nombre. Suba ese archivo como
#respuesta a esta actividad. Al ejecutar "python
#ApellidoNombre_Ejercicio14.py" o al llamar la función no se debe
#producir ningún error ni escribir nada en la terminal. Calificaremos
#la función con valores de n_egresados hasta 10000. Se considera que la
#función no corre si cada llamada de la función se demora más de 10
#segundos en correr.
#Solamente puede utilizar las funciones y métodos vistos en clase
#(videos o clases sincrónicas, o que ya se encuentren en el
# repositorio).
import numpy as np
def probabilidad_excede(salario_historico, std_salario_historico, n_egresados, p_excede):
    """Monte Carlo estimate of the salary threshold that the mean salary of a
    random sample of n_egresados graduates exceeds with probability p_excede.

    The sample mean of n_egresados i.i.d. draws is normal with the historical
    mean and standard deviation std/sqrt(n); many such means are simulated and
    the (1 - p_excede) empirical quantile is returned as an int.
    """
    n_sample = 10000000  # number of simulated sample means
    x = np.random.normal(loc=salario_historico,
                        scale=std_salario_historico/np.sqrt(n_egresados),
                        size=n_sample)
    x = np.sort(x)
    # Index of the (1 - p_excede) empirical quantile. p_excede is assumed to
    # be a positive multiple of 0.001, so the index stays within bounds.
    salario_promedio = int(x[n_sample - int(p_excede*n_sample)])
    return salario_promedio
#a = probabilidad_excede(50000, 1000, 4, 0.158)
| [
"numpy.sort",
"numpy.sqrt"
] | [((1865, 1875), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (1872, 1875), True, 'import numpy as np\n'), ((1794, 1814), 'numpy.sqrt', 'np.sqrt', (['n_egresados'], {}), '(n_egresados)\n', (1801, 1814), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
import scipy
from scipy import ndimage
import skimage
from skimage.morphology import medial_axis
import time
import cv2
import random
import os
import sys
from scipy import linalg as LA
def find_files(files, dirs=None, contains=None):
    """Recursively collect file paths under *dirs* whose path contains any
    of the substrings in *contains*.

    Matching full paths are appended to *files* (mutated in place).
    Returns the tuple (files, len(files)).

    Fixes vs. the original:
    - mutable default arguments ([]), which would be shared across calls,
      are replaced by None sentinels;
    - matching entries from `onlyfiles` are appended as-is: they already
      hold full paths, so re-joining with the directory produced broken
      paths like d/d/f.
    """
    if dirs is None:
        dirs = []
    if contains is None:
        contains = []
    for d in dirs:
        onlyfiles = [os.path.join(d, f) for f in os.listdir(d) if os.path.isfile(os.path.join(d, f))]
        for part in contains:
            files += [f for f in onlyfiles if part in f]
        onlydirs = [os.path.join(d, dd) for dd in os.listdir(d) if os.path.isdir(os.path.join(d, dd))]
        if onlydirs:
            recursive_files, _ = find_files([], onlydirs, contains)
            files += recursive_files
    return files, len(files)
def make_list(load_root='/Users/junkyungkim/Desktop/by_class/', save_name='list.npy',
              only_include=()):
    """Scan *load_root* for class subdirectories, gather each class's .png
    file paths with find_files, and save (ims_list, num_ims_list) to
    load_root/save_name via np.save.

    only_include: optional collection of class-directory names; when
    non-empty, classes not listed are skipped.

    Fix: the default for only_include is now an immutable () instead of a
    mutable [] (same behavior, avoids the shared-mutable-default pitfall).
    """
    class_names = [name for name in os.listdir(load_root)
                   if os.path.isdir(os.path.join(load_root, name))]
    class_path_list = [os.path.join(load_root, name) for name in class_names]
    if len(only_include) > 0:
        for cl in class_names:
            if cl not in only_include:
                class_path_list.remove(os.path.join(load_root, cl))
    ims_list = []
    num_ims_list = []
    for i, iclass in enumerate(class_path_list):
        print('class ' + str(i))
        paths, count = find_files([], dirs=[iclass], contains=['.png'])
        ims_list.append(paths)
        num_ims_list.append(count)
    # Saved as an object array: (paths per class, counts per class)
    np.save(os.path.join(load_root, save_name), (ims_list, num_ims_list))
# (2,62) object. (paths, num_ims) x categories
def load_list(load_fn):
    """Load the (ims_list, num_ims_list) tuple saved by make_list.

    The .npy file holds a pickled object array, so allow_pickle=True is
    required: np.load defaults to allow_pickle=False since NumPy 1.16.3
    and would raise ValueError on this file.
    """
    npy = np.load(load_fn, allow_pickle=True)
    # (2,62) object. (paths, num_ims) x categories
    return npy[0], npy[1]
if __name__ == "__main__":
    # Directory names of the character classes to keep. These look like hex
    # ASCII codes ('41' == 'A', '4a' == 'J', ...) as used by the NIST
    # by_class layout — TODO confirm against the dataset on disk.
    # only capitals
    capitals = ['4a', '4b', '4c', '4e', '4f',
                '5a', '6c', '6f',
                '41', '42', '43', '44', '45', '47', '48', '49',
                '50', '51', '52', '53', '54', '55', '56', '57', '58']
    # Digit class codes; currently unused — only capitals are passed below.
    numbers = ['30', '31', '32', '33', '34', '35', '36', '37', '38', '39']
    # First CLI argument: root directory of the dataset to index.
    nist_root = str(sys.argv[1])
    make_list(nist_root, only_include=capitals)
"os.listdir",
"numpy.load",
"os.path.join"
] | [((1881, 1897), 'numpy.load', 'np.load', (['load_fn'], {}), '(load_fn)\n', (1888, 1897), True, 'import numpy as np\n'), ((1115, 1150), 'os.path.join', 'os.path.join', (['load_root', 'class_name'], {}), '(load_root, class_name)\n', (1127, 1150), False, 'import os\n'), ((1734, 1768), 'os.path.join', 'os.path.join', (['load_root', 'save_name'], {}), '(load_root, save_name)\n', (1746, 1768), False, 'import os\n'), ((371, 389), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (383, 389), False, 'import os\n'), ((590, 609), 'os.path.join', 'os.path.join', (['d', 'dd'], {}), '(d, dd)\n', (602, 609), False, 'import os\n'), ((996, 1017), 'os.listdir', 'os.listdir', (['load_root'], {}), '(load_root)\n', (1006, 1017), False, 'import os\n'), ((1169, 1190), 'os.listdir', 'os.listdir', (['load_root'], {}), '(load_root)\n', (1179, 1190), False, 'import os\n'), ((399, 412), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (409, 412), False, 'import os\n'), ((518, 536), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (530, 536), False, 'import os\n'), ((620, 633), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (630, 633), False, 'import os\n'), ((1054, 1089), 'os.path.join', 'os.path.join', (['load_root', 'class_name'], {}), '(load_root, class_name)\n', (1066, 1089), False, 'import os\n'), ((1231, 1266), 'os.path.join', 'os.path.join', (['load_root', 'class_name'], {}), '(load_root, class_name)\n', (1243, 1266), False, 'import os\n'), ((431, 449), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (443, 449), False, 'import os\n'), ((651, 670), 'os.path.join', 'os.path.join', (['d', 'dd'], {}), '(d, dd)\n', (663, 670), False, 'import os\n'), ((1422, 1449), 'os.path.join', 'os.path.join', (['load_root', 'cl'], {}), '(load_root, cl)\n', (1434, 1449), False, 'import os\n')] |
import numpy as np
import re
import random
def prepare_data():
    """Prepare input positive and negative datasets as bitvectors for the
    Rap1 binding problem.

    Returns three lists of 34-bit vectors (17 bp, 2 bits per base):
    positive samples, negative samples that look similar to positives, and
    negative samples chosen at random from the FASTA sequences.
    """
    # read in all positive data, convert to bitvectors
    pos_str = read_positives()
    pos_vec = str_to_vec(pos_str)
    # read in all negative data. then, remove false negatives from the negative fa sequences and their reverse complements. Call this new set of sequences and their reverse complements "neg_str".
    neg_str = read_negatives()
    neg_str = remove_falseneg(neg_str, pos_str)
    rc_neg_str = reverse_complement(neg_str)
    # Filtering is done on both strands: a positive site on either strand
    # disqualifies the sequence.
    rc_neg_str = remove_falseneg(rc_neg_str, pos_str)
    neg_str = reverse_complement(rc_neg_str)
    neg_str = neg_str + rc_neg_str
    # cache interesting cases as "neg_simiar". interesting cases are those that look similar to the positive sequences (in that they contain cysteines at positions 5, 6, and 10) but are considered negative. also cache randomly chosen sequences, so that the neural net can be trained on sequences that are not similar to positive examples.
    neg_sim, neg_rand = cache_cases(neg_str)
    neg_sim_vec = str_to_vec(neg_sim)
    neg_rand_vec = str_to_vec(neg_rand)
    return pos_vec, neg_sim_vec, neg_rand_vec
def read_positives(file='/Users/cjmathy/Documents/courses/bmi203/Final-Project/ann_bmi203/rap1-lieb-positives.txt'):
    """Read positive Rap1 site sequences, one per line, from *file*.

    Generalization: the previously hard-coded path is now a parameter with
    the original value as its default, so other datasets can be used.
    The file is opened in binary mode (as before), so the returned list
    contains stripped bytes objects under Python 3.
    """
    seqs = []
    with open(file, 'rb') as f:
        for seq in f:
            seqs.append(seq.strip())
    return seqs
def read_negatives(file='/Users/cjmathy/Documents/courses/bmi203/Final-Project/ann_bmi203/yeast-upstream-1k-negative.fa'):
    """Parse a FASTA file and return its sequences as strings.

    Fixes vs. the original:
    - header detection uses line.startswith('>') instead of the identity
      test `line[0] is not '>'` (string-literal identity is a CPython
      interning accident, and under Python 3 indexing a bytes line yields
      an int, so the old test silently broke);
    - the file is read in text mode so lines are str under Python 3;
    - the final record is no longer dropped: the last accumulated
      sequence is appended after the loop.
    The path is now a parameter defaulting to the original location.
    """
    seqs = []
    with open(file, 'r') as f:
        sequence = ''
        for line in f:
            if line.startswith('>'):
                # A new header flushes the previously accumulated record.
                if sequence:
                    seqs.append(sequence)
                    sequence = ''
            else:
                sequence += line.strip()
        if sequence:
            seqs.append(sequence)
    return seqs
def str_to_vec(sequences):
    """Encode nucleotide strings as flat bit lists, 2 bits per base:
    A=00, C=01, T=10, G=11."""
    encoding = {"A": (0, 0),
                "C": (0, 1),
                "T": (1, 0),
                "G": (1, 1)}
    vecs = []
    for seq in sequences:
        bits = []
        for base in seq:
            bits.extend(encoding[base])
        vecs.append(bits)
    return vecs
def remove_falseneg(negatives, positives):
    """Drop every negative sequence that contains a positive sequence as a
    substring — such sequences would be false negatives."""
    return [neg for neg in negatives
            if not any(pos in neg for pos in positives)]
def reverse_complement(sequences):
    """Return the reverse complement of each DNA sequence in *sequences*."""
    pairs = {'A': 'T',
             'C': 'G',
             'G': 'C',
             'T': 'A'}
    rc = []
    for seq in sequences:
        complemented = [pairs.get(nuc) for nuc in seq]
        rc.append(''.join(reversed(complemented)))
    return rc
def cache_cases(sequences):
    """Split negative sequences into signature-like and random 17-mers.

    Returns (sim_cache, rand_cache):
    - sim_cache: deduplicated 17-mers matching ....CC...C....... (C at
      positions 5, 6 and 10, mimicking the positive Rap1 sites);
    - rand_cache: 5 uniformly random 17-mers drawn from each sequence.

    Fix: uses range() instead of the Python-2-only xrange().
    """
    # 1) cache negative cases that are similar to positives
    sim_cache = []
    for seq in sequences:
        matches = re.findall(r'....CC...C.......', seq)
        for match in matches:
            sim_cache.append(match)
    sim_cache = list(set(sim_cache))
    # 2) cache randomly chosen 17 bp negatives, 5 per sequence.
    bp = 17
    rand_cache = []
    for seq in sequences:
        for _ in range(5):
            i = random.randint(0, len(seq)-bp)
            substr = seq[i:i+bp]
            rand_cache.append(substr)
    return sim_cache, rand_cache
def build_training_set(pos, neg_sim, neg_rand):
    """Assemble a training set of all positives plus 137 negatives.

    Negatives are sampled with replacement: 69 from neg_sim and 68 from
    neg_rand. Labels are 1.0 for positives and 0.0 for negatives.
    Returns (X, y) as numpy arrays.

    Fix: range() replaces the Python-2-only xrange().
    """
    neg = []
    for _ in range(69):
        i = np.random.randint(0, len(neg_sim))
        neg.append(neg_sim[i])
    for _ in range(68):
        i = np.random.randint(0, len(neg_rand))
        neg.append(neg_rand[i])
    Xp = np.array(pos)
    Xn = np.array(neg)
    X = np.concatenate((Xp, Xn), axis=0)  # [n_pos + 137, n_features]
    yp = np.ones((Xp.shape[0],))
    yn = np.zeros((Xn.shape[0],))
    y = np.concatenate((yp, yn), axis=0)  # [n_pos + 137]
    return X, y
def build_training_set_100(pos, neg_sim, neg_rand):
    """Same as build_training_set, but with 100 negatives (50 similar,
    50 random), leaving some samples available for a held-out test set.

    Fix: range() replaces the Python-2-only xrange().
    """
    neg = []
    for _ in range(50):
        i = np.random.randint(0, len(neg_sim))
        neg.append(neg_sim[i])
    for _ in range(50):
        i = np.random.randint(0, len(neg_rand))
        neg.append(neg_rand[i])
    Xp = np.array(pos)
    Xn = np.array(neg)
    X = np.concatenate((Xp, Xn), axis=0)
    yp = np.ones((Xp.shape[0],))
    yn = np.zeros((Xn.shape[0],))
    y = np.concatenate((yp, yn), axis=0)
    return X, y
def build_test_set(pos, neg_sim, neg_rand):
    """Assemble a held-out test set: all positives plus 37 negatives
    (19 similar, 18 random), sampled with replacement.

    Fix: range() replaces the Python-2-only xrange().
    """
    neg = []
    for _ in range(19):
        i = np.random.randint(0, len(neg_sim))
        neg.append(neg_sim[i])
    for _ in range(18):
        i = np.random.randint(0, len(neg_rand))
        neg.append(neg_rand[i])
    Xp = np.array(pos)
    Xn = np.array(neg)
    X = np.concatenate((Xp, Xn), axis=0)
    yp = np.ones((Xp.shape[0],))
    yn = np.zeros((Xn.shape[0],))
    y = np.concatenate((yp, yn), axis=0)
    return X, y
| [
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"re.findall"
] | [((5076, 5089), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (5084, 5089), True, 'import numpy as np\n'), ((5099, 5112), 'numpy.array', 'np.array', (['neg'], {}), '(neg)\n', (5107, 5112), True, 'import numpy as np\n'), ((5121, 5153), 'numpy.concatenate', 'np.concatenate', (['(Xp, Xn)'], {'axis': '(0)'}), '((Xp, Xn), axis=0)\n', (5135, 5153), True, 'import numpy as np\n'), ((5184, 5207), 'numpy.ones', 'np.ones', (['(Xp.shape[0],)'], {}), '((Xp.shape[0],))\n', (5191, 5207), True, 'import numpy as np\n'), ((5217, 5241), 'numpy.zeros', 'np.zeros', (['(Xn.shape[0],)'], {}), '((Xn.shape[0],))\n', (5225, 5241), True, 'import numpy as np\n'), ((5250, 5282), 'numpy.concatenate', 'np.concatenate', (['(yp, yn)'], {'axis': '(0)'}), '((yp, yn), axis=0)\n', (5264, 5282), True, 'import numpy as np\n'), ((5710, 5723), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (5718, 5723), True, 'import numpy as np\n'), ((5733, 5746), 'numpy.array', 'np.array', (['neg'], {}), '(neg)\n', (5741, 5746), True, 'import numpy as np\n'), ((5755, 5787), 'numpy.concatenate', 'np.concatenate', (['(Xp, Xn)'], {'axis': '(0)'}), '((Xp, Xn), axis=0)\n', (5769, 5787), True, 'import numpy as np\n'), ((5797, 5820), 'numpy.ones', 'np.ones', (['(Xp.shape[0],)'], {}), '((Xp.shape[0],))\n', (5804, 5820), True, 'import numpy as np\n'), ((5830, 5854), 'numpy.zeros', 'np.zeros', (['(Xn.shape[0],)'], {}), '((Xn.shape[0],))\n', (5838, 5854), True, 'import numpy as np\n'), ((5863, 5895), 'numpy.concatenate', 'np.concatenate', (['(yp, yn)'], {'axis': '(0)'}), '((yp, yn), axis=0)\n', (5877, 5895), True, 'import numpy as np\n'), ((6296, 6309), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (6304, 6309), True, 'import numpy as np\n'), ((6319, 6332), 'numpy.array', 'np.array', (['neg'], {}), '(neg)\n', (6327, 6332), True, 'import numpy as np\n'), ((6341, 6373), 'numpy.concatenate', 'np.concatenate', (['(Xp, Xn)'], {'axis': '(0)'}), '((Xp, Xn), axis=0)\n', (6355, 6373), True, 'import numpy as np\n'), 
((6383, 6406), 'numpy.ones', 'np.ones', (['(Xp.shape[0],)'], {}), '((Xp.shape[0],))\n', (6390, 6406), True, 'import numpy as np\n'), ((6416, 6440), 'numpy.zeros', 'np.zeros', (['(Xn.shape[0],)'], {}), '((Xn.shape[0],))\n', (6424, 6440), True, 'import numpy as np\n'), ((6449, 6481), 'numpy.concatenate', 'np.concatenate', (['(yp, yn)'], {'axis': '(0)'}), '((yp, yn), axis=0)\n', (6463, 6481), True, 'import numpy as np\n'), ((3746, 3782), 're.findall', 're.findall', (['"""....CC...C......."""', 'seq'], {}), "('....CC...C.......', seq)\n", (3756, 3782), False, 'import re\n')] |
import os
import sys
import numpy as np
import scipy.io as sio
import more_itertools as mit
# EEG channel names (32-channel DEAP layout). NOTE(review): `chan` is
# defined but never used below.
chan = ['Fp1','AF3','F3','F7','FC5','FC1','C3','T7','CP5','CP1','P3','P7','PO3','O1','Oz','Pz','Fp2','AF4','Fz','F4','F8','FC6','FC2','Cz','C4','T8','CP6','CP2','P4','P8','PO4','O2']
# Dataset dimensions: labels, trials, subjects, channels, time samples.
# NOTE(review): nLabel is never used below.
nLabel, nTrial, nUser, nChannel, nTime = 4, 40, 1, 32, 8064
print ("Program started \n")
# Accumulator of per-sample string values. NOTE(review): m is never
# cleared between trials or users, so the window list keeps growing
# across iterations — confirm this is intended.
m=[]
# One valence/arousal label line is appended per trial to these files.
fout_labels0 = open("labels_0.dat",'w')
fout_labels1 = open("labels_1.dat",'w')
for i in range(nUser):#4, 40, 32, 32, 8064
    # Subject files are named s01.mat, s02.mat, ...
    if i < 10:
        name = '%0*d' % (2,i+1)
    else:
        name = i+1
    fname = "s"+str(name)+".mat"
    x = sio.loadmat(fname)
    print (fname)
    for tr in range(nTrial):
        # Rewritten from scratch for every trial.
        fout_data = open("features_raw.csv",'w')
        # Skip the first 384 samples (pre-trial baseline, presumably 3 s
        # at 128 Hz — TODO confirm) and flatten channels x time as strings.
        for dat in range(384,nTime):
            for ch in range(nChannel):
                m.append(str(x['data'][tr][ch][dat]))
        # Sliding windows of 512 values with 50% overlap.
        windows = list(mit.windowed(m, n=512, step=256))
        # Binarize valence (label 0) and arousal (label 1) at 4.5.
        if(x['labels'][tr][0]<4.5):
            fout_labels0.write(str(1) + "\n");
        else:
            fout_labels0.write(str(2) + "\n");
        if(x['labels'][tr][1]<4.5):
            fout_labels1.write(str(1) + "\n");
        else:
            fout_labels1.write(str(2) + "\n");
        #Normalizing the data between [0,1]
        # Append an index row 0..511 as the last window.
        windows.append(tuple([x for x in range(512)]))
        # Write the first 928 windows as CSV rows of 512 values.
        for l in range(928):
            for n in range(512):
                if n==511:
                    fout_data.write(str(windows[l][n]))
                else:
                    fout_data.write(str(windows[l][n])+",")
            fout_data.write("\n")
        fout_data.close()
        #maximum=np.amax(array)
        #minimum=np.amin(array)
        #normalise all data in the array except the first value of each row
        # Re-read the CSV, min-max normalize (min/max taken over columns
        # 1: but applied to all columns), and overwrite the file.
        array = np.genfromtxt('features_raw.csv',delimiter=',')
        maximum=array[:928, 1:].max()
        minimum=array[:928, 1:].min()
        #normalise all data in the array except the first value of each row
        a = (array[:928,:] - minimum)/(maximum - minimum)
        np.savetxt("features_raw.csv", a, delimiter=",", fmt='%s')
        # Downstream feature extraction consumes features_raw.csv.
        os.system('python entropy1.py')
        print("user "+ str(i+1) +" trail"+ str(tr+1))
fout_labels0.close()
fout_labels1.close()
print ("\n"+"Print Successful")
"scipy.io.loadmat",
"numpy.savetxt",
"more_itertools.windowed",
"os.system",
"numpy.genfromtxt"
] | [((607, 625), 'scipy.io.loadmat', 'sio.loadmat', (['fname'], {}), '(fname)\n', (618, 625), True, 'import scipy.io as sio\n'), ((1567, 1615), 'numpy.genfromtxt', 'np.genfromtxt', (['"""features_raw.csv"""'], {'delimiter': '""","""'}), "('features_raw.csv', delimiter=',')\n", (1580, 1615), True, 'import numpy as np\n'), ((1808, 1866), 'numpy.savetxt', 'np.savetxt', (['"""features_raw.csv"""', 'a'], {'delimiter': '""","""', 'fmt': '"""%s"""'}), "('features_raw.csv', a, delimiter=',', fmt='%s')\n", (1818, 1866), True, 'import numpy as np\n'), ((1870, 1901), 'os.system', 'os.system', (['"""python entropy1.py"""'], {}), "('python entropy1.py')\n", (1879, 1901), False, 'import os\n'), ((846, 878), 'more_itertools.windowed', 'mit.windowed', (['m'], {'n': '(512)', 'step': '(256)'}), '(m, n=512, step=256)\n', (858, 878), True, 'import more_itertools as mit\n')] |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: mo-mo-
#
# Created: 05/08/2018
# Copyright: (c) mo-mo- 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
import numpy as np
# Verdict check for a string: accept ("AC") iff the first character is 'A',
# exactly one 'C' occurs in positions 2..len-2 (0-indexed), and every other
# character is lowercase; otherwise print "WA".
l = list(input())
l = np.array(l)
state = 'AC'
if l[0] != 'A':
    state = 'WA'
else:
    # Lowercase the matched 'A' so the final islower() check only tests
    # the remaining characters.
    l[0] = 'a'
    l2 = l[2:-1].copy()
    Cplace = np.where(l2=='C')[0]
    if len(Cplace) == 1:
        # Lowercase the single matched 'C' as well before the check.
        l2[Cplace[0]] = 'c'
        l[2:-1] = l2
        arr = ''.join(l)
        if not arr.islower():
            state = 'WA'
    else:
        state = 'WA'
print(state)
| [
"numpy.where",
"numpy.array"
] | [((366, 377), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (374, 377), True, 'import numpy as np\n'), ((495, 514), 'numpy.where', 'np.where', (["(l2 == 'C')"], {}), "(l2 == 'C')\n", (503, 514), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from DeepSparseCoding.tf1x.ops.init_ops import L2NormalizedTruncatedNormalInitializer
from DeepSparseCoding.tf1x.utils.trainable_variable_dict import TrainableVariableDict
class AeModule(object):
  def __init__(self, data_tensor, layer_types, enc_channels, dec_channels, patch_size,
    conv_strides, w_decay_mult, w_norm_mult, act_funcs, dropout, tie_dec_weights,
    w_init_type, variable_scope="ae"):
    """
    Autoencoder module
    Inputs:
      data_tensor
      layer_types [list of str] "conv" or "fc" per layer, encoder layers first
      enc_channels [list of ints] the number of output channels per encoder layer
        Last entry is the number of latent units
      dec_channels [list of ints] the number of output channels per decoder layer
        Last entry must be the number of input pixels for FC layers and channels for CONV layers
      w_decay_mult: tradeoff multiplier for weight decay loss
      w_norm_mult: tradeoff multiplier for weight norm loss (asks weight norm to == 1)
      act_funcs: activation functions
      dropout: specifies the keep probability or None
      conv: if True, do convolution
      conv_strides: list of strides for convolution [batch, y, x, channels]
      patch_size: number of (y, x) inputs for convolutional patches
      w_init_type: [str] which w_init to use, options are 'normal', 'xavier', or 'l2_normed'
      variable_scope: specifies the variable_scope for the module
    Outputs:
      dictionary
    """
    self.conv_strides = conv_strides
    self.variable_scope = variable_scope
    self.trainable_variables = TrainableVariableDict()
    self.data_tensor = data_tensor
    self.enc_channels = enc_channels
    self.dec_channels = dec_channels
    # patch_size entries are (y, x) pairs; split into parallel lists.
    self.patch_size_y = [int(size[0]) for size in patch_size]
    self.patch_size_x = [int(size[1]) for size in patch_size]
    self.dropout = dropout
    self.w_decay_mult = w_decay_mult
    self.w_norm_mult = w_norm_mult
    self.act_funcs = act_funcs
    self.num_enc_layers = len(self.enc_channels)
    self.num_dec_layers = len(self.dec_channels)
    self.tie_dec_weights = tie_dec_weights
    # layer_types lists encoder layers first, then decoder layers.
    self.enc_layer_types = layer_types[:self.num_enc_layers]
    self.dec_layer_types = layer_types[self.num_enc_layers:]
    self.layer_types = [self.enc_layer_types, self.dec_layer_types]
    self.num_enc_conv_layers = self.enc_layer_types.count("conv")
    self.num_dec_conv_layers = self.dec_layer_types.count("conv")
    self.num_conv_layers = self.num_enc_conv_layers + self.num_dec_conv_layers
    self.num_enc_fc_layers = self.enc_layer_types.count("fc")
    self.num_dec_fc_layers = self.dec_layer_types.count("fc")
    self.num_fc_layers = self.num_enc_fc_layers + self.num_dec_fc_layers
    self.num_layers = self.num_enc_layers + self.num_dec_layers
    data_ndim = len(data_tensor.get_shape().as_list())
    self.all_strides = [] # Full list of strides, including FC layers
    # Layer order is enc conv -> enc fc -> dec fc -> dec conv; FC layers
    # get a None stride placeholder so indices line up with layer ids.
    for enc_conv_id in range(self.num_enc_conv_layers):
      self.all_strides.append(self.conv_strides[enc_conv_id])
    for enc_fc_id in range(self.num_enc_fc_layers):
      self.all_strides.append(None)
    for dec_fc_id in range(self.num_dec_fc_layers):
      self.all_strides.append(None)
    for dec_conv_id in range(self.num_dec_conv_layers):
      self.all_strides.append(self.conv_strides[self.num_enc_conv_layers + dec_conv_id])
    if data_ndim == 2:
      self.batch_size, self.num_pixels = self.data_tensor.get_shape()
    else:
      self.batch_size, self.num_pixels_y, self.num_pixels_x, self.num_channels = \
        self.data_tensor.get_shape()
      self.num_pixels = self.num_pixels_y * self.num_pixels_x * self.num_channels
    self.w_init_type = w_init_type
    # Parameter checks
    if self.enc_layer_types[0] == "conv":
      assert data_ndim == 4, (
        "Module requires data_tensor to have shape" +
        " [batch, num_pixels_y, num_pixels_x, num_features] if first layer is conv")
    else:
      assert data_ndim == 2, (
        "Module requires data_tensor to have shape [batch, num_pixels]")
    if(self.tie_dec_weights):
      assert self.num_enc_layers == self.num_dec_layers, (
        "num_enc_layers must equal num_dec_layers, but are %g and %g"%(
        self.num_enc_layers, self.num_dec_layers))
    # NOTE(review): `"conv" in list_slice` yields a single bool, so np.all
    # here does not check each element as the message suggests — confirm
    # whether per-element checks were intended.
    if self.num_enc_conv_layers > 0 and self.num_enc_fc_layers > 0:
      assert np.all("conv" in self.enc_layer_types[:self.num_enc_conv_layers]), \
        ("Encoder conv layers must come before fc layers")
    if self.num_dec_conv_layers > 0 and self.num_dec_fc_layers > 0:
      assert np.all("fc" in self.dec_layer_types[:self.num_dec_fc_layers]), \
        ("Decoder fc layers must come before conv layers")
    assert self.num_enc_layers == len(self.enc_layer_types), \
      ("The number of encoder channels must match the number of encoder layer types")
    assert self.num_dec_layers == len(self.dec_layer_types), \
      ("The number of decoder channels must match the number of decoder layer types")
    assert all([layer_type in ["conv", "fc"] for layer_type in layer_types]), \
      ("All layer_types must be conv or fc")
    assert len(self.patch_size_y) == self.num_conv_layers, \
      ("patch_size_y must be a list of size " + str(self.num_conv_layers))
    assert len(self.patch_size_x) == self.num_conv_layers, \
      ("patch_size_x must be a list of size " + str(self.num_conv_layers))
    assert len(self.conv_strides) == self.num_conv_layers, \
      ("conv_strides must be a list of size " + str(self.num_conv_layers))
    assert len(self.act_funcs) == self.num_layers, \
      ("act_funcs parameter must be a list of size " + str(self.num_layers))
    # Construct the TF graph immediately upon instantiation.
    self.build_graph()
  def compute_weight_norm_loss(self):
    """Loss term that pulls each weight tensor's squared l2 norm toward 1.

    For every w, the squared norm is reduced over all axes except the
    first; the penalty is (1 - ||w||^2)^2 summed over the remaining axis,
    totaled across weights and scaled by 0.5 * w_norm_mult.
    """
    with tf.compat.v1.variable_scope("w_norm"):
      w_norm_list = []
      for w in self.w_list:
        # Reduce over every axis except axis 0.
        reduc_axis = np.arange(1, len(w.get_shape().as_list()))
        w_norm = tf.reduce_sum(input_tensor=tf.square(1 - tf.reduce_sum(input_tensor=tf.square(w), axis=reduc_axis)))
        w_norm_list.append(w_norm)
      norm_loss = tf.multiply(0.5 * self.w_norm_mult, tf.add_n(w_norm_list))
    return norm_loss
def compute_weight_decay_loss(self):
with tf.compat.v1.variable_scope("unsupervised"):
w_decay_list = [tf.reduce_sum(input_tensor=tf.square(w)) for w in self.w_list]
decay_loss = tf.multiply(0.5*self.w_decay_mult, tf.add_n(w_decay_list))
return decay_loss
  def compute_recon_loss(self, reconstruction):
    """Half mean (over batch) of the summed squared reconstruction error.

    If the reconstruction rank differs from the input rank (conv vs fc
    boundary) but the per-example element counts match, the reconstruction
    is reshaped to the input's shape first; otherwise an assertion fires.
    """
    with tf.compat.v1.variable_scope("unsupervised"):
      # If the encoder and decoder are different types (conv vs fc) then there may be a shape mismatch
      recon_shape = reconstruction.get_shape()
      data_shape = self.data_tensor.get_shape()
      if(recon_shape.ndims != data_shape.ndims):
        if(np.prod(recon_shape.as_list()[1:]) == np.prod(data_shape.as_list()[1:])):
          reconstruction = tf.reshape(reconstruction, tf.shape(input=self.data_tensor))
        else:
          assert False, ("Reconstructiion and input must have the same size")
      reduc_dim = list(range(1, len(reconstruction.shape)))# We want to avg over batch
      recon_loss = 0.5 * tf.reduce_mean(
        input_tensor=tf.reduce_sum(input_tensor=tf.square(tf.subtract(reconstruction, self.data_tensor)),
        axis=reduc_dim), name="recon_loss")
    return recon_loss
  def compute_total_loss(self):
    """Populate self.loss_dict (recon, weight decay, weight norm terms)
    and self.total_loss (their sum)."""
    with tf.compat.v1.variable_scope("loss") as scope:
      self.loss_dict = {"recon_loss":self.compute_recon_loss(self.reconstruction),
        "weight_decay_loss":self.compute_weight_decay_loss(),
        "weight_norm_loss":self.compute_weight_norm_loss()}
      self.total_loss = tf.add_n([loss for loss in self.loss_dict.values()], name="total_loss")
def flatten_feature_map(self, feature_map):
"""
Flatten input tensor from [batch, y, x, f] to [batch, y*x*f]
"""
map_shape = feature_map.get_shape()
if(map_shape.ndims == 4):
(batch, y, x, f) = map_shape
prev_input_features = int(y * x * f)
resh_map = tf.reshape(feature_map, [-1, prev_input_features])
elif(map_shape.ndims == 2):
resh_map = feature_map
else:
assert False, ("Input feature_map has incorrect ndims")
return resh_map
  def get_dec_shapes(self, input_shape):
    """Compute per-layer [y, x, f] shapes for the decoder conv stack.

    Mirrors the encoder's conv geometry by running conv_output_length
    forward from the data shape and reversing the result, so the list is
    returned decoder-ordered (first entry is the shape the first decoder
    conv layer must receive).
    """
    # The following assumes decoder fc->conv operation mirrors encoder conv->fc
    conv_output_length = tf.python.keras.utils.conv_utils.conv_output_length
    in_y, in_x, in_f = input_shape[1:]
    # Use only the encoder-side conv parameters (mirrored for the decoder).
    dec_conv_strides = self.conv_strides[:-self.num_dec_conv_layers]
    filter_size_y = self.patch_size_y[:-self.num_dec_conv_layers]
    filter_size_x = self.patch_size_x[:-self.num_dec_conv_layers]
    dec_channels = self.dec_channels[:self.num_dec_conv_layers][::-1]
    last_enc_conv_channels = self.enc_channels[self.num_enc_conv_layers-1]
    dec_channels[-1] = last_enc_conv_channels
    layer_shapes = [[int(in_y), int(in_x), int(in_f)]]
    for layer_id in range(self.num_dec_conv_layers):
      out_y = conv_output_length(
        input_length=layer_shapes[layer_id][0],
        filter_size=filter_size_y[layer_id],
        padding="same",
        stride=dec_conv_strides[layer_id][1])
      out_x = conv_output_length(
        input_length=layer_shapes[layer_id][1],
        filter_size=filter_size_x[layer_id],
        padding="same",
        stride=dec_conv_strides[layer_id][2])
      layer_shapes.append([int(out_y), int(out_x), int(dec_channels[layer_id])])
    return layer_shapes[::-1]
  def compute_pre_activation(self, layer_id, input_tensor, w, b, conv, decode):
    """Return u*w + b, where * is matmul (fc), conv2d (conv encode), or
    conv2d_transpose (conv decode).

    For transposed convolution the output spatial size is derived from the
    input size and stride, rounded down to a multiple of the stride.
    """
    if conv:
      strides = self.all_strides[layer_id]
      if decode:
        # Output height/width = input * stride, minus the remainder so the
        # result is an exact multiple of the stride.
        height_const = tf.shape(input=input_tensor)[1] % strides[1]
        out_height = (tf.shape(input=input_tensor)[1] * strides[1]) - height_const
        width_const = tf.shape(input=input_tensor)[2] % strides[2]
        out_width = (tf.shape(input=input_tensor)[2] * strides[2]) - width_const
        out_shape = tf.stack([tf.shape(input=input_tensor)[0], # Batch
          out_height, # Height
          out_width, # Width
          tf.shape(input=w)[2]]) # Channels
        pre_act = tf.add(tf.nn.conv2d_transpose(input_tensor, w, out_shape, strides,
          padding="SAME"), b)
      else:
        pre_act = tf.add(tf.nn.conv2d(input=input_tensor, filters=w, strides=strides, padding="SAME"), b)
    else:
      pre_act = tf.add(tf.matmul(input_tensor, w), b)
    return pre_act
  def layer_maker(self, layer_id, input_tensor, activation_function, w_shape,
    keep_prob=1.0, conv=False, decode=False, tie_dec_weights=False, name_suffix=""):
    """
    Make layer that does act(u*w+b) where * is a dot product or convolution
    Returns (output_tensor, w, b). Variables are created (or reused, via
    AUTO_REUSE) under scope "layer<layer_id>"; when tie_dec_weights is set,
    decoder layers read the mirrored encoder weight via w_read_id.
    Example case for w_read_id logic:
      layer_id: [0 1 2 3 4] [5 6 7 8 9]
                              10-6  10-7  10-8  10-9  10-10
      weight_id:  [0 1 2 3 4] [ 4    3     2     1     0  ]
      num_layers: 10
      weight_id = num_layers - (layer_id + 1)
    """
    with tf.compat.v1.variable_scope("layer"+str(layer_id), reuse=tf.compat.v1.AUTO_REUSE) as scope:
      if tie_dec_weights:
        w_read_id = self.num_layers - (layer_id+1)
      else:
        w_read_id = layer_id
      name_prefix = "conv_" if conv else "fc_"
      # Tied weights share a name (hence a variable) with their mirror
      # layer; biases are always per-layer.
      w_name = name_prefix+"w_"+str(w_read_id)+name_suffix
      if self.w_init_type.lower() == "normal":
        w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
          initializer=self.w_normal_init, trainable=True)
      elif self.w_init_type.lower() == "xavier":
        w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
          initializer=self.w_xavier_init, trainable=True)
      elif self.w_init_type.lower() == "l2_normed":
        if decode:
          w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
            initializer=self.w_normed_dec_init, trainable=True)
        else:
          w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
            initializer=self.w_normed_enc_init, trainable=True)
      else:
        assert False, ("w_init_type parameter must be 'normal', 'xavier', or 'l2_normed', not %s"%(
          self.w_init_type))
      b_name = name_prefix+"b_"+str(layer_id)+name_suffix
      if conv and decode:
        # conv2d_transpose outputs w_shape[-2] channels (input/output
        # channel axes are swapped relative to conv2d).
        b_shape = w_shape[-2]
      else:
        b_shape = w_shape[-1]
      b = tf.compat.v1.get_variable(name=b_name, shape=b_shape,
        dtype=tf.float32, initializer=self.b_init, trainable=True)
      pre_act = self.compute_pre_activation(layer_id, input_tensor, w, b, conv, decode)
      output_tensor = activation_function(pre_act)
      output_tensor = tf.nn.dropout(output_tensor, rate=1-keep_prob)
    return output_tensor, w, b
  def build_encoder(self, input_tensor, activation_functions):
    """Build the encoder stack: all conv layers first, then all fc layers.

    Returns (enc_u_list, enc_w_list, enc_b_list); enc_u_list[0] is the
    input tensor and subsequent entries are each layer's activations.
    """
    enc_u_list = [input_tensor]
    enc_w_list = []
    enc_b_list = []
    prev_input_features = input_tensor.get_shape().as_list()[-1]
    # Make conv layers first
    for layer_id in range(self.num_enc_conv_layers):
      w_shape = [self.patch_size_y[layer_id], self.patch_size_x[layer_id],
        int(prev_input_features), int(self.enc_channels[layer_id])]
      u_out, w, b = self.layer_maker(layer_id, enc_u_list[layer_id],
        activation_functions[layer_id], w_shape, keep_prob=self.dropout[layer_id],
        conv=True, decode=False, tie_dec_weights=self.tie_dec_weights)
      enc_u_list.append(u_out)
      enc_w_list.append(w)
      enc_b_list.append(b)
      prev_input_features = int(self.enc_channels[layer_id])
    # Make fc layers second
    for enc_fc_layer_id in range(self.num_enc_fc_layers):
      layer_id = enc_fc_layer_id + self.num_enc_conv_layers
      if enc_fc_layer_id == 0: # Input needs to be reshaped to [batch, num_units] for FC layers
        in_tensor = self.flatten_feature_map(enc_u_list[-1])
        prev_input_features = in_tensor.get_shape().as_list()[1]
      else:
        in_tensor = enc_u_list[layer_id]
      w_shape = [int(prev_input_features), int(self.enc_channels[layer_id])]
      u_out, w, b = self.layer_maker(layer_id, in_tensor, activation_functions[layer_id],
        w_shape, keep_prob=self.dropout[layer_id], conv=False, decode=False,
        tie_dec_weights=self.tie_dec_weights)
      enc_u_list.append(u_out)
      enc_w_list.append(w)
      enc_b_list.append(b)
      prev_input_features = int(self.enc_channels[layer_id])
    return enc_u_list, enc_w_list, enc_b_list
  def build_decoder(self, input_tensor, activation_functions):
    """Build the decoder stack: all fc layers first, then all conv
    (transposed) layers — the mirror of build_encoder's ordering.

    Returns (dec_u_list, dec_w_list, dec_b_list); dec_u_list[0] is the
    latent input and subsequent entries are each layer's activations.
    """
    dec_u_list = [input_tensor]
    dec_w_list = []
    dec_b_list = []
    # Build FC layers first
    for dec_layer_id in range(self.num_dec_fc_layers):
      layer_id = self.num_enc_layers + dec_layer_id
      input_shape = dec_u_list[dec_layer_id].get_shape()
      if input_shape.ndims == 4: # if final enc layer was conv then flatten
        in_tensor = self.flatten_feature_map(dec_u_list[dec_layer_id])
      else: # final enc layer was fc
        in_tensor = dec_u_list[dec_layer_id]
      if dec_layer_id == self.num_dec_fc_layers - 1 and self.num_dec_conv_layers > 0:
        # If there are decoder conv layers, then
        # the last decoder FC layer needs to output a vector of the correct length
        # correct_length = feature_map_y * feature_map_x * feature_map_f
        # where feature_map_f = self.dec_channels[dec_layer_id]
        conv_layer_shapes = self.get_dec_shapes(self.data_tensor.get_shape())
        out_channels = np.prod(conv_layer_shapes[0])
      else:
        out_channels = self.dec_channels[dec_layer_id]
      w_shape = [in_tensor.get_shape()[-1], out_channels]
      u_out, w, b = self.layer_maker(layer_id, in_tensor,
        activation_functions[dec_layer_id], w_shape, keep_prob=self.dropout[layer_id],
        conv=False, decode=True, tie_dec_weights=self.tie_dec_weights)
      dec_u_list.append(u_out)
      dec_w_list.append(w)
      dec_b_list.append(b)
    # Build conv layers second
    for dec_conv_layer_id in range(self.num_dec_conv_layers):
      dec_layer_id = self.num_dec_fc_layers + dec_conv_layer_id
      layer_id = self.num_enc_layers + dec_layer_id
      input_shape = dec_u_list[dec_layer_id].get_shape()
      if input_shape.ndims == 4: # prev layer was conv
        (batch, y, x, f) = input_shape
        in_tensor = dec_u_list[dec_layer_id]
        w_shape = [
          self.patch_size_y[self.num_enc_conv_layers + dec_conv_layer_id],
          self.patch_size_x[self.num_enc_conv_layers + dec_conv_layer_id],
          self.dec_channels[dec_layer_id],
          f]
      else: # prev layer was fc
        # Reshape the fc vector back into a [batch, y, x, f] feature map
        # using the mirrored encoder geometry from get_dec_shapes.
        conv_layer_shapes = self.get_dec_shapes(self.data_tensor.get_shape())
        new_shape = [-1] + conv_layer_shapes[dec_conv_layer_id]
        in_tensor = tf.reshape(dec_u_list[dec_layer_id], new_shape)
        w_shape = [
          self.patch_size_y[self.num_enc_conv_layers + dec_conv_layer_id],
          self.patch_size_x[self.num_enc_conv_layers + dec_conv_layer_id],
          self.dec_channels[dec_layer_id],
          new_shape[-1]]
      u_out, w, b = self.layer_maker(layer_id, in_tensor, activation_functions[dec_layer_id],
        w_shape, keep_prob=self.dropout[layer_id], conv=True, decode=True,
        tie_dec_weights=self.tie_dec_weights)
      dec_u_list.append(u_out)
      dec_w_list.append(w)
      dec_b_list.append(b)
    return dec_u_list, dec_w_list, dec_b_list
def build_graph(self):
with tf.compat.v1.variable_scope(self.variable_scope) as scope:
with tf.compat.v1.variable_scope("weight_inits") as scope:
self.w_normal_init = tf.compat.v1.initializers.truncated_normal(mean=0.0, stddev=0.001)
self.w_xavier_init = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution=("uniform" if False else "truncated_normal"))
self.w_normed_enc_init = L2NormalizedTruncatedNormalInitializer(mean=0.0, stddev=0.001,
axis=0, epsilon=1e-12, dtype=tf.float32) #TODO: Fix axis to be general to conv layers
self.w_normed_dec_init = L2NormalizedTruncatedNormalInitializer(mean=0.0, stddev=0.001,
axis=-1, epsilon=1e-12, dtype=tf.float32)
self.b_init = tf.compat.v1.initializers.constant(1e-4)
self.u_list = [self.data_tensor]
self.w_list = []
self.b_list = []
enc_u_list, enc_w_list, enc_b_list = self.build_encoder(self.u_list[0],
self.act_funcs[:self.num_enc_layers])
self.u_list += enc_u_list[1:] # build_encoder() will place self.u_list[0] as enc_u_list[0]
self.w_list += enc_w_list
self.b_list += enc_b_list
with tf.compat.v1.variable_scope("inference") as scope:
self.a = tf.identity(enc_u_list[-1], name="activity")
dec_u_list, dec_w_list, dec_b_list = self.build_decoder(self.a,
self.act_funcs[self.num_enc_layers:])
self.u_list += dec_u_list[1:] # build_decoder() will place self.u_list[-1] as dec_u_list[0]
if not self.tie_dec_weights:
self.w_list += dec_w_list
self.b_list += dec_b_list
with tf.compat.v1.variable_scope("norm_weights") as scope:
w_enc_norm_dim = list(range(len(self.w_list[0].get_shape().as_list())-1))
self.norm_enc_w = self.w_list[0].assign(tf.nn.l2_normalize(self.w_list[0],
axis=w_enc_norm_dim, epsilon=1e-8, name="row_l2_norm"))
self.norm_dec_w = self.w_list[-1].assign(tf.nn.l2_normalize(self.w_list[-1],
axis=-1, epsilon=1e-8, name="col_l2_norm"))
self.norm_w = tf.group(self.norm_enc_w, self.norm_dec_w, name="l2_norm_weights")
for w,b in zip(self.w_list, self.b_list):
self.trainable_variables[w.name] = w
self.trainable_variables[b.name] = b
with tf.compat.v1.variable_scope("output") as scope:
self.reconstruction = tf.identity(self.u_list[-1], name="reconstruction")
self.compute_total_loss()
| [
"numpy.prod",
"tensorflow.shape",
"tensorflow.group",
"tensorflow.nn.dropout",
"tensorflow.nn.conv2d_transpose",
"tensorflow.matmul",
"tensorflow.compat.v1.keras.initializers.VarianceScaling",
"tensorflow.square",
"DeepSparseCoding.tf1x.ops.init_ops.L2NormalizedTruncatedNormalInitializer",
"tensor... | [((1547, 1570), 'DeepSparseCoding.tf1x.utils.trainable_variable_dict.TrainableVariableDict', 'TrainableVariableDict', ([], {}), '()\n', (1568, 1570), False, 'from DeepSparseCoding.tf1x.utils.trainable_variable_dict import TrainableVariableDict\n'), ((4283, 4348), 'numpy.all', 'np.all', (["('conv' in self.enc_layer_types[:self.num_enc_conv_layers])"], {}), "('conv' in self.enc_layer_types[:self.num_enc_conv_layers])\n", (4289, 4348), True, 'import numpy as np\n'), ((4492, 4553), 'numpy.all', 'np.all', (["('fc' in self.dec_layer_types[:self.num_dec_fc_layers])"], {}), "('fc' in self.dec_layer_types[:self.num_dec_fc_layers])\n", (4498, 4553), True, 'import numpy as np\n'), ((5648, 5685), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""w_norm"""'], {}), "('w_norm')\n", (5675, 5685), True, 'import tensorflow as tf\n'), ((6102, 6145), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""unsupervised"""'], {}), "('unsupervised')\n", (6129, 6145), True, 'import tensorflow as tf\n'), ((6390, 6433), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""unsupervised"""'], {}), "('unsupervised')\n", (6417, 6433), True, 'import tensorflow as tf\n'), ((7289, 7324), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""loss"""'], {}), "('loss')\n", (7316, 7324), True, 'import tensorflow as tf\n'), ((7930, 7980), 'tensorflow.reshape', 'tf.reshape', (['feature_map', '[-1, prev_input_features]'], {}), '(feature_map, [-1, prev_input_features])\n', (7940, 7980), True, 'import tensorflow as tf\n'), ((12237, 12353), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': 'b_name', 'shape': 'b_shape', 'dtype': 'tf.float32', 'initializer': 'self.b_init', 'trainable': '(True)'}), '(name=b_name, shape=b_shape, dtype=tf.float32,\n initializer=self.b_init, trainable=True)\n', (12262, 12353), True, 'import tensorflow as tf\n'), ((12519, 12567), 
'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_tensor'], {'rate': '(1 - keep_prob)'}), '(output_tensor, rate=1 - keep_prob)\n', (12532, 12567), True, 'import tensorflow as tf\n'), ((17256, 17304), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (17283, 17304), True, 'import tensorflow as tf\n'), ((6009, 6030), 'tensorflow.add_n', 'tf.add_n', (['w_norm_list'], {}), '(w_norm_list)\n', (6017, 6030), True, 'import tensorflow as tf\n'), ((6286, 6308), 'tensorflow.add_n', 'tf.add_n', (['w_decay_list'], {}), '(w_decay_list)\n', (6294, 6308), True, 'import tensorflow as tf\n'), ((10271, 10297), 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'w'], {}), '(input_tensor, w)\n', (10280, 10297), True, 'import tensorflow as tf\n'), ((11224, 11347), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': 'w_name', 'shape': 'w_shape', 'dtype': 'tf.float32', 'initializer': 'self.w_normal_init', 'trainable': '(True)'}), '(name=w_name, shape=w_shape, dtype=tf.float32,\n initializer=self.w_normal_init, trainable=True)\n', (11249, 11347), True, 'import tensorflow as tf\n'), ((15308, 15337), 'numpy.prod', 'np.prod', (['conv_layer_shapes[0]'], {}), '(conv_layer_shapes[0])\n', (15315, 15337), True, 'import numpy as np\n'), ((16589, 16636), 'tensorflow.reshape', 'tf.reshape', (['dec_u_list[dec_layer_id]', 'new_shape'], {}), '(dec_u_list[dec_layer_id], new_shape)\n', (16599, 16636), True, 'import tensorflow as tf\n'), ((17326, 17369), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""weight_inits"""'], {}), "('weight_inits')\n", (17353, 17369), True, 'import tensorflow as tf\n'), ((17409, 17475), 'tensorflow.compat.v1.initializers.truncated_normal', 'tf.compat.v1.initializers.truncated_normal', ([], {'mean': '(0.0)', 'stddev': '(0.001)'}), '(mean=0.0, stddev=0.001)\n', (17451, 17475), True, 'import tensorflow as tf\n'), ((17505, 17640), 
'tensorflow.compat.v1.keras.initializers.VarianceScaling', 'tf.compat.v1.keras.initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_avg"""', 'distribution': "('uniform' if False else 'truncated_normal')"}), "(scale=1.0, mode='fan_avg',\n distribution='uniform' if False else 'truncated_normal')\n", (17552, 17640), True, 'import tensorflow as tf\n'), ((17672, 17779), 'DeepSparseCoding.tf1x.ops.init_ops.L2NormalizedTruncatedNormalInitializer', 'L2NormalizedTruncatedNormalInitializer', ([], {'mean': '(0.0)', 'stddev': '(0.001)', 'axis': '(0)', 'epsilon': '(1e-12)', 'dtype': 'tf.float32'}), '(mean=0.0, stddev=0.001, axis=0,\n epsilon=1e-12, dtype=tf.float32)\n', (17710, 17779), False, 'from DeepSparseCoding.tf1x.ops.init_ops import L2NormalizedTruncatedNormalInitializer\n'), ((17864, 17972), 'DeepSparseCoding.tf1x.ops.init_ops.L2NormalizedTruncatedNormalInitializer', 'L2NormalizedTruncatedNormalInitializer', ([], {'mean': '(0.0)', 'stddev': '(0.001)', 'axis': '(-1)', 'epsilon': '(1e-12)', 'dtype': 'tf.float32'}), '(mean=0.0, stddev=0.001, axis=-1,\n epsilon=1e-12, dtype=tf.float32)\n', (17902, 17972), False, 'from DeepSparseCoding.tf1x.ops.init_ops import L2NormalizedTruncatedNormalInitializer\n'), ((18001, 18043), 'tensorflow.compat.v1.initializers.constant', 'tf.compat.v1.initializers.constant', (['(0.0001)'], {}), '(0.0001)\n', (18035, 18043), True, 'import tensorflow as tf\n'), ((18423, 18463), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""inference"""'], {}), "('inference')\n", (18450, 18463), True, 'import tensorflow as tf\n'), ((18491, 18535), 'tensorflow.identity', 'tf.identity', (['enc_u_list[-1]'], {'name': '"""activity"""'}), "(enc_u_list[-1], name='activity')\n", (18502, 18535), True, 'import tensorflow as tf\n'), ((18862, 18905), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""norm_weights"""'], {}), "('norm_weights')\n", (18889, 18905), True, 'import tensorflow as tf\n'), ((19308, 
19374), 'tensorflow.group', 'tf.group', (['self.norm_enc_w', 'self.norm_dec_w'], {'name': '"""l2_norm_weights"""'}), "(self.norm_enc_w, self.norm_dec_w, name='l2_norm_weights')\n", (19316, 19374), True, 'import tensorflow as tf\n'), ((19524, 19561), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""output"""'], {}), "('output')\n", (19551, 19561), True, 'import tensorflow as tf\n'), ((19602, 19653), 'tensorflow.identity', 'tf.identity', (['self.u_list[-1]'], {'name': '"""reconstruction"""'}), "(self.u_list[-1], name='reconstruction')\n", (19613, 19653), True, 'import tensorflow as tf\n'), ((10030, 10105), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['input_tensor', 'w', 'out_shape', 'strides'], {'padding': '"""SAME"""'}), "(input_tensor, w, out_shape, strides, padding='SAME')\n", (10052, 10105), True, 'import tensorflow as tf\n'), ((10157, 10233), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filters': 'w', 'strides': 'strides', 'padding': '"""SAME"""'}), "(input=input_tensor, filters=w, strides=strides, padding='SAME')\n", (10169, 10233), True, 'import tensorflow as tf\n'), ((11415, 11538), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': 'w_name', 'shape': 'w_shape', 'dtype': 'tf.float32', 'initializer': 'self.w_xavier_init', 'trainable': '(True)'}), '(name=w_name, shape=w_shape, dtype=tf.float32,\n initializer=self.w_xavier_init, trainable=True)\n', (11440, 11538), True, 'import tensorflow as tf\n'), ((19046, 19141), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.w_list[0]'], {'axis': 'w_enc_norm_dim', 'epsilon': '(1e-08)', 'name': '"""row_l2_norm"""'}), "(self.w_list[0], axis=w_enc_norm_dim, epsilon=1e-08, name\n ='row_l2_norm')\n", (19064, 19141), True, 'import tensorflow as tf\n'), ((19196, 19275), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.w_list[-1]'], {'axis': '(-1)', 'epsilon': '(1e-08)', 'name': '"""col_l2_norm"""'}), 
"(self.w_list[-1], axis=-1, epsilon=1e-08, name='col_l2_norm')\n", (19214, 19275), True, 'import tensorflow as tf\n'), ((6196, 6208), 'tensorflow.square', 'tf.square', (['w'], {}), '(w)\n', (6205, 6208), True, 'import tensorflow as tf\n'), ((6821, 6853), 'tensorflow.shape', 'tf.shape', ([], {'input': 'self.data_tensor'}), '(input=self.data_tensor)\n', (6829, 6853), True, 'import tensorflow as tf\n'), ((9554, 9582), 'tensorflow.shape', 'tf.shape', ([], {'input': 'input_tensor'}), '(input=input_tensor)\n', (9562, 9582), True, 'import tensorflow as tf\n'), ((9704, 9732), 'tensorflow.shape', 'tf.shape', ([], {'input': 'input_tensor'}), '(input=input_tensor)\n', (9712, 9732), True, 'import tensorflow as tf\n'), ((9621, 9649), 'tensorflow.shape', 'tf.shape', ([], {'input': 'input_tensor'}), '(input=input_tensor)\n', (9629, 9649), True, 'import tensorflow as tf\n'), ((9770, 9798), 'tensorflow.shape', 'tf.shape', ([], {'input': 'input_tensor'}), '(input=input_tensor)\n', (9778, 9798), True, 'import tensorflow as tf\n'), ((9860, 9888), 'tensorflow.shape', 'tf.shape', ([], {'input': 'input_tensor'}), '(input=input_tensor)\n', (9868, 9888), True, 'import tensorflow as tf\n'), ((9971, 9988), 'tensorflow.shape', 'tf.shape', ([], {'input': 'w'}), '(input=w)\n', (9979, 9988), True, 'import tensorflow as tf\n'), ((11630, 11757), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': 'w_name', 'shape': 'w_shape', 'dtype': 'tf.float32', 'initializer': 'self.w_normed_dec_init', 'trainable': '(True)'}), '(name=w_name, shape=w_shape, dtype=tf.float32,\n initializer=self.w_normed_dec_init, trainable=True)\n', (11655, 11757), True, 'import tensorflow as tf\n'), ((11794, 11921), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': 'w_name', 'shape': 'w_shape', 'dtype': 'tf.float32', 'initializer': 'self.w_normed_enc_init', 'trainable': '(True)'}), '(name=w_name, shape=w_shape, dtype=tf.float32,\n initializer=self.w_normed_enc_init, 
trainable=True)\n', (11819, 11921), True, 'import tensorflow as tf\n'), ((7133, 7178), 'tensorflow.subtract', 'tf.subtract', (['reconstruction', 'self.data_tensor'], {}), '(reconstruction, self.data_tensor)\n', (7144, 7178), True, 'import tensorflow as tf\n'), ((5887, 5899), 'tensorflow.square', 'tf.square', (['w'], {}), '(w)\n', (5896, 5899), True, 'import tensorflow as tf\n')] |
# ---
# jupyter:
# jupytext:
# metadata_filter:
# cells:
# additional: all
# notebook:
# additional: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.7.2
# ---
# %% [markdown]
# This is a `bqplot` recreation of Mike Bostock's [Wealth of Nations](https://bost.ocks.org/mike/nations/). This was also done by [Gapminder](http://www.gapminder.org/world/#$majorMode=chart$is;shi=t;ly=2003;lb=f;il=t;fs=11;al=30;stl=t;st=t;nsl=t;se=t$wst;tts=C$ts;sp=5.59290322580644;ti=2013$zpv;v=0$inc_x;mmid=XCOORDS;iid=phAwcNAVuyj1jiMAkmq1iMg;by=ind$inc_y;mmid=YCOORDS;iid=phAwcNAVuyj2tPLxKvvnNPA;by=ind$inc_s;uniValue=8.21;iid=phAwcNAVuyj0XOoBL_n5tAQ;by=ind$inc_c;uniValue=255;gid=CATID0;by=grp$map_x;scale=log;dataMin=194;dataMax=96846$map_y;scale=lin;dataMin=23;dataMax=86$map_s;sma=49;smi=2.65$cd;bd=0$inds=;modified=60). It is originally based on a TED Talk by [<NAME>](http://www.ted.com/talks/hans_rosling_shows_the_best_stats_you_ve_ever_seen).
# %% {"collapsed": true}
# %reset -f
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina' # High-res graphs
# %config InlineBackend.print_figure_kwargs = {'bbox_inches':'tight'} # No extra white space
# %config InlineBackend.figure_format = 'svg' # 'png' is default
import pandas as pd
import numpy as np
import os
from bqplot import (
LogScale, LinearScale, OrdinalColorScale, ColorAxis,
Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip
)
from ipywidgets import HBox, VBox, IntSlider, Play, jslink
# %% {"collapsed": true}
initial_year = 1800  # first year in the dataset; the animation starts here
# %% [markdown]
# #### Cleaning and Formatting JSON Data
# %% {"collapsed": true, "scrolled": false}
# Each row is a country; income/lifeExpectancy/population hold (year, value) series.
data = pd.read_json(os.path.abspath("data/nations.json"))
# %% {"collapsed": true}
def clean_data(data):
    """Drop countries whose time series are too short to work with.

    Any row where one of the income / lifeExpectancy / population series
    contains four or fewer points is removed; the filtered frame is returned.
    """
    for series_name in ('income', 'lifeExpectancy', 'population'):
        too_short = data[series_name].apply(len) <= 4
        data = data.drop(data[too_short].index)
    return data
def extrap_interp(data):
    """Linearly interpolate a (year, value) series onto the 1800-2008 grid."""
    points = np.array(data)
    years = np.arange(1800, 2009, 1.)
    return np.interp(years, points[:, 0], points[:, 1])
def extrap_data(data):
    """Replace each country's raw series with its interpolated 1800-2008 version."""
    series_names = ('income', 'lifeExpectancy', 'population')
    for name in series_names:
        data[name] = data[name].apply(extrap_interp)
    return data
# %% {"collapsed": true}
# Drop short series, then interpolate the rest onto a common 1800-2008 grid.
data = clean_data(data)
data = extrap_data(data)
# %% {"collapsed": true, "scrolled": true}
# Global ranges used below to fix the axis and bubble-size scales.
income_min, income_max = np.min(data['income'].apply(np.min)), np.max(data['income'].apply(np.max))
life_exp_min, life_exp_max = np.min(data['lifeExpectancy'].apply(np.min)), np.max(data['lifeExpectancy'].apply(np.max))
pop_min, pop_max = np.min(data['population'].apply(np.min)), np.max(data['population'].apply(np.max))
# %% {"collapsed": true}
def get_data(year):
    """Return the (income, life expectancy, population) cross-section for *year*."""
    offset = year - 1800

    def column_at_year(column):
        # Each cell holds a 209-element series indexed from year 1800.
        return data[column].apply(lambda values: values[offset])

    return (column_at_year('income'),
            column_at_year('lifeExpectancy'),
            column_at_year('population'))
# %% [markdown]
# #### Creating the Tooltip to display the required fields
#
# `bqplot`'s native `Tooltip` allows us to simply display the data fields we require on a mouse-interaction.
# %% {"collapsed": true}
# Tooltip shown on hover: country name plus the point's x/y values.
tt = Tooltip(fields=['name', 'x', 'y'], labels=['Country Name', 'Income per Capita', 'Life Expectancy'])
# %% [markdown]
# #### Creating the Label to display the year
#
# Staying true to the `d3` recreation of the talk, we place a `Label` widget in the bottom-right of the `Figure` (it inherits the `Figure` co-ordinates when no scale is passed to it). With `enable_move` set to `True`, the `Label` can be dragged around.
# %% {"collapsed": true}
# Draggable year readout in the bottom-right (figure-relative coordinates).
year_label = Label(x=[0.75], y=[0.10], default_size=46, font_weight='bolder', colors=['orange'],
                   text=[str(initial_year)], enable_move=True)
# %% [markdown]
# #### Defining Axes and Scales
#
# The inherent skewness of the income data favors the use of a `LogScale`. Also, since the color coding by regions does not follow an ordering, we use the `OrdinalColorScale`.
# %% {"collapsed": true}
# Income is heavily skewed, so use a log scale on x; regions get categorical colors.
x_sc = LogScale(min=income_min, max=income_max)
y_sc = LinearScale(min=life_exp_min, max=life_exp_max)
c_sc = OrdinalColorScale(domain=data['region'].unique().tolist(), colors=CATEGORY10[:6])
size_sc = LinearScale(min=pop_min, max=pop_max)
# %% {"collapsed": true}
ax_y = Axis(label='Life Expectancy', scale=y_sc, orientation='vertical', side='left', grid_lines='solid')
ax_x = Axis(label='Income per Capita', scale=x_sc, grid_lines='solid')
# %% [markdown]
# #### Creating the Scatter Mark with the appropriate size and color parameters passed
#
# To generate the appropriate graph, we need to pass the population of the country to the `size` attribute and its region to the `color` attribute.
# %% {"collapsed": true}
# Start with the first year's data
cap_income, life_exp, pop = get_data(initial_year)
# %% {"collapsed": true}
# Bubble chart: x = income, y = life expectancy, bubble size = population, color = region.
wealth_scat = Scatter(x=cap_income, y=life_exp, color=data['region'], size=pop,
                      names=data['name'], display_names=False,
                      scales={'x': x_sc, 'y': y_sc, 'color': c_sc, 'size': size_sc},
                      default_size=4112, tooltip=tt, animate=True, stroke='Black',
                      unhovered_style={'opacity': 0.5})
# %% {"collapsed": true}
# Hidden line that traces one country's full history when it is hovered.
nation_line = Lines(x=data['income'][0], y=data['lifeExpectancy'][0], colors=['Gray'],
                    scales={'x': x_sc, 'y': y_sc}, visible=False)
# %% [markdown]
# #### Creating the Figure
# %% {"collapsed": true}
time_interval = 10  # animation frame duration in ms (also the Play widget interval)
# %% {"collapsed": true}
fig = Figure(marks=[wealth_scat, year_label, nation_line], axes=[ax_x, ax_y],
             title='Health and Wealth of Nations', animation_duration=time_interval)
# %% [markdown]
# #### Using a Slider to allow the user to change the year and a button for animation
#
# Here we see how we can seamlessly integrate `bqplot` into the jupyter widget infrastructure.
# %% {"collapsed": true}
# The slider drives the displayed year; see year_changed() below.
year_slider = IntSlider(min=1800, max=2008, step=1, description='Year', value=initial_year)
# %% [markdown]
# When the `hovered_point` of the `Scatter` plot is changed (i.e. when the user hovers over a different element), the entire path of that country is displayed by making the `Lines` object visible and setting it's `x` and `y` attributes.
# %% {"collapsed": true}
def hover_changed(change):
    """Show the hovered country's full trajectory, or hide it on mouse-out."""
    point = change.new
    if point is None:
        nation_line.visible = False
        return
    country = wealth_scat.names[point]
    row = data[data['name'] == country]
    nation_line.x = row['income'].values[0]
    nation_line.y = row['lifeExpectancy'].values[0]
    nation_line.visible = True
# Redraw the trajectory line whenever the hovered point changes.
wealth_scat.observe(hover_changed, 'hovered_point')
# %% [markdown]
# On the slider value `callback` (a function that is triggered every time the `value` of the slider is changed) we change the `x`, `y` and `size` co-ordinates of the `Scatter`. We also update the `text` of the `Label` to reflect the current year.
# %% {"collapsed": true}
def year_changed(change):
    """Refresh the scatter data and the year label when the slider moves."""
    year = year_slider.value
    wealth_scat.x, wealth_scat.y, wealth_scat.size = get_data(year)
    year_label.text = [str(year)]
year_slider.observe(year_changed, 'value')
# %% [markdown]
# #### Add an animation button
# %% {"collapsed": true}
# The Play widget steps through years; jslink keeps it in sync with the slider
# entirely on the browser side (no Python round-trip).
play_button = Play(min=1800, max=2008, interval=time_interval)
jslink((play_button, 'value'), (year_slider, 'value'))
# %% [markdown]
# #### Displaying the GUI
# %% {"scrolled": false}
VBox([HBox([play_button, year_slider]), fig])
| [
"ipywidgets.HBox",
"bqplot.Axis",
"ipywidgets.IntSlider",
"ipywidgets.Play",
"os.path.abspath",
"bqplot.Scatter",
"numpy.array",
"ipywidgets.jslink",
"bqplot.Tooltip",
"bqplot.Lines",
"bqplot.Figure",
"numpy.interp",
"bqplot.LinearScale",
"bqplot.LogScale",
"numpy.arange"
] | [((3611, 3714), 'bqplot.Tooltip', 'Tooltip', ([], {'fields': "['name', 'x', 'y']", 'labels': "['Country Name', 'Income per Capita', 'Life Expectancy']"}), "(fields=['name', 'x', 'y'], labels=['Country Name',\n 'Income per Capita', 'Life Expectancy'])\n", (3618, 3714), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((4476, 4516), 'bqplot.LogScale', 'LogScale', ([], {'min': 'income_min', 'max': 'income_max'}), '(min=income_min, max=income_max)\n', (4484, 4516), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((4524, 4571), 'bqplot.LinearScale', 'LinearScale', ([], {'min': 'life_exp_min', 'max': 'life_exp_max'}), '(min=life_exp_min, max=life_exp_max)\n', (4535, 4571), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((4671, 4708), 'bqplot.LinearScale', 'LinearScale', ([], {'min': 'pop_min', 'max': 'pop_max'}), '(min=pop_min, max=pop_max)\n', (4682, 4708), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((4742, 4845), 'bqplot.Axis', 'Axis', ([], {'label': '"""Life Expectancy"""', 'scale': 'y_sc', 'orientation': '"""vertical"""', 'side': '"""left"""', 'grid_lines': '"""solid"""'}), "(label='Life Expectancy', scale=y_sc, orientation='vertical', side=\n 'left', grid_lines='solid')\n", (4746, 4845), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((4848, 4911), 'bqplot.Axis', 'Axis', ([], {'label': '"""Income per Capita"""', 'scale': 'x_sc', 'grid_lines': '"""solid"""'}), "(label='Income per Capita', scale=x_sc, grid_lines='solid')\n", (4852, 4911), False, 'from bqplot import LogScale, LinearScale, 
OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((5318, 5596), 'bqplot.Scatter', 'Scatter', ([], {'x': 'cap_income', 'y': 'life_exp', 'color': "data['region']", 'size': 'pop', 'names': "data['name']", 'display_names': '(False)', 'scales': "{'x': x_sc, 'y': y_sc, 'color': c_sc, 'size': size_sc}", 'default_size': '(4112)', 'tooltip': 'tt', 'animate': '(True)', 'stroke': '"""Black"""', 'unhovered_style': "{'opacity': 0.5}"}), "(x=cap_income, y=life_exp, color=data['region'], size=pop, names=\n data['name'], display_names=False, scales={'x': x_sc, 'y': y_sc,\n 'color': c_sc, 'size': size_sc}, default_size=4112, tooltip=tt, animate\n =True, stroke='Black', unhovered_style={'opacity': 0.5})\n", (5325, 5596), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((5711, 5833), 'bqplot.Lines', 'Lines', ([], {'x': "data['income'][0]", 'y': "data['lifeExpectancy'][0]", 'colors': "['Gray']", 'scales': "{'x': x_sc, 'y': y_sc}", 'visible': '(False)'}), "(x=data['income'][0], y=data['lifeExpectancy'][0], colors=['Gray'],\n scales={'x': x_sc, 'y': y_sc}, visible=False)\n", (5716, 5833), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((5974, 6121), 'bqplot.Figure', 'Figure', ([], {'marks': '[wealth_scat, year_label, nation_line]', 'axes': '[ax_x, ax_y]', 'title': '"""Health and Wealth of Nations"""', 'animation_duration': 'time_interval'}), "(marks=[wealth_scat, year_label, nation_line], axes=[ax_x, ax_y],\n title='Health and Wealth of Nations', animation_duration=time_interval)\n", (5980, 6121), False, 'from bqplot import LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, Lines, CATEGORY10, Label, Figure, Tooltip\n'), ((6372, 6449), 'ipywidgets.IntSlider', 'IntSlider', ([], {'min': '(1800)', 'max': '(2008)', 'step': '(1)', 'description': 
'"""Year"""', 'value': 'initial_year'}), "(min=1800, max=2008, step=1, description='Year', value=initial_year)\n", (6381, 6449), False, 'from ipywidgets import HBox, VBox, IntSlider, Play, jslink\n'), ((7705, 7753), 'ipywidgets.Play', 'Play', ([], {'min': '(1800)', 'max': '(2008)', 'interval': 'time_interval'}), '(min=1800, max=2008, interval=time_interval)\n', (7709, 7753), False, 'from ipywidgets import HBox, VBox, IntSlider, Play, jslink\n'), ((7754, 7808), 'ipywidgets.jslink', 'jslink', (["(play_button, 'value')", "(year_slider, 'value')"], {}), "((play_button, 'value'), (year_slider, 'value'))\n", (7760, 7808), False, 'from ipywidgets import HBox, VBox, IntSlider, Play, jslink\n'), ((2097, 2133), 'os.path.abspath', 'os.path.abspath', (['"""data/nations.json"""'], {}), "('data/nations.json')\n", (2112, 2133), False, 'import os\n'), ((2365, 2379), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2373, 2379), True, 'import numpy as np\n'), ((2394, 2420), 'numpy.arange', 'np.arange', (['(1800)', '(2009)', '(1.0)'], {}), '(1800, 2009, 1.0)\n', (2403, 2420), True, 'import numpy as np\n'), ((2434, 2476), 'numpy.interp', 'np.interp', (['x_range', 'data[:, 0]', 'data[:, 1]'], {}), '(x_range, data[:, 0], data[:, 1])\n', (2443, 2476), True, 'import numpy as np\n'), ((7884, 7916), 'ipywidgets.HBox', 'HBox', (['[play_button, year_slider]'], {}), '([play_button, year_slider])\n', (7888, 7916), False, 'from ipywidgets import HBox, VBox, IntSlider, Play, jslink\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
try:
import cPickle as pickle
except:
import pickle
import numpy as np
import scipy.misc
IMAGE_SIZE = 32  # side length (pixels) of the square, preprocessed images
def to_one_hot(y, num_label):
  """Expand integer labels *y* into one-hot rows of width *num_label*."""
  identity = np.eye(num_label)
  return identity[y]
def load_character_image(path, preprocess=False, normalize=None):
  """Load an image from disk as a 2-D grayscale float array.

  Args:
    path: path to the image file.
    preprocess: when True, resize to IMAGE_SIZE x IMAGE_SIZE using bilinear
      interpolation. Default is False.
    normalize: optional (mean, std) pair of flat arrays matching the image
      size; each pixel is shifted by the mean and divided by the std.
      Default is None.

  Returns:
    A two-dimensional floating-point array of grayscale pixel values.
  """
  img = scipy.misc.imread(path, flatten=True)
  if preprocess:
    img = scipy.misc.imresize(img, (IMAGE_SIZE, IMAGE_SIZE), interp='bilinear')
  if normalize is None:
    return img
  mean, std = normalize
  flattened = (img.ravel() - mean) / std
  return flattened.reshape(img.shape)
class ImageDataset:
  """A class that contains datasets of images

  On construction, this class loads images and labels from the specified path.
  As a convention for this project, this path can either be
  'dataset/detectorData' or 'dataset/recognizerData'. Each of these paths
  contains a full training set and a test set.
  The full training set is later split into training set and validation set. On
  splitting, the mean and standard deviation of each feature in the training set
  is recorded as mean_train and std_train. These two arrays are used to
  normalize all training set, validation set and test set.

  Attributes:
    X_train: input of training set
    y_train: label of training set
    X_val: input of validation set
    y_val: label of validation set
    X_test: input of test set
    y_test: label of test set
    mean_train: mean of the training set
    std_train: standard deviation of the training set
  """

  def __init__(self, location_prefix, **kwargs):
    """Loads the datasets rooted at location_prefix.

    Keyword Args:
      mean_train, std_train: normalization stats used when the training split
        is not loaded (defaults 0 and 1; overwritten by _split_train_val when
        training data is loaded).
      test_only: when True, only the test set is loaded. Default is False.
      train_val_ratio: fraction of the full training set kept for training.
        Default is 0.85.
    """
    self.mean_train = kwargs.get('mean_train', 0)
    self.std_train = kwargs.get('std_train', 1)
    self.test_only = kwargs.get('test_only', False)
    self.train_val_ratio = kwargs.get('train_val_ratio', 0.85)
    self._load_character_images(location_prefix)

  def _load_character_images(self, prefix):
    # Load the full training split (unless test_only), split it into
    # train/val, then load the test split and normalize it with the
    # training-set statistics.
    if not self.test_only:
      train_label_path = os.path.join(prefix, 'trainLabels.csv')
      self.all_y_train = np.genfromtxt(train_label_path, delimiter=',', usecols=1,
                                      dtype=np.int32)
      N_all_train = self.all_y_train.shape[0]
      print('Loading train set...')
      all_X_train = []
      # Image files are named 1.png, 2.png, ... (1-based).
      # NOTE: xrange replaced with range for Python 2/3 compatibility,
      # consistent with this file's __future__ imports and cPickle fallback.
      for i in range(1, N_all_train + 1):
        img_path = os.path.join(prefix, 'train', str(i) + '.png')
        I = load_character_image(img_path)
        all_X_train.append(I.ravel())
      print('Finished loading train set')
      self.all_X_train = np.array(all_X_train)
      self._split_train_val()
      self.X_train = self.normalize(self.X_train)
      self.X_val = self.normalize(self.X_val)
    test_label_path = os.path.join(prefix, 'testLabels.csv')
    self.y_test = np.genfromtxt(test_label_path, delimiter=',', usecols=1,
                                dtype=np.int32)
    N_test = self.y_test.shape[0]
    print('Loading test set...')
    X_test = []
    for i in range(1, N_test + 1):
      img_path = os.path.join(prefix, 'test', str(i) + '.png')
      I = load_character_image(img_path)
      X_test.append(I.ravel())
    print('Finished loading test set')
    self.X_test = np.array(X_test)
    self.X_test = self.normalize(self.X_test)

  def normalize(self, sample):
    """Standardizes sample with the training-set mean and std (no-op if unset)."""
    if self.mean_train is not None:
      return (sample - self.mean_train) / self.std_train
    return sample

  def save_normalize(self, path):
    """Saves normalization values to a file

    Normalization values are the mean and standard deviation of the training
    set. This is saved as a tuple (mean, std) to a pickle file.

    Args:
      path: path to the pickle file that the normalization is saved to.
    """
    if self.mean_train is not None:
      if not os.path.exists(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
      with open(path, 'wb') as f:
        pickle.dump((self.mean_train, self.std_train), f)

  def train_batches(self, batch_size):
    """Yields (X, y) training batches of batch_size, reshuffling each epoch.

    Args:
      batch_size: size of each batch
    """
    if self.test_only:
      # BUGFIX: previously this branch did a bare `yield` and then fell
      # through, yielding a spurious None and crashing on the missing
      # training arrays. Now the generator is simply empty.
      return
    self._shuffle_train()
    for start in range(0, self.X_train.shape[0], batch_size):
      yield ( self.X_train[start: start + batch_size],
              self.y_train[start: start + batch_size])

  def val_batches(self, batch_size):
    """Yields (X, y) validation batches of batch_size in a fixed order."""
    if self.test_only:
      # See train_batches: empty generator instead of a bare yield.
      return
    for start in range(0, self.X_val.shape[0], batch_size):
      yield ( self.X_val[start: start + batch_size],
              self.y_val[start: start + batch_size])

  def _split_train_val(self):
    # Shuffle the full training set, split it into train/val by
    # train_val_ratio, and record the training mean/std used by normalize().
    perm = np.arange(self.all_X_train.shape[0])
    np.random.shuffle(perm)
    self.all_X_train = self.all_X_train[perm]
    self.all_y_train = self.all_y_train[perm]
    train_val_split = int(self.all_y_train.shape[0] * self.train_val_ratio)
    self.X_train = self.all_X_train[:train_val_split]
    self.y_train = self.all_y_train[:train_val_split]
    self.X_val = self.all_X_train[train_val_split:]
    self.y_val = self.all_y_train[train_val_split:]
    self.mean_train = np.mean(self.X_train, axis=0)
    self.std_train = np.std(self.X_train, axis=0)

  def _shuffle_train(self):
    # In-place epoch shuffle: the same permutation is applied to X and y.
    perm = np.arange(self.X_train.shape[0])
    np.random.shuffle(perm)
    self.X_train = self.X_train[perm]
    self.y_train = self.y_train[perm]
| [
"numpy.mean",
"numpy.eye",
"pickle.dump",
"os.path.join",
"numpy.array",
"os.path.dirname",
"numpy.std",
"numpy.genfromtxt",
"numpy.arange",
"numpy.random.shuffle"
] | [((354, 371), 'numpy.eye', 'np.eye', (['num_label'], {}), '(num_label)\n', (360, 371), True, 'import numpy as np\n'), ((3542, 3580), 'os.path.join', 'os.path.join', (['prefix', '"""testLabels.csv"""'], {}), "(prefix, 'testLabels.csv')\n", (3554, 3580), False, 'import os\n'), ((3600, 3672), 'numpy.genfromtxt', 'np.genfromtxt', (['test_label_path'], {'delimiter': '""","""', 'usecols': '(1)', 'dtype': 'np.int32'}), "(test_label_path, delimiter=',', usecols=1, dtype=np.int32)\n", (3613, 3672), True, 'import numpy as np\n'), ((4019, 4035), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (4027, 4035), True, 'import numpy as np\n'), ((5442, 5478), 'numpy.arange', 'np.arange', (['self.all_X_train.shape[0]'], {}), '(self.all_X_train.shape[0])\n', (5451, 5478), True, 'import numpy as np\n'), ((5483, 5506), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (5500, 5506), True, 'import numpy as np\n'), ((5915, 5944), 'numpy.mean', 'np.mean', (['self.X_train'], {'axis': '(0)'}), '(self.X_train, axis=0)\n', (5922, 5944), True, 'import numpy as np\n'), ((5967, 5995), 'numpy.std', 'np.std', (['self.X_train'], {'axis': '(0)'}), '(self.X_train, axis=0)\n', (5973, 5995), True, 'import numpy as np\n'), ((6036, 6068), 'numpy.arange', 'np.arange', (['self.X_train.shape[0]'], {}), '(self.X_train.shape[0])\n', (6045, 6068), True, 'import numpy as np\n'), ((6073, 6096), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (6090, 6096), True, 'import numpy as np\n'), ((2822, 2861), 'os.path.join', 'os.path.join', (['prefix', '"""trainLabels.csv"""'], {}), "(prefix, 'trainLabels.csv')\n", (2834, 2861), False, 'import os\n'), ((2888, 2961), 'numpy.genfromtxt', 'np.genfromtxt', (['train_label_path'], {'delimiter': '""","""', 'usecols': '(1)', 'dtype': 'np.int32'}), "(train_label_path, delimiter=',', usecols=1, dtype=np.int32)\n", (2901, 2961), True, 'import numpy as np\n'), ((3365, 3386), 'numpy.array', 'np.array', (['all_X_train'], {}), 
'(all_X_train)\n', (3373, 3386), True, 'import numpy as np\n'), ((4712, 4761), 'pickle.dump', 'pickle.dump', (['(self.mean_train, self.std_train)', 'f'], {}), '((self.mean_train, self.std_train), f)\n', (4723, 4761), False, 'import pickle\n'), ((4603, 4624), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (4618, 4624), False, 'import os\n'), ((4647, 4668), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (4662, 4668), False, 'import os\n')] |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, num2date
from matplotlib import patches
import matplotlib.patches as mpatches
from matplotlib import ticker, cm, colors
import sys
sys.path.insert(0, "..")
sys.path.insert(0, "../utilities/")
def plot_acfs(rad="kap", fname="figs/acfs.png"):
w_range, v_range = np.arange(0, 1001, 5), np.arange(0, 1001, 5)
w, v = np.meshgrid(w_range, v_range)
cmap = cm.gray
cmap.set_bad(color="k")
levs = [10**c for c in np.linspace(-6, 0, 10, dtype=float)]
fig = plt.figure(figsize=(5, 4), dpi=100)
X = np.load("../../SuperDARN-Clustering/sd/data/%s.acfs.npy"%rad)
count = np.nansum(X)
X = X / count
ax = fig.add_subplot(111)
cs = ax.contour(w, v, X, levs, linewidths=0.5, colors='k', norm=colors.LogNorm())
ax.clabel(cs, levels=levs, inline=1, fontsize=6, fmt=matplotlib.ticker.LogFormatterSciNotation())
cntr = ax.contourf(w, v, X, levs, norm=colors.LogNorm(), cmap=cmap)
ax.set_xlim(5, 100)
ax.set_ylim(5, 100)
cb = fig.colorbar(cntr, ax=ax, shrink=0.7)
cb.set_label(r"$P(w,v), s^{2}m^{-2}$")
ax.set_xlabel(r"Spectral Width (W), $ms^{-1}$")
ax.set_ylabel(r"Velocity (V), $ms^{-1}$")
ax.plot(w_range, equations[0](w_range), ls="--", color="r", lw=1., label=r"$|v|+\frac{w}{3}\leq 30$")
ax.plot(w_range, equations[1](w_range), ls="--", color="b", lw=1., label=r"$|v|+\frac{w}{4}\leq 60$")
ax.plot(w_range, equations[2](w_range), ls="--", color="g", lw=1., label=r"$|v|-0.139w+0.00113w^2\leq 33.1$")
ax.text(0.25, 1.05, "Rad:"+rad +"(2011-2015)", horizontalalignment="center", verticalalignment="center", transform=ax.transAxes)
ax.text(0.75, 1.05, r"ACFs~%.2f$\times 10^6$"%(count/1e6), horizontalalignment="center", verticalalignment="center", transform=ax.transAxes)
fig.savefig(fname, bbox_inches="tight")
return | [
"sys.path.insert",
"matplotlib.use",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.ticker.LogFormatterSciNotation",
"numpy.meshgrid",
"numpy.nansum",
"numpy.load",
"numpy.arange",
"matplotlib.colors.LogNorm"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((301, 325), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (316, 325), False, 'import sys\n'), ((326, 361), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../utilities/"""'], {}), "(0, '../utilities/')\n", (341, 361), False, 'import sys\n'), ((491, 520), 'numpy.meshgrid', 'np.meshgrid', (['w_range', 'v_range'], {}), '(w_range, v_range)\n', (502, 520), True, 'import numpy as np\n'), ((643, 678), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)', 'dpi': '(100)'}), '(figsize=(5, 4), dpi=100)\n', (653, 678), True, 'import matplotlib.pyplot as plt\n'), ((687, 750), 'numpy.load', 'np.load', (["('../../SuperDARN-Clustering/sd/data/%s.acfs.npy' % rad)"], {}), "('../../SuperDARN-Clustering/sd/data/%s.acfs.npy' % rad)\n", (694, 750), True, 'import numpy as np\n'), ((761, 773), 'numpy.nansum', 'np.nansum', (['X'], {}), '(X)\n', (770, 773), True, 'import numpy as np\n'), ((435, 456), 'numpy.arange', 'np.arange', (['(0)', '(1001)', '(5)'], {}), '(0, 1001, 5)\n', (444, 456), True, 'import numpy as np\n'), ((458, 479), 'numpy.arange', 'np.arange', (['(0)', '(1001)', '(5)'], {}), '(0, 1001, 5)\n', (467, 479), True, 'import numpy as np\n'), ((596, 631), 'numpy.linspace', 'np.linspace', (['(-6)', '(0)', '(10)'], {'dtype': 'float'}), '(-6, 0, 10, dtype=float)\n', (607, 631), True, 'import numpy as np\n'), ((890, 906), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (904, 906), False, 'from matplotlib import ticker, cm, colors\n'), ((965, 1008), 'matplotlib.ticker.LogFormatterSciNotation', 'matplotlib.ticker.LogFormatterSciNotation', ([], {}), '()\n', (1006, 1008), False, 'import matplotlib\n'), ((1053, 1069), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (1067, 1069), False, 'from matplotlib import ticker, cm, colors\n')] |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# @file main.py
# @Author <NAME> (adityavaishampayan)
# @copyright MIT
# @brief main file for traditional methods of face swapping
method = 'thin_plate_spline'
import sys
# noinspection PyBroadException
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except BaseException:
pass
import cv2
import numpy as np
import dlib
import copy
from imutils import face_utils
from scripts.Traditional.forwardwarping.forward import for_warping
from scripts.Traditional.InverseWarping.inverse import inv_warping
from scripts.Traditional.ThinPlateSplines.ThinPlateSpline import *
def facial_landmark_detection(gray_img):
"""
Function for detecting facial landmarks
:param gray_img: grayscale image
:return: facial landmark points
"""
faces = detector(gray_img)
for face in faces:
landmarks = predictor(gray_img, face)
landmarks_points = []
for n in range(0, 68):
x = landmarks.part(n).x
y = landmarks.part(n).y
landmarks_points.append((x, y))
return landmarks_points
def extract_index_nparray(nparray):
"""
A function to extract numpy array indexes
:param nparray: numpy array
:return:index
"""
index = None
for num in nparray[0]:
index = num
break
return index
def delaunay_triangulation(convex_hull, landmarks_points):
"""
A function to perform delaunday triangulation
:param convex_hull: convex hull made from the facial landmark points
:param landmarks_points: facial landmark points
:return:
"""
rect = cv2.boundingRect(convex_hull)
subdiv = cv2.Subdiv2D(rect)
subdiv.insert(landmarks_points)
triangles = subdiv.getTriangleList()
triangles = np.array(triangles, dtype=np.int32)
landmarks_points = np.array(landmarks_points,np.int32)
idx_triangles = []
for t in triangles:
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
index_pt1 = np.where((landmarks_points == pt1).all(axis=1))
index_pt1 = extract_index_nparray(index_pt1)
index_pt2 = np.where((landmarks_points == pt2).all(axis=1))
index_pt2 = extract_index_nparray(index_pt2)
index_pt3 = np.where((landmarks_points == pt3).all(axis=1))
index_pt3 = extract_index_nparray(index_pt3)
if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
triangle = [index_pt1, index_pt2, index_pt3]
idx_triangles.append(triangle)
return idx_triangles
def features(img,detector,predictor,resize_val):
#initialize facial detector
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# gray = cv2.resize(gray,(resize_val,resize_val))
# img = cv2.resize(img,(resize_val,resize_val))
rects = detector(gray,1)
if np.shape(rects)[0] == 0:
shape = 0
feature_found = False
else:
feature_found = True
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for (i,rect) in enumerate(rects):
# print('before shape')
shape = predictor(gray,rect)
shape = face_utils.shape_to_np(shape)
# print('shape = ')
# print(shape)
(x,y,w,h) = face_utils.rect_to_bb(rect)
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
for (x,y) in shape:
cv2.circle(img,(x,y),2,(0,0,255),-1)
# plt.imshow(img)
# plt.show()
return img, shape, feature_found
if __name__ == '__main__':
########################################################
#img_tar = cv2.imread("/home/aditya/Desktop/to_add/FaceSwap/TestSet/Rambo.jpg")
#img_src = cv2.imread("/home/aditya/Desktop/to_add/FaceSwap/TestSet/Test1/frame16.jpg")
# Reading Image 1
img1 = cv2.imread("/home/aditya/Desktop/to_add/FaceSwap/images/bradley_cooper.jpg")
# converting image 1 to grayscale
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
# making a mask of image 1
mask = np.zeros_like(img1_gray)
########################################################
# reading the image 2
img2 = cv2.imread("/home/aditya/Desktop/to_add/FaceSwap/images/aditya.jpg")
# converting image 2 to gray scale
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
height, width, channels = img2.shape
img2_new_face = np.zeros((height, width, channels), np.uint8)
########################################################
# Initialising the facial landmark detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("/home/aditya/FaceSwap/shape_predictor_68_face_landmarks.dat")
########################################################
# FACE 1
face1_points = facial_landmark_detection(img1_gray)
f1_points = np.array(face1_points, np.int32)
# convex hull for face 1
convexhull1 = cv2.convexHull(f1_points)
# drawing the convex hull
# cv2.polylines(img1, [convexhull1], True, (255, 0, 0), 3)
# obtaining the mask of face 1
cv2.fillConvexPoly(mask, convexhull1, 255)
# extracting the outline of face 1
face_image_1 = cv2.bitwise_and(img1, img1, mask=mask)
########################################################
# FACE 2
face2_points = facial_landmark_detection(img2_gray)
f2_points = np.array(face2_points, np.int32)
convexhull2 = cv2.convexHull(f2_points)
# drawing the convex hull
#cv2.polylines(img2, [convexhull2], True, (255, 0, 0), 3)
lines_space_mask = np.zeros_like(img1_gray)
rect = cv2.boundingRect(convexhull1)
subdiv = cv2.Subdiv2D(rect)
subdiv.insert(face1_points)
triangles = subdiv.getTriangleList()
triangles = np.array(triangles, dtype=np.int32)
indexes_triangle1 = delaunay_triangulation(convexhull1,face1_points)
########################################################
if method == 'forward_warp':
for_warping(indexes_triangle1, img1, img2, face1_points, face2_points,
lines_space_mask, img2_new_face)
img2_face_mask = np.zeros_like(img2_gray)
img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)
img2_face_mask = cv2.bitwise_not(img2_head_mask)
img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)
result = cv2.add(img2_head_noface, img2_new_face)
(x, y, w, h) = cv2.boundingRect(convexhull2)
center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))
seamlessclone = cv2.seamlessClone(result, img2, img2_head_mask, center_face2, cv2.NORMAL_CLONE)
cv2.imshow("seamlessclone", seamlessclone)
if method == 'inverse_warp':
inv_warping(indexes_triangle1, img1, img2, face1_points, face2_points, lines_space_mask, img2_new_face)
img2_face_mask = np.zeros_like(img2_gray)
img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)
img2_face_mask = cv2.bitwise_not(img2_head_mask)
img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)
result = cv2.add(img2_head_noface, img2_new_face)
(x, y, w, h) = cv2.boundingRect(convexhull2)
center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))
seamlessclone = cv2.seamlessClone(result, img2, img2_head_mask, center_face2, cv2.MIXED_CLONE)
cv2.imshow("seamlessclone", seamlessclone)
if method == 'thin_plate_spline':
img_src = cv2.imread("/home/aditya/Desktop/to_add/FaceSwap/images/bradley_cooper.jpg")
img_tar = cv2.imread("/home/aditya/Desktop/to_add/FaceSwap/images/aditya.jpg")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("/home/aditya/Desktop/to_add/FaceSwap/shape_predictor_68_face_landmarks.dat")
rects = detector(img_src, 1)
img_source = copy.deepcopy(img_src)
img_target = img_tar.copy()
if len(rects) == 1:
img_source = img_source[rects[0].top() - 40:rects[0].bottom() + 40,
rects[0].left() - 40:rects[0].right() + 40, :]
elif len(rects) > 1:
img_target = img_source[rects[1].top() - 40:rects[1].bottom() + 40,
rects[1].left() - 40:rects[1].right() + 40, :]
img_source = img_source[rects[0].top() - 40:rects[0].bottom() + 40,
rects[0].left() - 40:rects[0].right() + 40, :]
else:
# cv2.imwrite('./result1/' + str(i) + '.jpg', img_src)
# print("2 faces not detected")
pass
img1 = img_source.copy()
img2 = img_target.copy()
a = 200
img1, points1, flag1 = features(img_source, detector, predictor, a)
img2, points2, flag2 = features(img_target, detector, predictor, a)
print(points1)
output1 = swap(img_source.copy(), img_target.copy(), points1, points2)
output1 = cv2.resize(output1, (
(rects[0].right() + 40) - (rects[0].left() - 40), (rects[0].bottom() + 40) - (rects[0].top() - 40)))
img_src[rects[0].top() - 40:rects[0].bottom() + 40, rects[0].left() - 40:rects[0].right() + 40, :] = output1
if len(rects) > 1:
output2 = swap(img_target.copy(), img_source.copy(), points2, points1)
output2 = cv2.resize(output2, (
(rects[1].right() + 40) - (rects[1].left() - 40), (rects[1].bottom() + 40) - (rects[1].top() - 40)))
img_src[rects[1].top() - 40:rects[1].bottom() + 40, rects[1].left() - 40:rects[1].right() + 40, :] = output2
cv2.imshow("img src", img_src)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.seamlessClone",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"copy.deepcopy",
"scripts.Traditional.forwardwarping.forward.for_warping",
"imutils.face_utils.rect_to_bb",
"scripts.Traditional.InverseWarping.inverse.inv_warping",
"dlib.shape_predictor",
"sys.path.remo... | [((1314, 1377), 'sys.path.remove', 'sys.path.remove', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (1329, 1377), False, 'import sys\n'), ((2697, 2726), 'cv2.boundingRect', 'cv2.boundingRect', (['convex_hull'], {}), '(convex_hull)\n', (2713, 2726), False, 'import cv2\n'), ((2740, 2758), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (2752, 2758), False, 'import cv2\n'), ((2852, 2887), 'numpy.array', 'np.array', (['triangles'], {'dtype': 'np.int32'}), '(triangles, dtype=np.int32)\n', (2860, 2887), True, 'import numpy as np\n'), ((2912, 2948), 'numpy.array', 'np.array', (['landmarks_points', 'np.int32'], {}), '(landmarks_points, np.int32)\n', (2920, 2948), True, 'import numpy as np\n'), ((3749, 3786), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3761, 3786), False, 'import cv2\n'), ((4891, 4967), 'cv2.imread', 'cv2.imread', (['"""/home/aditya/Desktop/to_add/FaceSwap/images/bradley_cooper.jpg"""'], {}), "('/home/aditya/Desktop/to_add/FaceSwap/images/bradley_cooper.jpg')\n", (4901, 4967), False, 'import cv2\n'), ((5022, 5060), 'cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_BGR2GRAY'], {}), '(img1, cv2.COLOR_BGR2GRAY)\n', (5034, 5060), False, 'import cv2\n'), ((5103, 5127), 'numpy.zeros_like', 'np.zeros_like', (['img1_gray'], {}), '(img1_gray)\n', (5116, 5127), True, 'import numpy as np\n'), ((5222, 5290), 'cv2.imread', 'cv2.imread', (['"""/home/aditya/Desktop/to_add/FaceSwap/images/aditya.jpg"""'], {}), "('/home/aditya/Desktop/to_add/FaceSwap/images/aditya.jpg')\n", (5232, 5290), False, 'import cv2\n'), ((5346, 5384), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_BGR2GRAY'], {}), '(img2, cv2.COLOR_BGR2GRAY)\n', (5358, 5384), False, 'import cv2\n'), ((5446, 5491), 'numpy.zeros', 'np.zeros', (['(height, width, channels)', 'np.uint8'], {}), '((height, width, channels), np.uint8)\n', (5454, 5491), True, 
'import numpy as np\n'), ((5612, 5644), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (5642, 5644), False, 'import dlib\n'), ((5661, 5749), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""/home/aditya/FaceSwap/shape_predictor_68_face_landmarks.dat"""'], {}), "(\n '/home/aditya/FaceSwap/shape_predictor_68_face_landmarks.dat')\n", (5681, 5749), False, 'import dlib\n'), ((5887, 5919), 'numpy.array', 'np.array', (['face1_points', 'np.int32'], {}), '(face1_points, np.int32)\n', (5895, 5919), True, 'import numpy as np\n'), ((5967, 5992), 'cv2.convexHull', 'cv2.convexHull', (['f1_points'], {}), '(f1_points)\n', (5981, 5992), False, 'import cv2\n'), ((6125, 6167), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['mask', 'convexhull1', '(255)'], {}), '(mask, convexhull1, 255)\n', (6143, 6167), False, 'import cv2\n'), ((6226, 6264), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img1', 'img1'], {'mask': 'mask'}), '(img1, img1, mask=mask)\n', (6241, 6264), False, 'import cv2\n'), ((6408, 6440), 'numpy.array', 'np.array', (['face2_points', 'np.int32'], {}), '(face2_points, np.int32)\n', (6416, 6440), True, 'import numpy as np\n'), ((6459, 6484), 'cv2.convexHull', 'cv2.convexHull', (['f2_points'], {}), '(f2_points)\n', (6473, 6484), False, 'import cv2\n'), ((6600, 6624), 'numpy.zeros_like', 'np.zeros_like', (['img1_gray'], {}), '(img1_gray)\n', (6613, 6624), True, 'import numpy as np\n'), ((6636, 6665), 'cv2.boundingRect', 'cv2.boundingRect', (['convexhull1'], {}), '(convexhull1)\n', (6652, 6665), False, 'import cv2\n'), ((6679, 6697), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (6691, 6697), False, 'import cv2\n'), ((6787, 6822), 'numpy.array', 'np.array', (['triangles'], {'dtype': 'np.int32'}), '(triangles, dtype=np.int32)\n', (6795, 6822), True, 'import numpy as np\n'), ((10694, 10708), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (10705, 10708), False, 'import cv2\n'), ((10713, 10736), 'cv2.destroyAllWindows', 
'cv2.destroyAllWindows', ([], {}), '()\n', (10734, 10736), False, 'import cv2\n'), ((4055, 4091), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4067, 4091), False, 'import cv2\n'), ((6996, 7103), 'scripts.Traditional.forwardwarping.forward.for_warping', 'for_warping', (['indexes_triangle1', 'img1', 'img2', 'face1_points', 'face2_points', 'lines_space_mask', 'img2_new_face'], {}), '(indexes_triangle1, img1, img2, face1_points, face2_points,\n lines_space_mask, img2_new_face)\n', (7007, 7103), False, 'from scripts.Traditional.forwardwarping.forward import for_warping\n'), ((7170, 7194), 'numpy.zeros_like', 'np.zeros_like', (['img2_gray'], {}), '(img2_gray)\n', (7183, 7194), True, 'import numpy as np\n'), ((7220, 7272), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['img2_face_mask', 'convexhull2', '(255)'], {}), '(img2_face_mask, convexhull2, 255)\n', (7238, 7272), False, 'import cv2\n'), ((7298, 7329), 'cv2.bitwise_not', 'cv2.bitwise_not', (['img2_head_mask'], {}), '(img2_head_mask)\n', (7313, 7329), False, 'import cv2\n'), ((7358, 7406), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img2', 'img2'], {'mask': 'img2_face_mask'}), '(img2, img2, mask=img2_face_mask)\n', (7373, 7406), False, 'import cv2\n'), ((7424, 7464), 'cv2.add', 'cv2.add', (['img2_head_noface', 'img2_new_face'], {}), '(img2_head_noface, img2_new_face)\n', (7431, 7464), False, 'import cv2\n'), ((7488, 7517), 'cv2.boundingRect', 'cv2.boundingRect', (['convexhull2'], {}), '(convexhull2)\n', (7504, 7517), False, 'import cv2\n'), ((7611, 7690), 'cv2.seamlessClone', 'cv2.seamlessClone', (['result', 'img2', 'img2_head_mask', 'center_face2', 'cv2.NORMAL_CLONE'], {}), '(result, img2, img2_head_mask, center_face2, cv2.NORMAL_CLONE)\n', (7628, 7690), False, 'import cv2\n'), ((7699, 7741), 'cv2.imshow', 'cv2.imshow', (['"""seamlessclone"""', 'seamlessclone'], {}), "('seamlessclone', seamlessclone)\n", (7709, 7741), False, 'import cv2\n'), ((7784, 7891), 
'scripts.Traditional.InverseWarping.inverse.inv_warping', 'inv_warping', (['indexes_triangle1', 'img1', 'img2', 'face1_points', 'face2_points', 'lines_space_mask', 'img2_new_face'], {}), '(indexes_triangle1, img1, img2, face1_points, face2_points,\n lines_space_mask, img2_new_face)\n', (7795, 7891), False, 'from scripts.Traditional.InverseWarping.inverse import inv_warping\n'), ((7914, 7938), 'numpy.zeros_like', 'np.zeros_like', (['img2_gray'], {}), '(img2_gray)\n', (7927, 7938), True, 'import numpy as np\n'), ((7964, 8016), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['img2_face_mask', 'convexhull2', '(255)'], {}), '(img2_face_mask, convexhull2, 255)\n', (7982, 8016), False, 'import cv2\n'), ((8042, 8073), 'cv2.bitwise_not', 'cv2.bitwise_not', (['img2_head_mask'], {}), '(img2_head_mask)\n', (8057, 8073), False, 'import cv2\n'), ((8102, 8150), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img2', 'img2'], {'mask': 'img2_face_mask'}), '(img2, img2, mask=img2_face_mask)\n', (8117, 8150), False, 'import cv2\n'), ((8168, 8208), 'cv2.add', 'cv2.add', (['img2_head_noface', 'img2_new_face'], {}), '(img2_head_noface, img2_new_face)\n', (8175, 8208), False, 'import cv2\n'), ((8232, 8261), 'cv2.boundingRect', 'cv2.boundingRect', (['convexhull2'], {}), '(convexhull2)\n', (8248, 8261), False, 'import cv2\n'), ((8355, 8433), 'cv2.seamlessClone', 'cv2.seamlessClone', (['result', 'img2', 'img2_head_mask', 'center_face2', 'cv2.MIXED_CLONE'], {}), '(result, img2, img2_head_mask, center_face2, cv2.MIXED_CLONE)\n', (8372, 8433), False, 'import cv2\n'), ((8442, 8484), 'cv2.imshow', 'cv2.imshow', (['"""seamlessclone"""', 'seamlessclone'], {}), "('seamlessclone', seamlessclone)\n", (8452, 8484), False, 'import cv2\n'), ((8543, 8619), 'cv2.imread', 'cv2.imread', (['"""/home/aditya/Desktop/to_add/FaceSwap/images/bradley_cooper.jpg"""'], {}), "('/home/aditya/Desktop/to_add/FaceSwap/images/bradley_cooper.jpg')\n", (8553, 8619), False, 'import cv2\n'), ((8638, 8706), 'cv2.imread', 'cv2.imread', 
(['"""/home/aditya/Desktop/to_add/FaceSwap/images/aditya.jpg"""'], {}), "('/home/aditya/Desktop/to_add/FaceSwap/images/aditya.jpg')\n", (8648, 8706), False, 'import cv2\n'), ((8727, 8759), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (8757, 8759), False, 'import dlib\n'), ((8780, 8888), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""/home/aditya/Desktop/to_add/FaceSwap/shape_predictor_68_face_landmarks.dat"""'], {}), "(\n '/home/aditya/Desktop/to_add/FaceSwap/shape_predictor_68_face_landmarks.dat'\n )\n", (8800, 8888), False, 'import dlib\n'), ((8938, 8960), 'copy.deepcopy', 'copy.deepcopy', (['img_src'], {}), '(img_src)\n', (8951, 8960), False, 'import copy\n'), ((10658, 10688), 'cv2.imshow', 'cv2.imshow', (['"""img src"""', 'img_src'], {}), "('img src', img_src)\n", (10668, 10688), False, 'import cv2\n'), ((3929, 3944), 'numpy.shape', 'np.shape', (['rects'], {}), '(rects)\n', (3937, 3944), True, 'import numpy as np\n'), ((4231, 4260), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (4253, 4260), False, 'from imutils import face_utils\n'), ((4345, 4372), 'imutils.face_utils.rect_to_bb', 'face_utils.rect_to_bb', (['rect'], {}), '(rect)\n', (4366, 4372), False, 'from imutils import face_utils\n'), ((4385, 4443), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (4398, 4443), False, 'import cv2\n'), ((4482, 4525), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(2)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 2, (0, 0, 255), -1)\n', (4492, 4525), False, 'import cv2\n')] |
####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: darknet_to_ell_impporter_test.py (importers)
# Authors: <NAME>
#
# Requires: Python 3.x
#
####################################################################################################
import sys
import os
import unittest
import getopt
import configparser
import re
import struct
import traceback
import inspect
import logging
import numpy as np
_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(message)s")
# Try to import ELL. If it doesn't exist it means it has not been built,
# so don't run the tests.
SkipTests = False
try:
script_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(script_path, "..", "..", "..", "utilities", "pythonlibs"))
sys.path.append(os.path.join(script_path, '..'))
import find_ell
import ell
import darknet_to_ell
except ImportError:
errorType, value, traceback = sys.exc_info()
if "Could not find ell package" in str(value):
_logger.info("Python was not built, so skipping test")
SkipTests = True
else:
raise value
# Load a test darknet model and verify its output.
# The unit test model verifies the most common darknet layers:
# - convolutional
# - maxpool
# - avgpool
# - fully connected
# - softmax
class DarknetModelTestCase(unittest.TestCase):
def setUp(self):
if SkipTests:
self.skipTest('Module not tested, ELL module missing')
def test_darknet_model(self):
# Create synthetic input data
input1 = np.arange(28 * 28, dtype=np.float).reshape(28, 28, 1)
# Create an ELL predictor from the darknet model files
predictor = darknet_to_ell.predictor_from_darknet_model(
'unittest.cfg', 'unittest.weights')
# Feed the input through the model
result1 = predictor.Predict(input1.ravel())
# Verify its what we expect
expectedResult1 = [0.09134083986282349, 0.09748589247465134, 0.09064911305904388, 0.13794259727001190, 0.16832095384597778,
0.08976214379072190, 0.06458559632301330, 0.07894224673509598, 0.12377665191888809, 0.05719388648867607]
np.testing.assert_array_almost_equal(
result1, expectedResult1, 5, 'prediction of first input does not match expected results!')
# Feed the next input through the model
input2 = np.flipud(input1)
result2 = predictor.Predict(input2.ravel())
# Verify its what we expect
expectedResult2 = [0.08052270114421844, 0.08739096671342850, 0.08180813491344452, 0.24630726873874664, 0.12944690883159637,
0.08548084646463394, 0.06091265007853508, 0.07173667103052139, 0.11159289628267288, 0.04480091854929924]
np.testing.assert_array_almost_equal(
result2, expectedResult2, 5, 'prediction of second input does not match expected results!')
# create a map and save to file
ell_map = ell.neural.utilities.ell_map_from_float_predictor(predictor)
ell_map.Save("darknet_test.map")
# create a map and save to file
ell_map = ell.neural.utilities.ell_map_from_float_predictor(predictor,
step_interval_msec=100, lag_threshold_msec=150, function_prefix="DarknetTest")
ell_map.Save("darknet_steppable_test.map")
return
if __name__ == '__main__':
unittest.main()
| [
"logging.getLogger",
"logging.basicConfig",
"numpy.testing.assert_array_almost_equal",
"numpy.flipud",
"ell.neural.utilities.ell_map_from_float_predictor",
"darknet_to_ell.predictor_from_darknet_model",
"os.path.join",
"unittest.main",
"sys.exc_info",
"os.path.abspath",
"numpy.arange"
] | [((524, 551), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (541, 551), False, 'import logging\n'), ((552, 613), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(message)s"""'}), "(level=logging.INFO, format='%(message)s')\n", (571, 613), False, 'import logging\n'), ((3520, 3535), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3533, 3535), False, 'import unittest\n'), ((771, 796), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (786, 796), False, 'import os\n'), ((818, 888), 'os.path.join', 'os.path.join', (['script_path', '""".."""', '""".."""', '""".."""', '"""utilities"""', '"""pythonlibs"""'], {}), "(script_path, '..', '..', '..', 'utilities', 'pythonlibs')\n", (830, 888), False, 'import os\n'), ((910, 941), 'os.path.join', 'os.path.join', (['script_path', '""".."""'], {}), "(script_path, '..')\n", (922, 941), False, 'import os\n'), ((1058, 1072), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1070, 1072), False, 'import sys\n'), ((1824, 1903), 'darknet_to_ell.predictor_from_darknet_model', 'darknet_to_ell.predictor_from_darknet_model', (['"""unittest.cfg"""', '"""unittest.weights"""'], {}), "('unittest.cfg', 'unittest.weights')\n", (1867, 1903), False, 'import darknet_to_ell\n'), ((2321, 2452), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result1', 'expectedResult1', '(5)', '"""prediction of first input does not match expected results!"""'], {}), "(result1, expectedResult1, 5,\n 'prediction of first input does not match expected results!')\n", (2357, 2452), True, 'import numpy as np\n'), ((2528, 2545), 'numpy.flipud', 'np.flipud', (['input1'], {}), '(input1)\n', (2537, 2545), True, 'import numpy as np\n'), ((2906, 3038), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result2', 'expectedResult2', '(5)', '"""prediction of second input does not match expected 
results!"""'], {}), "(result2, expectedResult2, 5,\n 'prediction of second input does not match expected results!')\n", (2942, 3038), True, 'import numpy as np\n'), ((3107, 3167), 'ell.neural.utilities.ell_map_from_float_predictor', 'ell.neural.utilities.ell_map_from_float_predictor', (['predictor'], {}), '(predictor)\n', (3156, 3167), False, 'import ell\n'), ((3268, 3416), 'ell.neural.utilities.ell_map_from_float_predictor', 'ell.neural.utilities.ell_map_from_float_predictor', (['predictor'], {'step_interval_msec': '(100)', 'lag_threshold_msec': '(150)', 'function_prefix': '"""DarknetTest"""'}), "(predictor,\n step_interval_msec=100, lag_threshold_msec=150, function_prefix=\n 'DarknetTest')\n", (3317, 3416), False, 'import ell\n'), ((1687, 1721), 'numpy.arange', 'np.arange', (['(28 * 28)'], {'dtype': 'np.float'}), '(28 * 28, dtype=np.float)\n', (1696, 1721), True, 'import numpy as np\n')] |
###########################################################################
# #
# physical_validation, #
# a python package to test the physical validity of MD results #
# #
# Written by <NAME> <<EMAIL>> #
# <NAME> <<EMAIL>> #
# #
# Copyright (c) 2017-2021 University of Colorado Boulder #
# (c) 2012 The University of Virginia #
# #
###########################################################################
r"""
Data structures carrying simulation data.
"""
from typing import Any, List, Optional, Tuple
import numpy as np
from ..util import error as pv_error
from ..util.util import array_equal_shape_and_close
class RectangularBox:
def __init__(self, box: np.ndarray):
self.__box = None
self.__nframes = 0
assert 0 < box.ndim < 3
if box.ndim == 1:
assert box.size == 3
self.__box = box
self.__nframes = 1
elif box.ndim == 2:
assert box.shape[1] == 3
self.__box = box
self.__nframes = box.shape[0]
@property
def box(self):
return self.__box
def gather(
self, positions: np.ndarray, bonds: List[List[int]], molec_idx: List[int]
):
bonds = np.array(bonds)
if bonds.size == 0:
return positions
positions = np.array(positions)
assert 1 < positions.ndim < 4
if positions.ndim == 2:
nframes = 1
positions = np.array([positions])
else:
nframes = positions.shape[0]
if self.__nframes != 1:
assert self.__nframes == nframes
for f in range(nframes):
p = positions[f]
if self.__nframes > 1:
box = self.__box[f]
else:
box = self.__box[0]
assert len(bonds) == len(molec_idx)
for mbonds, idx in zip(bonds, molec_idx):
for b in mbonds:
a1 = idx + b[0]
a2 = idx + b[1]
p[a2] += np.round((p[a1] - p[a2]) / box) * box
positions[f] = p
return positions
class TrajectoryData(object):
    r"""Position and velocity trajectories along a simulation.

    The full trajectory is needed to calculate the equipartition of the
    kinetic energy.  Both trajectories are validated against each other,
    so they must share the same number of frames and atoms.

    The trajectories can be accessed either through the properties

    * trajectory.position
    * trajectory.velocity

    or through key-based access

    * trajectory['position']
    * trajectory['velocity']
    """

    @staticmethod
    def trajectories() -> Tuple[str, str]:
        # Canonical names of the trajectories managed by this class.
        return "position", "velocity"

    def __init__(self, position: Optional[Any] = None, velocity: Optional[Any] = None):
        self.__position = None
        self.__velocity = None
        self.__nframes = None
        self.__natoms = None
        # Map each trajectory name to the unbound property accessors so
        # that key-based access reuses the validating properties below.
        self.__getters = {
            key: getattr(TrajectoryData, key).__get__
            for key in TrajectoryData.trajectories()
        }
        self.__setters = {
            key: getattr(TrajectoryData, key).__set__
            for key in TrajectoryData.trajectories()
        }
        # Consistency check
        assert set(self.__getters.keys()) == set(self.__setters.keys())
        assert set(self.__getters.keys()) == set(TrajectoryData.trajectories())
        if position is not None:
            self.position = position
        if velocity is not None:
            self.velocity = velocity

    def __getitem__(self, key: str) -> Optional[np.ndarray]:
        """Dictionary-style read access, e.g. ``trajectory["position"]``."""
        if key in self.trajectories():
            return self.__getters[key](self)
        raise KeyError

    def __setitem__(self, key: str, value: Any) -> None:
        """Dictionary-style write access, e.g. ``trajectory["position"] = p``."""
        if key in self.trajectories():
            self.__setters[key](self, value)
            return
        raise KeyError

    def __check_value(self, value: Any, key: str) -> np.ndarray:
        """Validate *value* as a (#frames x #atoms x 3) array.

        A 2-dimensional input is treated as a single frame.  Frame and
        atom counts must agree with any trajectory stored previously.
        """
        value = np.array(value)
        if value.ndim == 2:
            # Promote a single frame to a 3-dimensional array.
            value = np.array([value])
        if value.ndim != 3:
            raise pv_error.InputError([key], "Expected 2- or 3-dimensional array.")
        nframes = value.shape[0]
        natoms = value.shape[1]
        if self.__nframes is None:
            self.__nframes = nframes
        elif self.__nframes != nframes:
            raise pv_error.InputError(
                [key], "Expected equal number of frames as in all trajectories."
            )
        if self.__natoms is None:
            self.__natoms = natoms
        elif self.__natoms != natoms:
            raise pv_error.InputError(
                [key], "Expected equal number of atoms as in all trajectories."
            )
        if value.shape[2] != 3:
            raise pv_error.InputError(
                [key], "Expected 3 spatial dimensions (#frames x #atoms x 3)."
            )
        return value

    @property
    def position(self) -> Optional[np.ndarray]:
        """Get position"""
        return self.__position

    @position.setter
    def position(self, pos: Any) -> None:
        """Set position"""
        self.__position = self.__check_value(pos, "position")

    @property
    def velocity(self) -> Optional[np.ndarray]:
        """Get velocity"""
        return self.__velocity

    @velocity.setter
    def velocity(self, vel: Any) -> None:
        """Set velocity"""
        self.__velocity = self.__check_value(vel, "velocity")

    def __eq__(self, other) -> bool:
        if type(other) is not type(self):
            return False
        same_position = array_equal_shape_and_close(self.__position, other.__position)
        same_velocity = array_equal_shape_and_close(self.__velocity, other.__velocity)
        return same_position and same_velocity and self.__nframes == other.__nframes
| [
"numpy.array",
"numpy.round"
] | [((1657, 1672), 'numpy.array', 'np.array', (['bonds'], {}), '(bonds)\n', (1665, 1672), True, 'import numpy as np\n'), ((1750, 1769), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (1758, 1769), True, 'import numpy as np\n'), ((4541, 4556), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4549, 4556), True, 'import numpy as np\n'), ((1888, 1909), 'numpy.array', 'np.array', (['[positions]'], {}), '([positions])\n', (1896, 1909), True, 'import numpy as np\n'), ((4646, 4663), 'numpy.array', 'np.array', (['[value]'], {}), '([value])\n', (4654, 4663), True, 'import numpy as np\n'), ((2465, 2496), 'numpy.round', 'np.round', (['((p[a1] - p[a2]) / box)'], {}), '((p[a1] - p[a2]) / box)\n', (2473, 2496), True, 'import numpy as np\n')] |
# Export per-key "feature" datasets from one HDF5 archive to individual
# .npy files (one file per key, named after the key).
import h5py
import numpy as np

# Use a context manager so the HDF5 handle is closed even if a read
# fails (the original left the file open); also avoid shadowing the
# `file` builtin.
with h5py.File('/data2/wt/openimages/vc_feature/1coco_train_all_bu_2.hdf5', 'r') as h5:
    for key in h5:
        feature = h5[key]['feature'][:]
        np.save('/data2/wt/openimages/vc_feature/coco_vc_all_bu/' + key + '.npy', feature)
| [
"numpy.save",
"h5py.File"
] | [((42, 117), 'h5py.File', 'h5py.File', (['"""/data2/wt/openimages/vc_feature/1coco_train_all_bu_2.hdf5"""', '"""r"""'], {}), "('/data2/wt/openimages/vc_feature/1coco_train_all_bu_2.hdf5', 'r')\n", (51, 117), False, 'import h5py\n'), ((182, 269), 'numpy.save', 'np.save', (["('/data2/wt/openimages/vc_feature/coco_vc_all_bu/' + keys + '.npy')", 'feature'], {}), "('/data2/wt/openimages/vc_feature/coco_vc_all_bu/' + keys + '.npy',\n feature)\n", (189, 269), True, 'import numpy as np\n')] |
import numpy as np
import os
import scipy.io.wavfile as sp_wr
import random
random.seed(0)
def load_data(speech_path='speech-command/', ntrain=1200, nval=100, ntest=100):
    """Load the ten spoken-digit classes of the Speech Commands dataset.

    For every digit class, one recording is kept per speaker (consecutive
    file names sharing the same 8-character prefix are assumed to belong
    to one speaker), ``ntrain + nval + ntest`` of those are sampled per
    class, each wav is DC-centred, zero-padded to 1 s @ 16 kHz,
    downsampled by a factor of 2 and normalised to unit standard
    deviation.

    Parameters
    ----------
    speech_path : str
        Root directory holding one sub-directory per digit ("zero"..."nine").
    ntrain, nval, ntest : int
        Number of recordings per class in the train / validation / test
        splits (defaults reproduce the original hard-coded behaviour).

    Returns
    -------
    ((x_train, y_train), (x_val, y_val), (x_test, y_test))
        Each x has shape (10 * n_split, 8000, 1); each y holds integer
        digit labels.
    """
    digits = ["zero", "one", "two", "three", "four",
              "five", "six", "seven", "eight", "nine"]
    path = [speech_path + d + "/" for d in digits]
    ncls = len(path)
    fname1 = [os.listdir(ii) for ii in path]
    fname = [[] for _ in path]
    # Keep exactly one (random) recording per speaker: consecutive files
    # whose names share the first 8 characters are grouped together.
    for i in range(ncls):
        ll = len(fname1[i])
        j = 0
        while j < ll:
            k = j + 1
            while k < ll and fname1[i][j][:8] == fname1[i][k][:8]:
                k += 1
            fname[i] += random.sample(fname1[i][j:k], 1)
            j = k
    nfile = ntrain + nval + ntest
    print([len(ii) for ii in fname])
    # Sample the files used for this run and tag each with its class label.
    fname_s = [random.sample(ii, nfile) for ii in fname]
    for tg in range(ncls):
        for i in range(nfile):
            fname_s[tg][i] = [tg, path[tg] + fname_s[tg][i]]
    trn_ff = []
    for tg in range(ncls):
        trn_ff += fname_s[tg]
    random.shuffle(trn_ff)
    # Read the wav payloads; /1000 roughly rescales the int16 samples.
    x_t = [(sp_wr.read(ii[1])[1]) / 1000.0 for ii in trn_ff]
    y_t = np.array([ii[0] for ii in trn_ff])
    lgstd, dspl = 16000, 2  # nominal length (1 s @ 16 kHz), downsampling factor
    for ii in range(ncls * nfile):
        lg = len(x_t[ii])
        if lg < lgstd:
            # Remove the DC offset and zero-pad to the nominal length.
            x_t[ii] = np.concatenate([x_t[ii] - np.mean(x_t[ii]), np.zeros(lgstd - lg)])
        elif lg == lgstd:
            x_t[ii] = x_t[ii] - np.mean(x_t[ii])
        else:
            print("Warning: wav too long!")  # typo fix: was "wav tool long"
        x_t[ii] = x_t[ii][np.arange(0, lgstd, dspl)]  # downsample
        stdd = np.std(x_t[ii])
        if stdd < 1e-3:
            print("Warning: small std!")
        x_t[ii] = x_t[ii] / stdd
    x_t = np.vstack(x_t)
    lgstd = lgstd // dspl
    # Split in the shuffled order: training rows first, then val, then test.
    x_train_t = (x_t[:ncls * ntrain]).reshape([ncls * ntrain, lgstd, 1])
    x_val_t = (x_t[ncls * ntrain:ncls * (ntrain + nval)]).reshape([ncls * nval, lgstd, 1])
    x_test_t = (x_t[ncls * (ntrain + nval):]).reshape([ncls * ntest, lgstd, 1])
    y_train_t = y_t[:ncls * ntrain]
    y_val_t = y_t[ncls * ntrain:ncls * (ntrain + nval)]
    y_test_t = y_t[ncls * (ntrain + nval):]
    return (x_train_t, y_train_t), (x_val_t, y_val_t), (x_test_t, y_test_t)
# Build the dataset splits once and cache them in a single .npz archive so
# later runs can skip the (slow) directory scan and wav parsing.
(x_train,y_train),(x_val,y_val),(x_test,y_test) = load_data()
# NOTE(review): the output directory "Dspeech-command/" must already exist;
# np.savez does not create intermediate directories.
np.savez("Dspeech-command/numbers.npz",x_train=x_train,y_train=y_train,x_val=x_val,y_val=y_val,x_test=x_test,y_test=y_test)
"numpy.mean",
"numpy.savez",
"os.listdir",
"random.sample",
"random.shuffle",
"os.path.join",
"random.seed",
"numpy.array",
"numpy.zeros",
"scipy.io.wavfile.read",
"numpy.vstack",
"numpy.std",
"numpy.arange"
] | [((84, 98), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (95, 98), False, 'import random\n'), ((2423, 2556), 'numpy.savez', 'np.savez', (['"""Dspeech-command/numbers.npz"""'], {'x_train': 'x_train', 'y_train': 'y_train', 'x_val': 'x_val', 'y_val': 'y_val', 'x_test': 'x_test', 'y_test': 'y_test'}), "('Dspeech-command/numbers.npz', x_train=x_train, y_train=y_train,\n x_val=x_val, y_val=y_val, x_test=x_test, y_test=y_test)\n", (2431, 2556), True, 'import numpy as np\n'), ((138, 169), 'os.path.join', 'os.path.join', (['"""speech-command/"""'], {}), "('speech-command/')\n", (150, 169), False, 'import os\n'), ((1240, 1262), 'random.shuffle', 'random.shuffle', (['trn_ff'], {}), '(trn_ff)\n', (1254, 1262), False, 'import random\n'), ((1332, 1366), 'numpy.array', 'np.array', (['[ii[0] for ii in trn_ff]'], {}), '([ii[0] for ii in trn_ff])\n', (1340, 1366), True, 'import numpy as np\n'), ((1891, 1905), 'numpy.vstack', 'np.vstack', (['x_t'], {}), '(x_t)\n', (1900, 1905), True, 'import numpy as np\n'), ((539, 553), 'os.listdir', 'os.listdir', (['ii'], {}), '(ii)\n', (549, 553), False, 'import os\n'), ((1003, 1027), 'random.sample', 'random.sample', (['ii', 'nfile'], {}), '(ii, nfile)\n', (1016, 1027), False, 'import random\n'), ((1770, 1785), 'numpy.std', 'np.std', (['x_t[ii]'], {}), '(x_t[ii])\n', (1776, 1785), True, 'import numpy as np\n'), ((822, 854), 'random.sample', 'random.sample', (['fname1[i][j:k]', '(1)'], {}), '(fname1[i][j:k], 1)\n', (835, 854), False, 'import random\n'), ((1718, 1743), 'numpy.arange', 'np.arange', (['(0)', 'lgstd', 'dspl'], {}), '(0, lgstd, dspl)\n', (1727, 1743), True, 'import numpy as np\n'), ((1276, 1293), 'scipy.io.wavfile.read', 'sp_wr.read', (['ii[1]'], {}), '(ii[1])\n', (1286, 1293), True, 'import scipy.io.wavfile as sp_wr\n'), ((1539, 1559), 'numpy.zeros', 'np.zeros', (['(lgstd - lg)'], {}), '(lgstd - lg)\n', (1547, 1559), True, 'import numpy as np\n'), ((1615, 1631), 'numpy.mean', 'np.mean', (['x_t[ii]'], {}), '(x_t[ii])\n', 
(1622, 1631), True, 'import numpy as np\n'), ((1522, 1538), 'numpy.mean', 'np.mean', (['x_t[ii]'], {}), '(x_t[ii])\n', (1529, 1538), True, 'import numpy as np\n')] |
''' a set of utilities to create matrix representations (Laplacians) of graphs
'''
import numpy as np
def complete_gl(n):
    """
    Return the Laplacian of the complete graph on ``n`` nodes (every node
    adjacent to every other node), i.e. degree ``n - 1`` on the diagonal
    and -1 everywhere else.
    Parameters
    ----------
    n : int
        number of nodes in the graph
    Examples
    --------
    >>> from robotarium.graph import complete_gl
    >>> complete_gl(4)
    array([[ 3., -1., -1., -1.],
           [-1.,  3., -1., -1.],
           [-1., -1.,  3., -1.],
           [-1., -1., -1.,  3.]])
    """
    laplacian = np.full((n, n), -1.0)
    np.fill_diagonal(laplacian, n - 1)
    return laplacian
def cycle_gl(n):
    """
    Return the Laplacian of the cycle graph 1->2->...->n->1: a tridiagonal
    band of (-1, 2, -1) with the two corners closing the ring.
    Parameters
    ----------
    n : int
        number of nodes in the graph
    Examples
    --------
    >>> from robotarium.graph import cycle_gl
    >>> cycle_gl(4)
    array([[ 2., -1.,  0., -1.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [-1.,  0., -1.,  2.]])
    """
    # Interior: degree 2 with -1 on the two neighbouring off-diagonals.
    laplacian = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    # Close the ring between node 0 and node n-1.
    laplacian[n - 1, 0] = -1
    laplacian[0, n - 1] = -1
    return laplacian
def line_gl(n):
    """
    Return the Laplacian of the path (line) graph 1-2-...-n.
    Parameters
    ----------
    n : int
        number of nodes in the graph
    Examples
    --------
    >>> from robotarium.graph import line_gl
    >>> line_gl(4)
    array([[ 1., -1.,  0.,  0.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [ 0.,  0., -1.,  1.]])
    """
    # Interior: degree 2 with -1 on the two neighbouring off-diagonals.
    laplacian = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    # The two endpoints only have a single neighbour.
    laplacian[0, 0] = 1
    laplacian[n - 1, n - 1] = 1
    return laplacian
def random_connected_gl(v, e):
    """
    Outputs a randomly generated, undirected, connected graph.
    Laplacian with v - 1 + e edges
    Parameters
    ----------
    v : int
        number of nodes
    e : int
        number of edges
    Examples
    --------
    """
    laplacian = np.zeros((v, v))
    # Build a random spanning tree: attach each node i to a random earlier
    # node, guaranteeing connectivity with exactly v - 1 edges.
    for i in range(1, v):
        edge = np.random.randint(i)
        # Update adjacency relations.
        laplacian[i, edge] = -1
        laplacian[edge, i] = -1
        # Update node degrees
        laplacian[i, i] += 1
        laplacian[edge, edge] += 1
    # This works because all nodes have at least 1 degree. Choose from only
    # upper diagonal portion.
    # NOTE(review): `== 1` selects diagonal entries whose degree happens to
    # be exactly 1, not free (non-edge) off-diagonal slots; after np.triu
    # the off-diagonal non-edges are 0 and existing edges are -1.  This
    # looks like it should select zero entries of the strict upper
    # triangle instead — confirm against the original MATLAB routine.
    temp = np.where(np.triu(laplacian).reshape(v*v) == 1)
    pot_edges = temp[0]
    sz = laplacian.shape
    # num_edges = min(e, len(pot_edges))
    num_edges = np.where(e <= len(pot_edges), e, len(pot_edges))
    # NOTE(review): returns None (not a Laplacian) when no extra edge can
    # be added — callers must handle that case.
    if num_edges <= 0:
        return
    # Indices of randomly chosen extra edges.
    temp = np.random.permutation(len(pot_edges))
    edge_indices = temp[0:num_edges]
    # NOTE(review): verify that ind_to_sub's linear-index conversion
    # matches the row-major reshape(v*v) used above.
    i, j = ind_to_sub(sz, pot_edges[edge_indices])
    # Update adjacency relation
    laplacian[i, j] = -1
    laplacian[j, i] = -1
    # Update degree relation
    laplacian[i, i] += 1
    laplacian[j, j] += 1
    return laplacian
def random_gl(v, e):
    """
    Outputs a randomly generated, undirected, connected graph Laplacian with
    'n' nodes.
    Parameters
    ----------
    v : SOMETHING
        number of nodes (presumably) — the original docstring was a stub.
    e : SOMETHING
        number of edges to add (presumably) — confirm with callers.
    """
    laplacian = np.tril(np.ones((v, v)))
    # This works because I can't select diagonals
    # np.triu(np.tril(ones)) keeps only the diagonal, so "== 0" marks every
    # off-diagonal position (both triangles) as a potential edge slot.
    temp = np.where(np.triu(laplacian).reshape(v*v) == 0)
    pot_edges = temp[0]
    sz = laplacian.shape
    # Rest to zeros
    laplacian = np.zeros((v, v))
    num_edges = np.where(e <= len(pot_edges), e, len(pot_edges))
    # Indices of randomly chosen extra edges.
    # NOTE(review): both (i, j) and (j, i) can be drawn independently,
    # which double-counts an undirected edge; also, despite the docstring,
    # nothing here enforces connectivity.  Confirm intended semantics.
    temp = np.random.permutation(len(pot_edges))
    edge_indices = temp[0:num_edges]
    i, j = ind_to_sub(sz, pot_edges[edge_indices])
    # Update adjacency relation
    laplacian[i, j] = -1
    laplacian[j, i] = -1
    # Update degree relation
    laplacian[i, i] += 1
    laplacian[j, j] += 1
    return laplacian
def ind_to_sub(siz, ind):
    """
    Subscripts from linear index.
    This is a python formulation of the function ind2sub().
    The original function can be found here:
    https://www.mathworks.com/help/matlab/ref/ind2sub.html
    Indices are 0-based and interpreted in row-major (C) order, matching
    the ``reshape(v*v)`` flattening used by the callers in this module:
    ``ind == row * ncols + col``.
    Parameters
    ----------
    siz : int tuple
        contains the size of the matrix that is passed through.
    ind : np.ndarray
        the matrix that the linear index subscripts will be derived.
        Modified in place: invalid entries are clamped to -1.
    Returns
    -------
    rows : np.ndarray
        vector containing the equivalent row subscripts corresponding to each
        linear index from the original matrix ind.
    columns : np.ndarray
        vector containing the equivalent column subscripts corresponding to
        each linear index from the original matrix ind.
    """
    # Clamp out-of-range linear indices to the sentinel value -1.
    ind[ind < 0] = -1
    ind[ind >= siz[0] * siz[1]] = -1
    ind = ind.astype(int)
    # Row-major conversion.  Bug fix: the previous ceil-based formula
    # (ceil(ind / nrows) - 1) was off by one for indices that are exact
    # multiples of the row length — e.g. ind = 0 mapped to row -1 and
    # ind = ncols mapped to row 0 instead of row 1.
    rows = ind // siz[1]
    columns = ind % siz[1]
    return rows, columns
| [
"numpy.eye",
"numpy.ones",
"numpy.diag",
"numpy.zeros",
"numpy.random.randint",
"numpy.triu"
] | [((2031, 2047), 'numpy.zeros', 'np.zeros', (['(v, v)'], {}), '((v, v))\n', (2039, 2047), True, 'import numpy as np\n'), ((3510, 3526), 'numpy.zeros', 'np.zeros', (['(v, v)'], {}), '((v, v))\n', (3518, 3526), True, 'import numpy as np\n'), ((568, 583), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (575, 583), True, 'import numpy as np\n'), ((1086, 1112), 'numpy.diag', 'np.diag', (['([1] * (n - 1))', '(-1)'], {}), '([1] * (n - 1), -1)\n', (1093, 1112), True, 'import numpy as np\n'), ((2090, 2110), 'numpy.random.randint', 'np.random.randint', (['i'], {}), '(i)\n', (2107, 2110), True, 'import numpy as np\n'), ((3298, 3313), 'numpy.ones', 'np.ones', (['(v, v)'], {}), '((v, v))\n', (3305, 3313), True, 'import numpy as np\n'), ((556, 565), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (562, 565), True, 'import numpy as np\n'), ((1050, 1075), 'numpy.diag', 'np.diag', (['([1] * (n - 1))', '(1)'], {}), '([1] * (n - 1), 1)\n', (1057, 1075), True, 'import numpy as np\n'), ((1643, 1657), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (1650, 1657), True, 'import numpy as np\n'), ((1038, 1047), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (1044, 1047), True, 'import numpy as np\n'), ((1586, 1595), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (1592, 1595), True, 'import numpy as np\n'), ((1606, 1620), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (1613, 1620), True, 'import numpy as np\n'), ((2436, 2454), 'numpy.triu', 'np.triu', (['laplacian'], {}), '(laplacian)\n', (2443, 2454), True, 'import numpy as np\n'), ((3386, 3404), 'numpy.triu', 'np.triu', (['laplacian'], {}), '(laplacian)\n', (3393, 3404), True, 'import numpy as np\n')] |
# flake8: noqa
# Copyright (c) 2017 <NAME>
# Copyright (c) 2016-2017 The pytsrepr Developers
# <https://github.com/holgern/pytsrepr>
# See LICENSE for license details.
from __future__ import division, print_function, absolute_import
from .paa import *
from . import data
from pytsrepr.version import version as __version__
from numpy.testing import Tester
# Export every public name pulled in above (anything not underscore-prefixed).
__all__ = [s for s in dir() if not s.startswith('_')]
try:
    # In Python 2.x the name of the tempvar leaks out of the list
    # comprehension. Delete it to not make it show up in the main namespace.
    del s
except NameError:
    pass
# Expose numpy's test runner as `pytsrepr.test()`.
# NOTE(review): numpy.testing.Tester was removed in modern NumPy releases;
# confirm the pinned NumPy version still provides it.
test = Tester().test
| [
"numpy.testing.Tester"
] | [((635, 643), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (641, 643), False, 'from numpy.testing import Tester\n')] |
# --------------
# IPL deliveries analysis — NumPy version.
print('Using Numpy')
import numpy as np
# The input is not a plain CSV of numbers, so read everything as strings
# and convert columns on demand.
data_ipl = np.genfromtxt(path, delimiter=',',dtype='str' ,skip_header=True)
# Count distinct matches; column 0 presumably holds the match code — confirm
# against the file header.
print('How many matches were held in total',len(set(data_ipl[:,0])))
# The six participating teams: columns 3 and 4 are assumed to be team1 and
# team2 — TODO confirm.
team_1 = set(data_ipl[:,3])
team_2 = set(data_ipl[:,4])
print('six teams',team_1.union(team_2))
# Extras: column 17 is assumed to hold the per-delivery extras — confirm.
print('sum of all extras in all deliveries in all matches in the dataset',data_ipl[:,17].astype(float).sum())
print('Total number of exta',len(data_ipl[data_ipl[:,17].astype(float)>0]))
# Deliveries on which a given batsman got out, with the wicket type
# (columns 11 and -2 assumed to be delivery and wicket_kind — confirm).
given_batsman = '<NAME>'
out_deliveries = data_ipl[data_ipl[:,-3]==given_batsman][:,[11,-2]]
print('array of all delivery numbers when a given player got out',out_deliveries)
# Distinct matches in which the given team won the toss (column 5 assumed
# to be toss_winner — confirm).
given_team = 'Mumbai Indians'
print('matches the team Mumbai Indians has won the toss',len(set(data_ipl[data_ipl[:,5]==given_team][:,0])))
# Most prolific six-hitter: filter deliveries worth 6 runs (column -7
# assumed to be runs, column -10 the batsman — confirm) and count per batsman.
records_with_six_runs = data_ipl[data_ipl[:,-7].astype(int)==6]
from collections import Counter
sixes_scorers_count = Counter(records_with_six_runs[:,-10])
print('who has scored the maximum no. of sixes overall')
print(max(sixes_scorers_count,key=sixes_scorers_count.get))
print('Using Pandas')
# IPL deliveries analysis — pandas version of the NumPy exercise above.
# Reading with pandas gives named columns, so no positional-index guessing
# is needed here.
import pandas as pd
df = pd.read_csv(path)
# Count distinct matches via the match_code column.
print('How many matches were held in total',len(df['match_code'].unique()))
# The six participating teams (union of the two team columns).
# NOTE(review): Series.append was deprecated/removed in newer pandas;
# pd.concat([df['team1'], df['team2']]) is the modern equivalent.
teams = df['team1'].append(df['team2']).unique()
print('six teams',teams)
# Total extras conceded across all deliveries.
print('sum of all extras in all deliveries in all matches in the dataset',sum(df['extras']))
# Deliveries on which a given batsman got out, with the wicket type.
given_batsman = '<NAME>'
out_deliveries = df[df['player_out']==given_batsman][['inning','delivery','wicket_kind']]
print('array of all delivery numbers when a given player got out',out_deliveries)
# Distinct matches in which Mumbai Indians won the toss.
print('matches the team Mumbai Indians has won the toss',len(df[df['toss_winner']=='Mumbai Indians']['match_code'].unique()))
# Most prolific six-hitter(s): count deliveries worth 6 runs per batsman.
records_with_six_runs = df[df['runs']==6]
sixes_scorers_count = records_with_six_runs['batsman'].value_counts()
print('who has scored the maximum no. of sixes overall')
print(sixes_scorers_count[sixes_scorers_count==sixes_scorers_count.max()])
| [
"collections.Counter",
"numpy.genfromtxt",
"pandas.read_csv"
] | [((237, 302), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'dtype': '"""str"""', 'skip_header': '(True)'}), "(path, delimiter=',', dtype='str', skip_header=True)\n", (250, 302), True, 'import numpy as np\n'), ((1654, 1692), 'collections.Counter', 'Counter', (['records_with_six_runs[:, -10]'], {}), '(records_with_six_runs[:, -10])\n', (1661, 1692), False, 'from collections import Counter\n'), ((2030, 2047), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (2041, 2047), True, 'import pandas as pd\n')] |
"""
Tests for inequality.py
"""
import numpy as np
from numpy.testing import assert_allclose, assert_raises
from scipy.stats import linregress
from quantecon import lorenz_curve, gini_coefficient, \
shorrocks_index, rank_size
def test_lorenz_curve():
    """
    Check `lorenz_curve` on two limiting income distributions.

    A near-uniform distribution should produce a curve close to the
    45-degree line, while a distribution in which a single individual owns
    essentially everything should stay flat and jump to one at the end.
    """
    n = 3000

    # Nearly equal incomes -> Lorenz curve close to the straight line
    incomes = np.repeat(1, n) + np.random.normal(scale=0.0001, size=n)
    cum_people, cum_income = lorenz_curve(incomes)
    assert_allclose(cum_people, cum_income, rtol=1e-03)

    # One individual owns essentially all wealth -> flat, then vertical
    incomes = np.repeat(0.001, n)
    incomes[4] = 100000
    pop_cum, income_cum = lorenz_curve(incomes)
    expected_income_cum = np.zeros(n + 1)
    expected_income_cum[-1] = 1.
    assert_allclose(expected_income_cum, income_cum, atol=1e-4)
def test_gini_coeff():
    """
    Compare `gini_coefficient` against closed-form Gini coefficients.

    Pareto with parameter a:  G = 1 / (2a - 1)
    Weibull with parameter a: G = 1 - 2**(-1/a)
    """
    n = 10000

    # Pareto case
    shape = np.random.randint(2, 15)
    analytic = 1 / (2 * shape - 1)
    sample = (np.random.pareto(shape, size=n) + 1) * 2
    estimate = gini_coefficient(sample)
    assert_allclose(analytic, estimate, rtol=1e-01)

    # Weibull case
    shape = np.random.randint(2, 15)
    analytic = 1 - 2 ** (-1 / shape)
    sample = np.random.weibull(shape, size=n)
    estimate = gini_coefficient(sample)
    assert_allclose(analytic, estimate, rtol=1e-01)
def test_shorrocks_index():
    """
    Check `shorrocks_index` against the wealth-mobility matrix of
    'Wealth distribution and social mobility in the US: A quantitative
    approach' (Benhabib et al., 2017), for which the paper reports 0.98.
    https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
    """
    # Mobility matrix taken verbatim from Benhabib et al.
    mobility_matrix = [
        [0.222, 0.222, 0.215, 0.187, 0.081, 0.038, 0.029, 0.006],
        [0.221, 0.220, 0.215, 0.188, 0.082, 0.039, 0.029, 0.006],
        [0.207, 0.209, 0.210, 0.194, 0.090, 0.046, 0.036, 0.008],
        [0.198, 0.201, 0.207, 0.198, 0.095, 0.052, 0.040, 0.009],
        [0.175, 0.178, 0.197, 0.207, 0.110, 0.067, 0.054, 0.012],
        [0.182, 0.184, 0.200, 0.205, 0.106, 0.062, 0.050, 0.011],
        [0.123, 0.125, 0.166, 0.216, 0.141, 0.114, 0.094, 0.021],
        [0.084, 0.084, 0.142, 0.228, 0.170, 0.143, 0.121, 0.028],
    ]
    paper_value = 0.98  # result from paper
    computed = shorrocks_index(mobility_matrix)
    assert_allclose(paper_value, computed, rtol=1e-2)
def test_rank_size():
    """
    Validate `rank_size` on Pareto and exponential samples.

    For Pareto data the log-log rank-size relationship is (almost) a
    straight line (R^2 ~ 1); for exponential data the fit is clearly
    worse.  The returned arrays should keep roughly a fraction ``c`` of
    the sample.
    """
    np.random.seed(15)
    sample_size = 10000
    c = 0.74

    # Pareto draw: rank-size plot should be essentially linear.
    pareto_sample = np.exp(np.random.exponential(scale=1.0, size=sample_size))
    ranks, sizes = rank_size(pareto_sample, c=c)
    assert len(ranks) == len(sizes)
    assert_allclose(c * sample_size, len(ranks), rtol=1e-3)
    fit = linregress(np.log(ranks), np.log(sizes))
    assert_allclose(fit.rvalue ** 2, 1, rtol=1e-3)

    # Exponential draw: the linear fit must be noticeably worse.
    exp_sample = np.exp(np.random.randn(sample_size))
    ranks_exp, sizes_exp = rank_size(exp_sample, c=c)
    fit_exp = linregress(np.log(ranks_exp), np.log(sizes_exp))
    assert_raises(AssertionError, assert_allclose, fit_exp.rvalue ** 2, 1,
                  rtol=1e-3)
| [
"numpy.random.normal",
"quantecon.lorenz_curve",
"quantecon.shorrocks_index",
"numpy.repeat",
"numpy.testing.assert_allclose",
"numpy.log",
"numpy.testing.assert_raises",
"numpy.random.exponential",
"numpy.random.pareto",
"numpy.exp",
"numpy.random.randint",
"numpy.random.weibull",
"numpy.ra... | [((727, 742), 'quantecon.lorenz_curve', 'lorenz_curve', (['y'], {}), '(y)\n', (739, 742), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((747, 798), 'numpy.testing.assert_allclose', 'assert_allclose', (['cum_people', 'cum_income'], {'rtol': '(0.001)'}), '(cum_people, cum_income, rtol=0.001)\n', (762, 798), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((839, 858), 'numpy.repeat', 'np.repeat', (['(0.001)', 'n'], {}), '(0.001, n)\n', (848, 858), True, 'import numpy as np\n'), ((903, 918), 'quantecon.lorenz_curve', 'lorenz_curve', (['y'], {}), '(y)\n', (915, 918), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((945, 966), 'numpy.repeat', 'np.repeat', (['(0.0)', '(n + 1)'], {}), '(0.0, n + 1)\n', (954, 966), True, 'import numpy as np\n'), ((1003, 1064), 'numpy.testing.assert_allclose', 'assert_allclose', (['expected_income_cum', 'income_cum'], {'atol': '(0.0001)'}), '(expected_income_cum, income_cum, atol=0.0001)\n', (1018, 1064), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((1476, 1500), 'numpy.random.randint', 'np.random.randint', (['(2)', '(15)'], {}), '(2, 15)\n', (1493, 1500), True, 'import numpy as np\n'), ((1591, 1610), 'quantecon.gini_coefficient', 'gini_coefficient', (['y'], {}), '(y)\n', (1607, 1610), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((1615, 1657), 'numpy.testing.assert_allclose', 'assert_allclose', (['expected', 'coeff'], {'rtol': '(0.1)'}), '(expected, coeff, rtol=0.1)\n', (1630, 1657), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((1708, 1732), 'numpy.random.randint', 'np.random.randint', (['(2)', '(15)'], {}), '(2, 15)\n', (1725, 1732), True, 'import numpy as np\n'), ((1775, 1803), 'numpy.random.weibull', 'np.random.weibull', (['a'], {'size': 'n'}), '(a, size=n)\n', (1792, 1803), True, 'import numpy as np\n'), 
((1816, 1835), 'quantecon.gini_coefficient', 'gini_coefficient', (['y'], {}), '(y)\n', (1832, 1835), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((1840, 1882), 'numpy.testing.assert_allclose', 'assert_allclose', (['expected', 'coeff'], {'rtol': '(0.1)'}), '(expected, coeff, rtol=0.1)\n', (1855, 1882), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((2823, 2841), 'quantecon.shorrocks_index', 'shorrocks_index', (['P'], {}), '(P)\n', (2838, 2841), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((2846, 2889), 'numpy.testing.assert_allclose', 'assert_allclose', (['expected', 'index'], {'rtol': '(0.01)'}), '(expected, index, rtol=0.01)\n', (2861, 2889), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((3236, 3254), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (3250, 3254), True, 'import numpy as np\n'), ((3431, 3458), 'quantecon.rank_size', 'rank_size', (['pareto_draw'], {'c': 'c'}), '(pareto_draw, c=c)\n', (3440, 3458), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((3672, 3711), 'numpy.testing.assert_allclose', 'assert_allclose', (['r_sqval', '(1)'], {'rtol': '(0.001)'}), '(r_sqval, 1, rtol=0.001)\n', (3687, 3711), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((3759, 3787), 'numpy.random.randn', 'np.random.randn', (['sample_size'], {}), '(sample_size)\n', (3774, 3787), True, 'import numpy as np\n'), ((3804, 3813), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (3810, 3813), True, 'import numpy as np\n'), ((3849, 3873), 'quantecon.rank_size', 'rank_size', (['exp_draw'], {'c': 'c'}), '(exp_draw, c=c)\n', (3858, 3873), False, 'from quantecon import lorenz_curve, gini_coefficient, shorrocks_index, rank_size\n'), ((4041, 4115), 'numpy.testing.assert_raises', 'assert_raises', (['AssertionError', 'assert_allclose', 'r_sqval_exp', '(1)'], {'rtol': 
'(0.001)'}), '(AssertionError, assert_allclose, r_sqval_exp, 1, rtol=0.001)\n', (4054, 4115), False, 'from numpy.testing import assert_allclose, assert_raises\n'), ((641, 656), 'numpy.repeat', 'np.repeat', (['(1)', 'n'], {}), '(1, n)\n', (650, 656), True, 'import numpy as np\n'), ((659, 697), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.0001)', 'size': 'n'}), '(scale=0.0001, size=n)\n', (675, 697), True, 'import numpy as np\n'), ((3352, 3402), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1.0)', 'size': 'sample_size'}), '(scale=1.0, size=sample_size)\n', (3373, 3402), True, 'import numpy as np\n'), ((3604, 3621), 'numpy.log', 'np.log', (['rank_data'], {}), '(rank_data)\n', (3610, 3621), True, 'import numpy as np\n'), ((3623, 3640), 'numpy.log', 'np.log', (['size_data'], {}), '(size_data)\n', (3629, 3640), True, 'import numpy as np\n'), ((3916, 3937), 'numpy.log', 'np.log', (['rank_data_exp'], {}), '(rank_data_exp)\n', (3922, 3937), True, 'import numpy as np\n'), ((3980, 4001), 'numpy.log', 'np.log', (['size_data_exp'], {}), '(size_data_exp)\n', (3986, 4001), True, 'import numpy as np\n'), ((1542, 1569), 'numpy.random.pareto', 'np.random.pareto', (['a'], {'size': 'n'}), '(a, size=n)\n', (1558, 1569), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
import numpy as np
import cPickle
from numpy.linalg import norm
from numpy import exp, sqrt, sum, square
# Atomic numbers (nuclear charges) used when building Coulomb matrices.
NUCLEAR_CHARGE = dict()
NUCLEAR_CHARGE["H"] = 1.0
NUCLEAR_CHARGE["C"] = 6.0
NUCLEAR_CHARGE["N"] = 7.0
NUCLEAR_CHARGE["O"] = 8.0
NUCLEAR_CHARGE["S"] = 16.0
# Fixed side length of the (zero-padded) Coulomb matrix, i.e. the maximum
# number of atoms a molecule is expected to have.
COULOMB_MATRIX_SIZE = 23
# Per-element DFTB3 atomic reference energies subtracted when turning a
# total energy into a heat of formation.
# NOTE(review): units presumed kcal/mol, given the Hartree -> kcal/mol
# conversion (* 627.51) in parse_dft3_energy — confirm.
HOF_DFTB3 = dict()
HOF_DFTB3["H"] = -172.3145
HOF_DFTB3["C"] = -906.4342
HOF_DFTB3["N"] = -1327.2991
HOF_DFTB3["O"] = -1936.6161
HOF_DFTB3["S"] = -1453.3907
class Molecule:
    """Container for one molecule: geometry, energies and Coulomb matrix."""

    def __init__(self):
        # Sentinel values (-1 / NaN) mean "not parsed yet".
        self.natoms = -1
        self.molid = -1
        self.energy = float("nan")
        self.dftb3_energy = float("nan")
        self.dftb3_hof = float("nan")
        self.atomtypes = []
        self.coordinates = []

    def generate_coulomb_matrix(self):
        """Compute and cache the sorted Coulomb matrix for this molecule."""
        self.coulomb_matrix = generate_coulomb_matrix(self.atomtypes, self.coordinates)
def get_lines(filename):
    """Return the lines of *filename* (newlines kept) as a list."""
    with open(filename, "r") as handle:
        return handle.readlines()
def parse_dft3_energy(molid):
    """Parse the DFTB3 total energy from ``logfiles/<molid>.log``.

    Returns the energy converted from Hartree to kcal/mol, or NaN when no
    "Total Energy" line is present.  If several such lines exist, the last
    one wins (matching the original line-scan behaviour).
    """
    filename = "logfiles/" + str(molid) + ".log"
    # Reuse the shared file reader instead of duplicating open/readlines.
    lines = get_lines(filename)
    energy = float("nan")
    for line in lines:
        if "Total Energy" in line:
            tokens = line.split()
            # Third whitespace token assumed to hold the energy in Hartree
            # (confirm log format); 627.51 converts Hartree -> kcal/mol.
            energy = float(tokens[2]) * 627.51
    return energy
def parse_molecules(filename):
    """Parse an XYZ-like multi-molecule file into a list of Molecule objects.

    Line format (inferred from the token counts below — confirm):
      1 token  -> atom count, starts a new molecule
      2 tokens -> molecule id and reference energy
      7 tokens -> element symbol plus coordinates in tokens 4-6
    """
    lines = get_lines(filename)
    mols = []
    mol = Molecule()
    for line in lines:
        tokens = line.split()
        if len(tokens) == 1:
            # A new atom-count line: flush the previous molecule first.
            # NOTE(review): the final molecule is only appended when a
            # subsequent count line appears — if the file does not end
            # with one, the last molecule is silently dropped.  Confirm
            # the file format or append after the loop.
            if mol.natoms > 0:
                mols.append(mol)
                mol = Molecule()
            mol.natoms = int(tokens[0])
        if len(tokens) == 2:
            mol.molid = int(tokens[0])
            mol.energy = float(tokens[1])
            mol.dftb3_energy = parse_dft3_energy(mol.molid)
        if len(tokens) == 7:
            mol.atomtypes.append(tokens[0])
            x = float(tokens[4])
            y = float(tokens[5])
            z = float(tokens[6])
            mol.coordinates.append(np.array([x, y, z]))
        # Heat of formation: total energy minus per-element atomic
        # reference energies.  NOTE(review): this is recomputed on every
        # input line (wasteful, though the value is correct by the time a
        # molecule is flushed) — consider computing it once per molecule.
        mol.dftb3_hof = 0.0
        mol.dftb3_hof += mol.dftb3_energy
        for atom in ["H", "C", "N", "O", "S"]:
            n = mol.atomtypes.count(atom)
            mol.dftb3_hof -= n * HOF_DFTB3[atom]
    # for mol in mols:
    #     print mol.molid, mol.energy, mol.dftb3_hof
    return mols
def generate_coulomb_matrix(atomtypes, coordinates):
    """Build the sorted, zero-padded Coulomb matrix for one molecule.

    Atoms are sorted by descending row norm of the full Coulomb matrix so
    the representation is invariant to the input atom ordering.  Only the
    lower triangle (including the diagonal) is filled; the upper triangle
    stays zero.

    Parameters
    ----------
    atomtypes : list of element symbols (keys of NUCLEAR_CHARGE)
    coordinates : list of 3-vectors, same length as atomtypes

    Returns
    -------
    1-D numpy array of length COULOMB_MATRIX_SIZE**2.
    """
    # Row norms of the (unsorted) Coulomb matrix, used as sort keys.
    row_norms = []
    for i, atomtype_i in enumerate(atomtypes):
        row_norm = 0.0
        for j, atomtype_j in enumerate(atomtypes):
            if i == j:
                # Diagonal term: 0.5 * Z_i ** 2.4 (standard CM convention).
                row_norm += 0.5 * NUCLEAR_CHARGE[atomtype_i] ** 2.4
            else:
                row_norm += NUCLEAR_CHARGE[atomtype_i] * NUCLEAR_CHARGE[atomtype_j] \
                    / np.linalg.norm(coordinates[i] - coordinates[j])
        row_norms.append((row_norm, i))
    # Sort atoms by descending row norm.
    row_norms.sort(reverse=True)
    sorted_atomtypes = []
    sorted_coordinates = []
    for row_norm in row_norms:
        i = row_norm[1]
        sorted_atomtypes.append(atomtypes[i])
        sorted_coordinates.append(coordinates[i])
    # Fill the padded matrix (lower triangle + diagonal only).
    Mij = np.zeros((COULOMB_MATRIX_SIZE, COULOMB_MATRIX_SIZE))
    for i, atomtype_i in enumerate(sorted_atomtypes):
        for j, atomtype_j in enumerate(sorted_atomtypes):
            if i == j:
                Mij[i, j] = 0.5 * NUCLEAR_CHARGE[atomtype_i] ** 2.4
            elif j > i:
                continue
            else:
                Mij[i, j] = NUCLEAR_CHARGE[atomtype_i] * NUCLEAR_CHARGE[atomtype_j] \
                    / np.linalg.norm(sorted_coordinates[i] - sorted_coordinates[j])
    # Bug fix: ndarray.flatten() takes an order string ('C', 'F', ...),
    # not a length; passing COULOMB_MATRIX_SIZE**2 raised at runtime.
    return Mij.flatten()
def load_pickle(filename):
    """Deserialize and return the object stored in *filename*."""
    with open(filename, "rb") as handle:
        return cPickle.load(handle)
# def parse_dftb_energies(filename):
| [
"numpy.array",
"numpy.zeros",
"cPickle.load",
"numpy.linalg.norm"
] | [((3210, 3262), 'numpy.zeros', 'np.zeros', (['(COULOMB_MATRIX_SIZE, COULOMB_MATRIX_SIZE)'], {}), '((COULOMB_MATRIX_SIZE, COULOMB_MATRIX_SIZE))\n', (3218, 3262), True, 'import numpy as np\n'), ((3826, 3841), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (3838, 3841), False, 'import cPickle\n'), ((2014, 2033), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (2022, 2033), True, 'import numpy as np\n'), ((2825, 2872), 'numpy.linalg.norm', 'np.linalg.norm', (['(coordinates[i] - coordinates[j])'], {}), '(coordinates[i] - coordinates[j])\n', (2839, 2872), True, 'import numpy as np\n'), ((3651, 3712), 'numpy.linalg.norm', 'np.linalg.norm', (['(sorted_coordinates[i] - sorted_coordinates[j])'], {}), '(sorted_coordinates[i] - sorted_coordinates[j])\n', (3665, 3712), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
### importing OGB-LSC
from ogb.lsc import PCQM4MEvaluator
from ogb.lsc.pcqm4m_pyg import PygPCQM4MDataset
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import StepLR
from torch_geometric.data.dataloader import DataLoader
import os
import numpy as np
import random
import sys
sys.path.append('.')
from model import Net
from utils.config import process_config, get_args
from sklearn.model_selection import KFold
reg_criterion = torch.nn.L1Loss()
def train(model, device, loader, optimizer, config):
    """Run one optimisation epoch over *loader* and return the mean batch loss.

    The module-level ``reg_criterion`` (L1 loss) is the training objective.
    *config* is accepted for interface compatibility but is not read here.
    """
    model.train()
    total_loss = 0.0
    for step_batch in loader:
        step_batch = step_batch.to(device)
        prediction = model(step_batch).view(-1)
        optimizer.zero_grad()
        step_loss = reg_criterion(prediction, step_batch.y)
        step_loss.backward()
        optimizer.step()
        total_loss += step_loss.detach().cpu().item()
    return total_loss / len(loader)
def eval(model, device, loader, evaluator):
    """Evaluate *model* on *loader* and return the MAE reported by *evaluator*.

    Gradients are disabled; predictions and targets are gathered on the CPU
    and concatenated before being handed to ``evaluator.eval``.
    """
    model.eval()
    targets, predictions = [], []
    with torch.no_grad():
        for batch in loader:
            batch = batch.to(device)
            pred = model(batch).view(-1)
            targets.append(batch.y.view(pred.shape).detach().cpu())
            predictions.append(pred.detach().cpu())
    result = evaluator.eval({
        "y_true": torch.cat(targets, dim=0),
        "y_pred": torch.cat(predictions, dim=0),
    })
    return result["mae"]
def test(model, device, loader):
model.eval()
y_pred = []
for batch in loader:
batch = batch.to(device)
with torch.no_grad():
pred = model(batch).view(-1,)
y_pred.append(pred.detach().cpu())
y_pred = torch.cat(y_pred, dim=0)
return y_pred
def main():
    """Train the PCQM4M regression model on one K-fold split.

    Reads the run configuration from the CLI, seeds all RNGs, builds the
    fold's train/valid loaders, trains with Adam + StepLR, logs to
    TensorBoard, and checkpoints / predicts on test whenever validation
    MAE improves.
    """
    args = get_args()
    config = process_config(args)
    print(config)
    # Seed every RNG source so the fold split and training are reproducible.
    if config.get('seed') is not None:
        random.seed(config.seed)
        torch.manual_seed(config.seed)
        np.random.seed(config.seed)
        torch.cuda.manual_seed(config.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(config.seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataset = PygPCQM4MDataset(root='dataset/')
    evaluator = PCQM4MEvaluator()
    # Re-split train+valid into 8 folds; config.fold selects which one to run.
    kf = KFold(n_splits=8, shuffle=True, random_state=10086)
    split_idx = dataset.get_idx_split()
    train_val = np.hstack([split_idx["train"], split_idx["valid"]])
    train_vals = [i for i in kf.split(train_val)]
    train_split = train_vals[config.fold][0].tolist()
    valid_split = train_vals[config.fold][1].tolist()
    dataset_train = dataset[train_split]
    dataset_val = dataset[valid_split]
    # NOTE(review): "test" here is the *entire* dataset, not a held-out set.
    dataset_test = dataset
    train_loader = DataLoader(dataset_train, batch_size=config.batch_size, shuffle=True,
                              num_workers=config.num_workers)
    valid_loader = DataLoader(dataset_val, batch_size=config.batch_size, shuffle=False, num_workers = config.num_workers)
    if config.get('save_test_dir') is not None:
        test_loader = DataLoader(dataset_test, batch_size=config.batch_size, shuffle=False, num_workers = config.num_workers)
    net = Net
    model = net(config.architecture).to(device)
    num_params = sum(p.numel() for p in model.parameters())
    print(f'#Params: {num_params}')
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    best_valid_mae = 1000
    scheduler = StepLR(optimizer, step_size=config.step_size, gamma=config.decay_rate)
    writer = SummaryWriter(log_dir=config.directory)
    # Tag encoding the full hyper-parameter set; used for TensorBoard curves
    # and checkpoint/prediction file names.
    ts_algo_hp = str(config.time_stamp) + '_' \
                 + str(config.commit_id[0:7]) + '_' \
                 + str(config.architecture.exp_n) \
                 + str(config.architecture.exp_nonlinear) \
                 + str(config.architecture.exp_bn) \
                 + str(config.architecture.pooling) \
                 + str(config.architecture.JK) \
                 + str(config.architecture.layers) + '_' \
                 + str(config.architecture.hidden) + '_' \
                 + str(config.architecture.dropout) + '_' \
                 + str(config.learning_rate) + '_' \
                 + str(config.step_size) + '_' \
                 + str(config.decay_rate) + '_' \
                 + 'B' + str(config.batch_size) \
                 + 'S' + str(config.get('seed', 'na')) \
                 + 'W' + str(config.get('num_workers', 'na'))\
                 + 'f' + str(config.fold)
    for epoch in range(1, config.epochs + 1):
        print("=====Epoch {}".format(epoch))
        print('Training...')
        train_mae = train(model, device, train_loader, optimizer, config)
        print('Evaluating...')
        valid_mae = eval(model, device, valid_loader, evaluator)
        print({'Train': train_mae, 'Validation': valid_mae})
        lr = scheduler.optimizer.param_groups[0]['lr']
        writer.add_scalars('pcqm4m', {ts_algo_hp + '/lr': lr}, epoch)
        # writer.add_scalars('pcqm4m', {ts_algo_hp + '/te': test_error}, epoch)
        writer.add_scalars('pcqm4m', {ts_algo_hp + '/ve': valid_mae}, epoch)
        writer.add_scalars('pcqm4m', {ts_algo_hp + '/ls': train_mae}, epoch)
        # Checkpoint and predict only on validation improvement.
        if valid_mae < best_valid_mae:
            best_valid_mae = valid_mae
            if config.get('checkpoint_dir') is not None:
                print('Saving checkpoint...')
                os.makedirs(config.get('checkpoint_dir'), exist_ok=True)
                checkpoint = {'model_state_dict': model.state_dict()}
                torch.save(checkpoint, os.path.join(config.get('checkpoint_dir'), 'pcqm4m_' + ts_algo_hp + '_checkpoint.pt'))
            if config.get('save_test_dir') is not None:
                print('Predicting on test data...')
                y_pred = test(model, device, test_loader)
                save_inference_logits(config.get('save_test_dir'), 'all_y_pred_pcqm4m' + ts_algo_hp, y_pred)
        scheduler.step()
        print(f'Best validation MAE so far: {best_valid_mae}')
    writer.close()
def save_inference_logits(dir, file, y_pred):
    """Save *y_pred* to ``<dir>/<file>.npz`` as float32 under the key 'y_pred'.

    Accepts either a numpy array or a torch tensor (converted to numpy first).
    """
    print('Saving test submission file...')
    os.makedirs(dir, exist_ok=True)
    target = os.path.join(dir, file)
    assert isinstance(target, str)
    assert isinstance(y_pred, (np.ndarray, torch.Tensor))
    if isinstance(y_pred, torch.Tensor):
        y_pred = y_pred.numpy()
    np.savez_compressed(target, y_pred=y_pred.astype(np.float32))
# Script entry point: run a full training job for the configured fold.
if __name__ == "__main__":
    main()
| [
"numpy.hstack",
"torch.nn.L1Loss",
"torch.cuda.is_available",
"sklearn.model_selection.KFold",
"ogb.lsc.pcqm4m_pyg.PygPCQM4MDataset",
"sys.path.append",
"torch.utils.tensorboard.SummaryWriter",
"torch_geometric.data.dataloader.DataLoader",
"ogb.lsc.PCQM4MEvaluator",
"utils.config.get_args",
"num... | [((428, 448), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (443, 448), False, 'import sys\n'), ((583, 600), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (598, 600), False, 'import torch\n'), ((1350, 1374), 'torch.cat', 'torch.cat', (['y_true'], {'dim': '(0)'}), '(y_true, dim=0)\n', (1359, 1374), False, 'import torch\n'), ((1388, 1412), 'torch.cat', 'torch.cat', (['y_pred'], {'dim': '(0)'}), '(y_pred, dim=0)\n', (1397, 1412), False, 'import torch\n'), ((1772, 1796), 'torch.cat', 'torch.cat', (['y_pred'], {'dim': '(0)'}), '(y_pred, dim=0)\n', (1781, 1796), False, 'import torch\n'), ((1841, 1851), 'utils.config.get_args', 'get_args', ([], {}), '()\n', (1849, 1851), False, 'from utils.config import process_config, get_args\n'), ((1865, 1885), 'utils.config.process_config', 'process_config', (['args'], {}), '(args)\n', (1879, 1885), False, 'from utils.config import process_config, get_args\n'), ((2276, 2309), 'ogb.lsc.pcqm4m_pyg.PygPCQM4MDataset', 'PygPCQM4MDataset', ([], {'root': '"""dataset/"""'}), "(root='dataset/')\n", (2292, 2309), False, 'from ogb.lsc.pcqm4m_pyg import PygPCQM4MDataset\n'), ((2327, 2344), 'ogb.lsc.PCQM4MEvaluator', 'PCQM4MEvaluator', ([], {}), '()\n', (2342, 2344), False, 'from ogb.lsc import PCQM4MEvaluator\n'), ((2355, 2406), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(8)', 'shuffle': '(True)', 'random_state': '(10086)'}), '(n_splits=8, shuffle=True, random_state=10086)\n', (2360, 2406), False, 'from sklearn.model_selection import KFold\n'), ((2463, 2514), 'numpy.hstack', 'np.hstack', (["[split_idx['train'], split_idx['valid']]"], {}), "([split_idx['train'], split_idx['valid']])\n", (2472, 2514), True, 'import numpy as np\n'), ((2801, 2906), 'torch_geometric.data.dataloader.DataLoader', 'DataLoader', (['dataset_train'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'num_workers': 'config.num_workers'}), '(dataset_train, batch_size=config.batch_size, shuffle=True,\n 
num_workers=config.num_workers)\n', (2811, 2906), False, 'from torch_geometric.data.dataloader import DataLoader\n'), ((2953, 3057), 'torch_geometric.data.dataloader.DataLoader', 'DataLoader', (['dataset_val'], {'batch_size': 'config.batch_size', 'shuffle': '(False)', 'num_workers': 'config.num_workers'}), '(dataset_val, batch_size=config.batch_size, shuffle=False,\n num_workers=config.num_workers)\n', (2963, 3057), False, 'from torch_geometric.data.dataloader import DataLoader\n'), ((3508, 3578), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': 'config.step_size', 'gamma': 'config.decay_rate'}), '(optimizer, step_size=config.step_size, gamma=config.decay_rate)\n', (3514, 3578), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((3593, 3632), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'config.directory'}), '(log_dir=config.directory)\n', (3606, 3632), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6193, 6224), 'os.makedirs', 'os.makedirs', (['dir'], {'exist_ok': '(True)'}), '(dir, exist_ok=True)\n', (6204, 6224), False, 'import os\n'), ((6240, 6263), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (6252, 6263), False, 'import os\n'), ((6547, 6591), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'y_pred': 'y_pred'}), '(filename, y_pred=y_pred)\n', (6566, 6591), True, 'import numpy as np\n'), ((1952, 1976), 'random.seed', 'random.seed', (['config.seed'], {}), '(config.seed)\n', (1963, 1976), False, 'import random\n'), ((1985, 2015), 'torch.manual_seed', 'torch.manual_seed', (['config.seed'], {}), '(config.seed)\n', (2002, 2015), False, 'import torch\n'), ((2024, 2051), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (2038, 2051), True, 'import numpy as np\n'), ((2060, 2095), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['config.seed'], {}), '(config.seed)\n', (2082, 2095), False, 'import torch\n'), 
((2107, 2132), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2130, 2132), False, 'import torch\n'), ((3127, 3232), 'torch_geometric.data.dataloader.DataLoader', 'DataLoader', (['dataset_test'], {'batch_size': 'config.batch_size', 'shuffle': '(False)', 'num_workers': 'config.num_workers'}), '(dataset_test, batch_size=config.batch_size, shuffle=False,\n num_workers=config.num_workers)\n', (3137, 3232), False, 'from torch_geometric.data.dataloader import DataLoader\n'), ((1170, 1185), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1183, 1185), False, 'import torch\n'), ((1655, 1670), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1668, 1670), False, 'import torch\n'), ((2146, 2185), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['config.seed'], {}), '(config.seed)\n', (2172, 2185), False, 'import torch\n'), ((2223, 2248), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2246, 2248), False, 'import torch\n')] |
import json
import os
import os.path
import cv2
import numpy as np
import torch
import torch.utils.data as data_utl
from tqdm import tqdm
from dataset.vidor import VidOR
from frames import extract_all_frames
def video_to_tensor(pic):
    """Convert a video clip from ``numpy.ndarray`` to ``torch.FloatTensor``.

    Args:
        pic (numpy.ndarray): Video with layout (T x H x W x C).

    Returns:
        Tensor: The same data with layout (C x T x H x W).
    """
    reordered = pic.transpose(3, 0, 1, 2)
    return torch.from_numpy(reordered)
def load_rgb_frames(video_path, image_dir, begin, end, extract_frames=False):
    """Load RGB frames [begin, end) for a video as a float32 array in [-1, 1].

    :param video_path: path to the source video; used to locate the frame
        directory (and, if requested, to extract frames first).
    :param image_dir: root directory holding the extracted JPEG frames.
    :param begin: first frame index (inclusive).
    :param end: last frame index (exclusive).
    :param extract_frames: if True, extract frames from the video first.
        Be careful: this setting needs a long time!
    :return: np.ndarray of shape (end - begin, H, W, 3), dtype float32.
    :raises FileNotFoundError: if no frame in [begin, end) exists on disk.
    """
    frames = []
    video_path_splits = video_path.split('/')
    image_dir_path = os.path.join(image_dir, video_path_splits[-2], video_path_splits[-1][:-4])
    if extract_frames:
        # Be careful! This step will take a long time!
        extract_all_frames(video_path, image_dir_path)
    for i in range(begin, end):
        img_path = os.path.join(image_dir_path, str(i).zfill(6) + '.jpg')
        if os.path.exists(img_path):
            # cv2 reads BGR; reorder channels to RGB.
            img = cv2.imread(img_path)[:, :, [2, 1, 0]]
            w, h, c = img.shape
            if w < 226 or h < 226:
                # Upscale so the shorter side reaches at least 226 px.
                d = 226. - min(w, h)
                sc = 1 + d / min(w, h)
                img = cv2.resize(img, dsize=(0, 0), fx=sc, fy=sc)
            # Rescale pixel values from [0, 255] to [-1, 1].
            img = (img / 255.) * 2 - 1
            frames.append(img)
        else:
            # Missing frame on disk: repeat the previous frame if we have one.
            if len(frames) >= 1:
                frames.append(frames[-1])
    if not frames:
        # Bug fix: previously this fell through to `frames[0]` below and
        # crashed with an opaque IndexError when every frame was missing.
        raise FileNotFoundError(
            'No frames found in [%d, %d) under %s' % (begin, end, image_dir_path))
    # Pad at the front so the clip always has exactly (end - begin) frames.
    for miss_frame in range(end - begin - len(frames)):
        frames.insert(0, frames[0])
    return np.asarray(frames, dtype=np.float32)
def load_flow_frames(image_dir, vid, start, num):
    """Load *num* consecutive optical-flow frame pairs for video *vid*.

    Each element stacks the x- and y-flow grayscale images on the last axis,
    rescaled from [0, 255] to [-1, 1] and upscaled so the short side is at
    least 224 px.

    :return: np.ndarray of shape (num, H, W, 2), dtype float32.
    """
    frames = []
    for idx in range(start, start + num):
        stem = os.path.join(image_dir, vid, vid + '-' + str(idx).zfill(6))
        imgx = cv2.imread(stem + 'x.jpg', cv2.IMREAD_GRAYSCALE)
        imgy = cv2.imread(stem + 'y.jpg', cv2.IMREAD_GRAYSCALE)
        w, h = imgx.shape
        short_side = min(w, h)
        if short_side < 224:
            scale = 1 + (224. - short_side) / short_side
            imgx = cv2.resize(imgx, dsize=(0, 0), fx=scale, fy=scale)
            imgy = cv2.resize(imgy, dsize=(0, 0), fx=scale, fy=scale)
        imgx = (imgx / 255.) * 2 - 1
        imgy = (imgy / 255.) * 2 - 1
        frames.append(np.asarray([imgx, imgy]).transpose([1, 2, 0]))
    return np.asarray(frames, dtype=np.float32)
def make_vidor_dataset(anno_rpath, splits, video_rpath, task, low_memory=True):
    """Build a flat list of (video_path, label, start_frame, end_frame) tuples.

    :param anno_rpath: root path of the VidOR annotations.
    :param splits: list of split names, e.g. ['training', 'validation'].
    :param video_rpath: root path of the video files.
    :param task: one of 'object', 'action', 'relation'; only 'action'
        currently produces instances, other valid tasks return an empty list.
    :param low_memory: forwarded to the VidOR loader.
    """
    # Improvement: validate the task *before* paying for VidOR construction
    # (previously the dataset was built first and then discarded on error).
    if task not in ['object', 'action', 'relation']:
        print(task, "is not supported! ")
        exit()
    vidor_dataset = VidOR(anno_rpath, video_rpath, splits, low_memory)
    vidor_dataset_list = []
    if task == 'action':
        with open('actions.json', 'r') as action_f:
            actions = json.load(action_f)['actions']
        for each_split in splits:
            print('Preparing: ', each_split)
            get_index_list = vidor_dataset.get_index(each_split)
            pbar = tqdm(total=len(get_index_list))
            for ind in get_index_list:
                for each_ins in vidor_dataset.get_action_insts(ind):
                    video_path = vidor_dataset.get_video_path(ind)
                    start_f, end_f = each_ins['duration']
                    # Per-frame class label, shape (1, clip_length).
                    label = np.full((1, end_f - start_f), actions.index(each_ins['category']))
                    vidor_dataset_list.append((video_path, label, start_f, end_f))
                pbar.update(1)
            pbar.close()
    return vidor_dataset_list
class VidorPytorchTrain(data_utl.Dataset):
    """Map-style dataset yielding (video_tensor, per-frame label) pairs for training."""

    def __init__(self, anno_rpath, splits, video_rpath,
                 frames_rpath, mode, save_dir, task='action',
                 transforms=None, low_memory=True):
        # (video_path, label, start_frame, end_frame) for every instance.
        self.data = make_vidor_dataset(
            anno_rpath=anno_rpath,
            splits=splits,
            video_rpath=video_rpath,
            task=task,
            low_memory=low_memory)
        self.frames_rpath = frames_rpath
        self.transforms = transforms
        self.mode = mode
        self.task = task
        self.save_dir = save_dir

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (video_tensor, label_tensor) for the instance, or (0, 0)
            when the frame directory is missing on disk.
        """
        video_path, label, start_f, end_f = self.data[index]
        vid_paths = video_path.split('/')
        img_dir_path = os.path.join(self.frames_rpath, vid_paths[-2], vid_paths[-1][:-4])
        if os.path.exists(img_dir_path):
            if self.mode == 'rgb':
                imgs = load_rgb_frames(video_path=video_path,
                                       image_dir=self.frames_rpath,
                                       begin=start_f,
                                       end=end_f)
            else:
                # imgs = load_flow_frames(self.root, vid, start_f, 64)
                # Bug fix: this branch used to only print and then crash with
                # an UnboundLocalError on `imgs`; fail with a clear error.
                raise ValueError('mode %r is not supported' % self.mode)
            imgs = self.transforms(imgs)
            return video_to_tensor(imgs), torch.from_numpy(label)
        return 0, 0

    def __len__(self):
        return len(self.data)
class VidorPytorchExtract(data_utl.Dataset):
    """Dataset variant for feature extraction.

    Unlike :class:`VidorPytorchTrain` it also returns the video's directory
    and file-stem identifiers, and skips clips whose features (``.npy``)
    already exist on disk.
    """

    def __init__(self, anno_rpath, save_dir, splits,
                 video_rpath, frames_rpath, mode, task='action',
                 transforms=None, low_memory=True):
        self.data = make_vidor_dataset(
            anno_rpath=anno_rpath,
            splits=splits,
            video_rpath=video_rpath,
            task=task,
            low_memory=low_memory)
        self.frames_rpath = frames_rpath
        self.splits = splits
        self.transforms = transforms
        self.mode = mode
        self.save_dir = save_dir

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (video_tensor, label_tensor, dir_id, vid_id); (0, 0, ...)
            when features already exist, (-1, -1, ...) when frames are missing.
        """
        video_path, label, start_f, end_f = self.data[index]
        vid_paths = video_path.split('/')
        img_dir_path = os.path.join(self.frames_rpath, vid_paths[-2], vid_paths[-1][:-4])
        # Features already extracted for this clip: signal the caller to skip.
        if os.path.exists(img_dir_path + '.npy'):
            return 0, 0, vid_paths[-2], vid_paths[-1][:-4]
        if os.path.exists(img_dir_path):
            if self.mode == 'rgb':
                imgs = load_rgb_frames(video_path=video_path,
                                       image_dir=self.frames_rpath,
                                       begin=start_f,
                                       end=end_f)
            else:
                # imgs = load_flow_frames(self.root, vid, start_f, 64)
                # Bug fix: this branch used to only print and then crash with
                # an UnboundLocalError on `imgs`; fail with a clear error.
                raise ValueError('mode %r is not supported' % self.mode)
            imgs = self.transforms(imgs)
            return video_to_tensor(imgs), torch.from_numpy(label), vid_paths[-2], vid_paths[-1][:-4]
        return -1, -1, vid_paths[-2], vid_paths[-1][:-4]

    def __len__(self):
        return len(self.data)
# Smoke-test driver: build a train/extract dataset for the chosen machine
# ('local' or 'gpu') and split, then iterate a few batches and print shapes.
if __name__ == '__main__':
    import videotransforms
    from torchvision import transforms
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-where', type=str, default="local")
    parser.add_argument('-split', type=str, default="test")
    parser.add_argument('-dataset', type=str, default='ext')
    args = parser.parse_args()
    # Hard-coded machine-specific data roots.
    local_anno_rpath = '/home/daivd/PycharmProjects/vidor/annotation'
    local_video_rpath = '/home/daivd/PycharmProjects/vidor/test_vids'
    gpu_anno_rpath = '/storage/dldi/PyProjects/vidor/annotation'
    gpu_video_rpath = '/storage/dldi/PyProjects/vidor/train_vids'
    mode = 'rgb'
    save_dir = 'output/features/'
    low_memory = True
    batch_size = 1
    # Random crop/flip for training; deterministic center crop for evaluation.
    train_transforms = transforms.Compose([videotransforms.RandomCrop(224),
                                           videotransforms.RandomHorizontalFlip()])
    test_transforms = transforms.Compose([videotransforms.CenterCrop(224)])
    task = 'action'
    # '-dataset train' selects the training dataset class, anything else the
    # feature-extraction variant.
    if args.dataset == 'train':
        Dataset = VidorPytorchTrain
    else:
        Dataset = VidorPytorchExtract
    if args.where == 'gpu':
        anno_rpath = gpu_anno_rpath
        video_rpath = gpu_video_rpath
        frames_rpath = 'data/Vidor_rgb/JPEGImages/'
    else:
        anno_rpath = local_anno_rpath
        video_rpath = local_video_rpath
        frames_rpath = '/home/daivd/PycharmProjects/vidor/Vidor_rgb/JPEGImages/'
    if args.split == 'train':
        dataset = Dataset(anno_rpath=anno_rpath,
                          splits=['training'],
                          video_rpath=video_rpath,
                          mode=mode,
                          task=task,
                          save_dir=save_dir,
                          frames_rpath=frames_rpath,
                          transforms=train_transforms,
                          low_memory=low_memory)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=36,
                                                 pin_memory=True)
    else:
        val_dataset = Dataset(anno_rpath=anno_rpath,
                              splits=['validation'],
                              video_rpath=video_rpath,
                              mode=mode,
                              save_dir=save_dir,
                              frames_rpath=frames_rpath,
                              task=task,
                              transforms=test_transforms,
                              low_memory=low_memory)
        dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=36,
                                                 pin_memory=True)
    for data in dataloader:
        # get the inputs
        inputs, labels, a, b = data
        # The extract dataset returns -1 placeholders for missing frames.
        if inputs.tolist()[0] != -1:
            print(inputs.size())  # torch.Size([1, 3, 4, 224, 224])
            print(labels.size())  # torch.Size([1, 1, 4])
| [
"os.path.exists",
"argparse.ArgumentParser",
"videotransforms.CenterCrop",
"dataset.vidor.VidOR",
"numpy.asarray",
"os.path.join",
"torch.from_numpy",
"videotransforms.RandomCrop",
"torch.utils.data.DataLoader",
"json.load",
"cv2.resize",
"cv2.imread",
"frames.extract_all_frames",
"videotr... | [((997, 1071), 'os.path.join', 'os.path.join', (['image_dir', 'video_path_splits[-2]', 'video_path_splits[-1][:-4]'], {}), '(image_dir, video_path_splits[-2], video_path_splits[-1][:-4])\n', (1009, 1071), False, 'import os\n'), ((1915, 1951), 'numpy.asarray', 'np.asarray', (['frames'], {'dtype': 'np.float32'}), '(frames, dtype=np.float32)\n', (1925, 1951), True, 'import numpy as np\n'), ((2721, 2757), 'numpy.asarray', 'np.asarray', (['frames'], {'dtype': 'np.float32'}), '(frames, dtype=np.float32)\n', (2731, 2757), True, 'import numpy as np\n'), ((2860, 2910), 'dataset.vidor.VidOR', 'VidOR', (['anno_rpath', 'video_rpath', 'splits', 'low_memory'], {}), '(anno_rpath, video_rpath, splits, low_memory)\n', (2865, 2910), False, 'from dataset.vidor import VidOR\n'), ((7582, 7607), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7605, 7607), False, 'import argparse\n'), ((1159, 1205), 'frames.extract_all_frames', 'extract_all_frames', (['video_path', 'image_dir_path'], {}), '(video_path, image_dir_path)\n', (1177, 1205), False, 'from frames import extract_all_frames\n'), ((1324, 1348), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1338, 1348), False, 'import os\n'), ((4782, 4848), 'os.path.join', 'os.path.join', (['self.frames_rpath', 'vid_paths[-2]', 'vid_paths[-1][:-4]'], {}), '(self.frames_rpath, vid_paths[-2], vid_paths[-1][:-4])\n', (4794, 4848), False, 'import os\n'), ((4860, 4888), 'os.path.exists', 'os.path.exists', (['img_dir_path'], {}), '(img_dir_path)\n', (4874, 4888), False, 'import os\n'), ((6583, 6649), 'os.path.join', 'os.path.join', (['self.frames_rpath', 'vid_paths[-2]', 'vid_paths[-1][:-4]'], {}), '(self.frames_rpath, vid_paths[-2], vid_paths[-1][:-4])\n', (6595, 6649), False, 'import os\n'), ((6662, 6699), 'os.path.exists', 'os.path.exists', (["(img_dir_path + '.npy')"], {}), "(img_dir_path + '.npy')\n", (6676, 6699), False, 'import os\n'), ((6772, 6800), 'os.path.exists', 
'os.path.exists', (['img_dir_path'], {}), '(img_dir_path)\n', (6786, 6800), False, 'import os\n'), ((9363, 9473), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(36)', 'pin_memory': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True,\n num_workers=36, pin_memory=True)\n', (9390, 9473), False, 'import torch\n'), ((10010, 10125), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(36)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=batch_size, shuffle=\n True, num_workers=36, pin_memory=True)\n', (10037, 10125), False, 'import torch\n'), ((2439, 2483), 'cv2.resize', 'cv2.resize', (['imgx'], {'dsize': '(0, 0)', 'fx': 'sc', 'fy': 'sc'}), '(imgx, dsize=(0, 0), fx=sc, fy=sc)\n', (2449, 2483), False, 'import cv2\n'), ((2503, 2547), 'cv2.resize', 'cv2.resize', (['imgy'], {'dsize': '(0, 0)', 'fx': 'sc', 'fy': 'sc'}), '(imgy, dsize=(0, 0), fx=sc, fy=sc)\n', (2513, 2547), False, 'import cv2\n'), ((8229, 8260), 'videotransforms.RandomCrop', 'videotransforms.RandomCrop', (['(224)'], {}), '(224)\n', (8255, 8260), False, 'import videotransforms\n'), ((8305, 8343), 'videotransforms.RandomHorizontalFlip', 'videotransforms.RandomHorizontalFlip', ([], {}), '()\n', (8341, 8343), False, 'import videotransforms\n'), ((8389, 8420), 'videotransforms.CenterCrop', 'videotransforms.CenterCrop', (['(224)'], {}), '(224)\n', (8415, 8420), False, 'import videotransforms\n'), ((1368, 1388), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1378, 1388), False, 'import cv2\n'), ((1571, 1614), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(0, 0)', 'fx': 'sc', 'fy': 'sc'}), '(img, dsize=(0, 0), fx=sc, fy=sc)\n', (1581, 1614), False, 'import cv2\n'), ((2637, 2661), 'numpy.asarray', 'np.asarray', (['[imgx, imgy]'], {}), '([imgx, imgy])\n', (2647, 2661), True, 'import numpy as np\n'), 
((3149, 3168), 'json.load', 'json.load', (['action_f'], {}), '(action_f)\n', (3158, 3168), False, 'import json\n'), ((5573, 5596), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (5589, 5596), False, 'import torch\n'), ((7283, 7306), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (7299, 7306), False, 'import torch\n')] |
import io
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger
from dash.exceptions import PreventUpdate
import plotly.graph_objs as go
import pandas as pd
import base64
import dash_table
from dash_table.Format import Format, Scheme, Trim
import numpy as np
import equadratures as eq
import equadratures.distributions as db
import ast
import numexpr as ne
from utils import convert_latex
from app import app
###################################################################
# Distributions Data (@To add more)
###################################################################
# Distribution families grouped by which input fields the UI must expose.
# NOTE(review): grouping inferred from the names (mean/variance, lower/upper
# bounds, shape parameter, all four) — confirm against the parameter callbacks.
MEAN_VAR_DIST = ["gaussian"]
LOWER_UPPER_DIST = ["uniform"]
SHAPE_PARAM_DIST = ["lognormal"]
ALL_4 = ["beta", "truncated-gaussian"]
# Dropdown switching between the analytical-model and offline-model workflows.
model_selection=dcc.Dropdown(
    options=[
        {'label': 'Analytical Model', 'value': 'analytical'},
        {'label': 'Offline Model', 'value': 'offline'},
    ],
    className="m-1", id='model_select',
    value='analytical', clearable=False)
###################################################################
# Collapsable more info card
###################################################################
# Markdown body of the "More Information" modal (rendered via convert_latex).
# Fixed user-facing typos: "unceratinty" -> "uncertainty"; added the missing
# apostrophe in "parameters' probability density function".
info_text = r'''
This app uses Equadratures to compute uncertainty in the user-defined data. In this model user
can define parameters, select basis function and create a polynomial.
#### Instructions
1. Click **add parameter** button in parameter definition card to add parameters. Choose the type of distribution and on basis of **selected distribution**, input the required fields.
2. To visualize the defined parameters' **probability density function** press the toggle button to obtain the plot in Probability density function card
3. Select the **basis** type from basis selection card and input required fields based on the basis selected (For example sparse-grid requires q-val, order and growth as input)
4. Use **Set Basis** button to compute the **cardinality** and get insights regarding the basis function chosen in the basis selection card.
5. Set the solver method for Polynomial and enter the **input function** in parameter definition card for computing **statistical moments**, use sobol dropdown to gain insights regarding **sensitivity analysis**
'''
# Button + modal that shows the `info_text` usage instructions. The modal's
# open/close state is driven elsewhere by the 'data-info-open'/'data-info-close'
# button ids.
info = html.Div(
    [
        dbc.Button("More Information",color="primary",id="data-info-open",className="py-0"),
        dbc.Modal(
            [
                dbc.ModalHeader(dcc.Markdown('**More Information**')),
                dbc.ModalBody(dcc.Markdown(convert_latex(info_text),dangerously_allow_html=True)),
                dbc.ModalFooter(dbc.Button("Close", id="data-info-close", className="py-0", color='primary')),
            ],
            id="data-info",
            scrollable=True,size='lg'
        ),
    ]
)
###################################################################
# Parameter Definition Card
###################################################################
# Card where the user adds parameters and types the input function.
# The 'param_add' Div is filled dynamically with one row per added parameter.
TOP_CARD = dbc.Card(
    [
        dbc.CardHeader(dcc.Markdown("**Parameter Definition**",style={"color": "#000000"})),
        dbc.CardBody(
            [
                dbc.Row(
                    [
                        dbc.Col(
                            dbc.Button('Add Parameter', id='AP_button',
                                       n_clicks=0, color="primary", className="py-0"),
                            width='auto'),
                        dbc.Col(
                            dbc.Spinner(html.Div(id='param_added'),color='primary'),
                            width=1),
                        dbc.Col(
                            dcc.Input(id="input_func", type="text", placeholder="Input Function...",
                                      className='ip_field', debounce=True),
                            width=3),
                        dbc.Col(dbc.Alert(id='input-warning',color='danger',is_open=False), width=3),
                    ]
                ),
                html.Br(),
                dbc.Row(
                    dbc.Col(
                        # Placeholder populated by callbacks with parameter rows.
                        html.Div(id='param_add', children=[])
                    )
                )
            ],
            className='top_card',
        )
    ],
    id='top_card',
)
###################################################################
# PDF Plot
###################################################################
# Graph + card showing the PDF of the currently selected parameter.
PDF_PLOT = dcc.Graph(id='plot_pdf', style={'height':'50vh','width': 'inherit'})
PDF_GRAPH = dbc.Card(
    [
        dbc.CardHeader(dcc.Markdown("**Probability Density Function**")),
        dbc.CardBody(
            [
                dbc.Row(dbc.Col(PDF_PLOT,width=12))
            ]
        )
    ], style={'height':'60vh'}
)
###################################################################
# Card for setting basis, levels, q and compute cardinality
###################################################################
# Basis-type selector; q-val / level / growth inputs are enabled or disabled
# elsewhere depending on which basis is chosen (e.g. sparse-grid needs all).
basis_dropdown = dcc.Dropdown(
    options=[
        {'label': 'Total-order', 'value': 'total-order'},
        {'label': 'Tensor-grid', 'value': 'tensor-grid'},
        {'label': 'Sparse-grid', 'value': 'sparse-grid'},
        {'label': 'Hyperbolic-basis', 'value': 'hyperbolic-basis'},
        {'label': 'Euclidean-degree', 'value': 'euclidean-degree'}
    ],
    placeholder='Select Basis', className="m-1", id='drop_basis',
    value='tensor-grid', clearable=False,
)
growth_dropdown = dcc.Dropdown(
    options=[
        {'label': 'Linear', 'value': 'linear'},
        {'label': 'Exponential', 'value': 'exponential'},
    ],
    placeholder='Growth Rule', clearable=False,
    className="m-1", id='basis_growth_rule',
    disabled=True,
)
# Hidden-by-default button plus download target for the DOE point set.
DOE_download= html.Div([
    dbc.Button("Download DOE",id='download_button',style={'display':'None'},color='primary'),
    dcc.Download(id='download_DOE_data')
])
# Card combining the basis controls, the cardinality readout and a plot of
# the quadrature/DOE points.
BASIS_CARD = dbc.Card(
    [
        dbc.CardHeader(dcc.Markdown("**Basis Selection**")),
        dbc.CardBody(
            [
                dbc.Row(
                    [
                        dbc.Col(basis_dropdown, width=3),
                        dbc.Col(
                            dbc.Input(bs_size="sm", id='q_val', type="number", value=np.nan, placeholder='q',
                                      className='ip_field', disabled=True),
                            width=2),
                        dbc.Col(
                            dbc.Input(bs_size="sm", id='levels', type='number', value=np.nan, placeholder='Level',
                                      className='ip_field', disabled=True),
                            width=2),
                        dbc.Col(growth_dropdown,width=3)
                    ], justify="start"
                ),
                dbc.Row(
                    [
                        dbc.Col(
                            dbc.Button('Set basis', id='basis_button', n_clicks=0, className='ip_buttons',color='primary',disabled=False),
                            width=2),
                        dbc.Col(
                            dbc.Input(bs_size="sm", id='op_box', type="number", value='', placeholder='Cardinality...', className='ip_field',disabled=True),
                            width=3),
                        dbc.Col(dbc.Alert(id='compute-warning',color='danger',is_open=False),width='auto'),
                        dbc.Col(DOE_download,width=3)
                    ]
                ),
                dbc.Row(dbc.Col(
                    dcc.Graph(id='plot_basis',style={'width': 'inherit', 'height': '40vh', 'margin-top':'5px'}),
                    width=8),align='start',justify='center'),
            ]
        )
    ], style={"height": "60vh"}
)
###################################################################
# Results card
###################################################################
# Shared axis styling for the 3D polynomial-response figure; copied per axis
# so each copy can carry its own title.
params = dict(
    gridcolor="white",
    showbackground=False,
    linecolor='black',
    tickcolor='black',
    ticks='outside',
    zerolinecolor="white"
)
xparams = params.copy()
xparams['title'] = 'x1'
yparams = params.copy()
yparams['title'] = 'x2'
zparams = params.copy()
zparams['title'] = 'f(x)'
layout = dict(margin={'t': 0, 'r': 0, 'l': 0, 'b': 0, 'pad': 10}, autosize=True,
              scene=dict(
                  aspectmode='cube',
                  xaxis=xparams,
                  yaxis=yparams,
                  zaxis=zparams,
              ),
              paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)'
              )
# Empty surface + scatter traces; callbacks later fill in the fitted
# polynomial surface and the training points.
polyfig3D = go.Figure(layout=layout)
polyfig3D.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
polyfig3D.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
polyfig3D.add_trace(go.Surface(x=[], y=[], z=[], showscale=False, opacity=0.5,
                               colorscale=[[0, 'rgb(178,34,34)'], [1, 'rgb(0,0,0)']]))
polyfig3D.add_trace(go.Scatter3d(x=[], y=[], z=[], mode='markers',
                                 marker=dict(size=10, color="rgb(144, 238, 144)", opacity=0.6,
                                             line=dict(color='rgb(0,0,0)', width=1))))
# Solver used to fit the polynomial coefficients.
method_dropdown = dcc.Dropdown(
    options=[
        {'label': 'Least-squares', 'value': 'least-squares'},
        {'label': 'Numerical-integration', 'value': 'numerical-integration'},
    ],
    placeholder='Solver method', clearable=False,
    value='numerical-integration',
    className="m-1", id='solver_method',
)
Upload_region=html.Div([
dcc.Upload(
id='upload_data',
children=html.Div([
'Upload model evalutions ',
html.A('Select Files',style={'color':'blue'},id='filename_append'),
]),
),
])
dataset_info = html.Div(
[
dbc.Button("View Dataset",color="primary",id="dataset-info-open",className="py-0",disabled=True),
dbc.Modal(
[
dbc.ModalHeader(dcc.Markdown('',id='dataset_filename')),
dbc.ModalBody([dash_table.DataTable(data=[],columns=[],id='upload_data_table',
style_table={'overflowX': 'auto','overflowY':'auto','height':'35vh'},
editable=True,fill_width=True,page_size=20)],id='dataset_data'),
dbc.ModalFooter(dbc.Button("Close", id="dataset-info-close", className="py-0", color='primary')),
],
id="dataset-info",
scrollable=True,size='lg'
),
],
id='dataset-div'
)
# Read-only numeric fields reporting the output statistics; their values
# are written by the SetModel callback.
mean_form = dbc.FormGroup(
    [
        dbc.Label("Mean",html_for='mean'),
        dbc.Row(dbc.Col(
            dbc.Input(bs_size="sm", id='mean', type='number', value=np.nan, placeholder='Mean...',
                      className='ip_field', disabled=True)
        ), style={'align':'center'})
    ]
)
var_form = dbc.FormGroup(
    [
        dbc.Label("Variance",html_for='variance'),
        dbc.Row(dbc.Col(
            dbc.Input(bs_size="sm", id='variance', type='number', value=np.nan,
                      placeholder='Variance..,', className='ip_field', disabled=True)
        ), style={'align': 'center'})
    ]
)
r2_form = dbc.FormGroup(
    [
        dbc.Label("R2 score",html_for='r2_score'),
        dbc.Row(dbc.Col(
            dbc.Input(bs_size="sm", id='r2_score', type='number', value=np.nan,
                      placeholder='R2 Score..,', className='ip_field', disabled=True)
        ), style={'align':'center'})
    ]
)
# Dropdown choosing which order of Sobol' indices to plot; the individual
# options are enabled/disabled by the SobolCheck callback.
sobol_form = dbc.FormGroup(
    [
        dbc.Label("Senstivity Indices",html_for='sobol_order'),
        dbc.Row(dbc.Col(
            dcc.Dropdown(
                options=[
                    {'label': 'Order 1', 'value': 1},
                    {'label': 'Order 2', 'value': 2},
                    {'label': 'Order 3', 'value': 3},
                ],
                placeholder='Order 1', value=1,
                className="m-1", id='sobol_order',
                disabled=True, clearable=False,
            ),
        ))
    ]
)
# Placeholder graph for the Sobol' indices bar chart (filled by Plot_Sobol).
sobol_plot = dcc.Graph(id='Sobol_plot', style={'width': 'inherit', 'height':'35vh'})
# Left column of the compute card: solver/upload controls, the compute
# button, summary statistics and the Sobol' plot.
left_side = [
    dbc.Row([dbc.Col(method_dropdown,width=6),
             dbc.Col(
                 dbc.Spinner([Upload_region],show_initially=False,color='primary')
             )
             ]),
    dbc.Row([dbc.Col(
        dbc.Button('Compute Polynomial', id='CU_button', n_clicks=0, className='ip_buttons',color='primary',disabled=True)
    ),
             dbc.Col(dataset_info,width='auto')
             ]),
    dbc.Row([dbc.Col(dbc.Alert(id='poly-warning',color='danger',is_open=False), width=3)]),
    dbc.Row(
        [
            dbc.Col(mean_form),
            dbc.Col(var_form),
            dbc.Col(r2_form),
        ]
    ),
    dbc.Row(dbc.Col(sobol_form,width=6)),
    dbc.Row(dbc.Col(sobol_plot,width=8))
]
# Right column: the 3-D polyfit plot wrapped in a loading spinner.
right_side = dbc.Spinner(
    [
        dcc.Graph(id='plot_poly_3D', style={'width': 'inherit','height':'60vh'}, figure=polyfig3D),
        dbc.Alert(id='plot_poly_info',color='primary',is_open=False)
    ], color='primary',type='grow',show_initially=False
)
# Card combining the two columns above.
COMPUTE_CARD = dbc.Card(
    [
        dbc.CardHeader(dcc.Markdown("**Compute Polynomial**")),
        dbc.CardBody(
            [
                dbc.Row(
                    [
                        dbc.Col(left_side,width=6),
                        dbc.Col(right_side,width=6)
                    ]
                )
            ]
        )
    ], style={"height": "80vh"}
)
# Hover tooltips for the parameter-entry controls.
tooltips = html.Div(
    [
        dbc.Tooltip("Maximum of 5 parameters",target="AP_button"),
        dbc.Tooltip("The variables should be of the form x1,x2...",target="input_func"),
        # dbc.Tooltip('Set basis and Input Function first',target="CU_button"),
    ]
)
###################################################################
# Overall app layout
###################################################################
layout = dbc.Container(
    [
        dbc.Row([
            dbc.Col(model_selection,width=4)
        ]),
        html.H2("Uncertainty quantification of an analytical model",id='main_text'),
        dbc.Row(
            [
                dbc.Col(dcc.Markdown('Define an analytical model, and its uncertain input parameters. Then, use polynomial chaos to compute output uncertainties and sensitivities.',id='info_text'),width='auto'),
                dbc.Col(info,width='auto')
            ], align='center', style={'margin-bottom':'10px'}
        ),
        dbc.Row(dbc.Col(TOP_CARD, width=12),style={'margin-bottom':'10px'}),
        dbc.Row(
            [
                dbc.Col(PDF_GRAPH, width=5),
                dbc.Col(BASIS_CARD, width=7)
            ],
        ),
        dbc.Row(dbc.Col(COMPUTE_CARD,width=12),
                style={'margin-top':'10px'}),
        # Small bits of data (store clientside)
        dcc.Store(id='ndims'),
        # Big bits of data (store serverside)
        dcc.Store(id='ParamsObject'),
        dcc.Store(id='PolyObject'),
        dcc.Store(id='BasisObject'),
        dcc.Store(id='DOE'),
        dcc.Store(id='UploadedDF'),
        tooltips
    ], fluid=True
)
###################################################################
# Callback for disabling AP button after 5 clicks
###################################################################
@app.callback(
    Output('AP_button', 'disabled'),
    Input('AP_button', 'n_clicks'),
    Input('basis_button','n_clicks')
)
def check_param(n_clicks, ndims):
    """Disable the Add Parameter button after five parameters have been added.

    Parameters
    ----------
    n_clicks : int or None
        Click count of AP_button (None before the first click).
    ndims : int or None
        Click count of basis_button; only used to re-trigger the callback.

    Returns
    -------
    bool : True when the button must be disabled.
    """
    # Fixed: `n_clicks > 4` raised TypeError when n_clicks was still None
    # (the callback also fires on page load).
    return n_clicks is not None and n_clicks > 4
@app.callback(
    Output('input_func','style'),
    Output('upload_data','style'),
    Output('dataset-div','style'),
    Input('model_select','value')
)
def InputRequired(model):
    """Show either the analytical input-function box or the upload widgets.

    Returns the styles for (input_func, upload_data, dataset-div); a
    ``{'display': 'None'}`` style hides the component, ``None`` shows it.
    """
    hidden = {'display':'None'}
    if model == 'analytical':
        # Analytical model: show the function box, hide the upload widgets.
        return None, hidden, hidden
    # Offline model: hide the function box and style the upload drop zone.
    upload_style = {
        'width': '100%',
        'height': '60px',
        'lineHeight': '60px',
        'borderWidth': '1px',
        'borderStyle': 'dashed',
        'borderRadius': '5px',
        'textAlign': 'center',
        'margin': '10px'
    }
    return hidden, upload_style, None
###################################################################
# Callback for adding parameter to param definition card
###################################################################
@app.callback(
    Output('param_add', 'children'),
    Output('param_added','children'),
    [Input('AP_button', 'n_clicks'),
     State('param_add', 'children')]
)
def addInputs(n_clicks, children):
    """Append one parameter-definition row to the parameter card.

    Each row contains a distribution dropdown, two moment/shape inputs,
    support bounds, a polynomial order and a PDF-plot toggle, all using
    pattern-matching ids indexed by the click count ``n_clicks``.

    Returns the updated ``children`` list and ``None`` for 'param_added'.
    """
    # Distribution selector; the chosen value decides which of the other
    # inputs are enabled (see UpdateInputField).
    dist_dropdown = dcc.Dropdown(
        options=[
            {'label': 'Uniform', 'value': 'uniform'},
            {'label': 'Gaussian', 'value': 'gaussian'},
            {'label': 'Truncated Gaussian', 'value': 'truncated-gaussian'},
            {'label': 'LogNormal', 'value': 'lognormal'},
            {'label': 'Beta', 'value': 'beta'}
        ],
        placeholder='Select a distribution', value='uniform', clearable=False,
        className="m-1", id={'type': 'drop-1', 'index': n_clicks},
    )
    dist_form = dbc.Form(
        [
            dbc.Label('Distribution', html_for='drop-1'),
            dist_dropdown,
        ]
    )
    # Statistical moments / shape parameters; their meaning depends on the
    # selected distribution.
    params_form = dbc.Form(
        [
            dbc.Label('Statistical moments/shape parameters',html_for='params'),
            dbc.Row(
                [
                    dbc.Col(
                        dbc.Input(bs_size="sm", id={'type': 'params', 'index': n_clicks}, type="number",
                                  value=np.nan, placeholder='',
                                  debounce=True, className='ip_field'),
                        width=6),
                    dbc.Col(
                        dbc.Input(bs_size="sm", id={'type': 'params_2', 'index': n_clicks}, type="number",
                                  value=np.nan, placeholder='',
                                  debounce=True, className='ip_field'),
                        width=6),
                ], justify='start', align='start'
            )
        ]
    )
    # Lower/upper support bounds.
    min_max_form = dbc.Form(
        [
            dbc.Label('Support', html_for='min_val'),
            dbc.Row(
                [
                    dbc.Col([
                        dbc.Input(bs_size="sm", id={'type': 'min_val', 'index': n_clicks}, type="number",
                                  value=np.nan, placeholder='Minimum value...',
                                  debounce=True, className='ip_field'),
                    ], width=6),
                    dbc.Col([
                        dbc.Input(bs_size="sm", id={'type': 'max_val', 'index': n_clicks}, type="number",
                                  value=np.nan, placeholder="Maximum value...",
                                  debounce=True, className='ip_field')
                    ], width=6),
                ], justify='start', align='start'
            )
        ]
    )
    # Polynomial order for this parameter.
    order_form = dbc.Form(
        [
            dbc.Label('Order'),
            dbc.Input(bs_size="sm", id={'type': 'order', 'index': n_clicks}, type="number",
                      value=np.nan,min=0,
                      placeholder="Order",
                      debounce=True, className='ip_field')
        ]
    )
    # Toggle selecting this parameter's PDF for plotting (see PlotPdf).
    toggle_form = dbc.Form(
        [
            dbc.Label('Plot PDF'),
            dbc.Checklist(
                options=[{"value": "val_{}".format(n_clicks),'disabled':False}],
                switch=True, value=[0], id={"type": "radio_pdf","index": n_clicks},
            )
        ]
    )
    # Assemble layout
    if n_clicks > 0:
        add_card = dbc.Row(
            [
                dbc.Col(dcc.Markdown(convert_latex(r'$x_%d$' %n_clicks),dangerously_allow_html=True), width=1),
                dbc.Col(dist_form, width=2),
                dbc.Col(params_form, width=3),
                dbc.Col(min_max_form, width=3),
                dbc.Col(order_form, width=1),
                dbc.Col(toggle_form,width=1)
            ], align='start'
        )
    else:
        # Before the first click append an empty row only.
        add_card = dbc.Row()
    children.append(add_card)
    return children,None
@app.callback(
    Output('main_text','children'),
    Output('info_text','children'),
    Input('model_select','value')
)
def MainText(model):
    """Headline and description text matching the selected model type."""
    if model == 'analytical':
        title = 'Uncertainty quantification of an analytical model'
        blurb = 'Define an analytical model, and its uncertain input parameters. Then, use polynomial chaos to compute output uncertainties and sensitivities.'
    else:
        title = 'Uncertainty quantification of an offline model'
        blurb = 'Define an offline model, and its uncertain input parameters.Download the DOE points and then, use polynomial chaos to compute output uncertainties and sensitivities at your simulation results.'
    return title, blurb
###################################################################
# Callback for disabling Cardinality Check button
###################################################################
@app.callback(
    Output('basis_button','disabled'),
    [
        Input('AP_button','n_clicks'),
    ]
)
def CheckifAPClicked(n_clicks):
    """Enable the basis button only once AP_button has triggered this callback."""
    triggered = dash.callback_context.triggered[0]['prop_id']
    # Disabled unless the trigger came from AP_button.
    return 'AP_button' not in triggered
@app.callback(
    Output('download_button','disabled'),
    Input('BasisObject','data'),
)
def ShowDownload(basis):
    """Enable the DOE download button once a basis object exists."""
    return basis is None
@app.callback(
    Output('download_DOE_data','data'),
    Output('download_button','style'),
    Input('download_button','n_clicks'),
    Input('model_select','value'),
    Input('ParamsObject', 'data'),
    Input('BasisObject', 'data'),
    Input('solver_method', 'value'),
    prevent_initial_call = True
)
def DOEdownload(n_clicks,model,params,basis,method):
    """Serve the DOE points as a CSV download (offline models only).

    For analytical models the download button is hidden instead.
    """
    if model=='offline':
        if basis is not None:
            # Build a poly only to obtain its DOE/quadrature points.
            mypoly = Set_Polynomial(params, basis, method)
            DOE = mypoly.get_points()
            DOE=pd.DataFrame(DOE)
            changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
            if 'download_button' in changed_id:
                # Only actually stream the file when the button was clicked.
                return dcc.send_data_frame(DOE.to_csv, "DOE.csv"), None
            else:
                raise PreventUpdate
        else:
            return None,None
    if model=='analytical':
        # Hide the download button for analytical models.
        return None,{'display':'None'}
    else:
        raise PreventUpdate
def ParseData(content, filename):
    """Decode an uploaded model-evaluations file into an (n, 1) numpy array.

    Parameters
    ----------
    content : str
        dcc.Upload data-URI of the form "<header>,<base64 payload>".
    filename : str
        Uploaded file name; used to choose the parser (.csv or .npy).

    Returns
    -------
    numpy.ndarray of shape (n, 1), or None if the file could not be parsed.

    Raises
    ------
    PreventUpdate
        If the filename is neither a csv nor an npy file.
    """
    # Fixed: split only on the first comma, so an unusual data-URI header
    # cannot break the unpacking (the base64 payload itself has no commas).
    _content_type, content_string = content.split(',', 1)
    try:
        if 'csv' in filename:
            decoded = base64.b64decode(content_string)
            df = np.genfromtxt(io.StringIO(decoded.decode('utf-8')), delimiter=',')
            # Skip the header row, keep the last column (the model outputs).
            return np.array([row[-1] for row in df[1:]]).reshape(-1, 1)
        elif 'npy' in filename:
            raw = base64.b64decode(content_string)
            return np.load(io.BytesIO(raw)).reshape(-1, 1)
    except Exception:
        # Any decode/parse failure is reported to the caller as None.
        return None
    # Unsupported file type: leave the dash outputs untouched.
    raise PreventUpdate
@app.callback(
    ServersideOutput('UploadedDF','data'),
    Output('filename_append','children'),
    Output('dataset-info-open','disabled'),
    Input('model_select','value'),
    Input('upload_data','filename'),
    Input('upload_data','contents'),
    Input('DOE','data')
)
def ParsedData(model, filename, content, DOE):
    """Parse an uploaded evaluations file and validate it against the DOE.

    Returns (parsed array, filename label, dataset-button disabled flag);
    on a size mismatch or parse failure returns (None, 'Error', True).
    """
    if model != 'offline':
        raise PreventUpdate
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'upload_data' not in changed_id:
        raise PreventUpdate
    df = ParseData(content, filename)
    # Fixed: ParseData returns None on a parse failure; the original then
    # crashed on `df.shape`. Treat that case as an upload error.
    if df is not None and df.shape[0] == DOE.shape[0]:
        return df, [filename], False
    return None, 'Error', True
@app.callback(
Output("dataset-info", "is_open"),
[Input("dataset-info-open", "n_clicks"), Input("dataset-info-close", "n_clicks")],
[State("dataset-info", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
    Output('dataset_filename','children'),
    Output('upload_data_table','data'),
    Output('upload_data_table','columns'),
    Input('filename_append','children'),
    Input("dataset-info", "is_open"),
    Input("UploadedDF",'data'),
    Input("DOE",'data')
)
def DatasetInfo(filename,is_open,df,DOE):
    """Populate the dataset modal table when it is opened.

    Joins the uploaded model evaluations (column 0) with the DOE points
    (remaining columns) into dash-table row records.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'dataset-info' in changed_id:
        if is_open:
            data=[]
            # Column 0: model evaluations; columns 1..n: DOE coordinates.
            vals=np.column_stack((df,DOE))
            for i in range(vals.shape[0]):
                val_dict = {}
                for j in range(vals.shape[1]):
                    if j==0:
                        val_dict['model_evaluations'] = vals[i][j]
                    else:
                        val_dict['DOE_{}'.format(j)] = vals[i][j]
                    if j==vals.shape[1]-1:
                        # Row complete: append the record.
                        data.append(val_dict)
            print(data)  # NOTE(review): leftover debug print
            columns = [
                {'name': i, 'id': i, 'deletable': False, 'type': 'numeric', 'format': Format(precision=4)}
                for i in data[0].keys()]
            return filename,data,columns
        else:
            raise PreventUpdate
    else:
        raise PreventUpdate
###################################################################
# Callback for disabling Compute Uncertainty button
###################################################################
@app.callback(
    Output('CU_button','disabled'),
    [
        Input('basis_button','n_clicks'),
        Input('input_func','value'),
        Input('AP_button','n_clicks'),
        Input('model_select','value'),
        Input('UploadedDF','data')
    ],
)
def CheckifCCClickd(n_clicks, input_val, ap, model, uploaded):
    """Enable the Compute Polynomial button once its prerequisites exist.

    Disabled again whenever a new parameter row is added (AP_button), until
    the basis has been (re)checked and — depending on the model type — an
    input function or an uploaded dataset is available.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'AP_button' in changed_id:
        # A new parameter row was just added: force a fresh basis check.
        return True
    # Fixed: the original guard `if 'basis_button' or 'input_func' in
    # changed_id` was vacuously True, so every non-AP trigger lands here;
    # the unreachable else branch and the duplicated changed_id computation
    # were removed.
    if not n_clicks:
        # basis_button not pressed yet (n_clicks is None or 0) — also fixes
        # the original TypeError on `None > 0`.
        return True
    if model == 'analytical':
        return input_val is None
    return uploaded is None
###################################################################
# Callback to map input boxes to distributions
###################################################################
@app.callback(
    Output({'type': 'params', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'params_2', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'min_val', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'max_val', 'index': dash.dependencies.MATCH}, 'placeholder'),
    Output({'type': 'params', 'index': dash.dependencies.MATCH}, 'disabled'),
    Output({'type': 'params_2', 'index': dash.dependencies.MATCH}, 'disabled'),
    Output({'type': 'min_val', 'index': dash.dependencies.MATCH}, 'disabled'),
    Output({'type': 'max_val', 'index': dash.dependencies.MATCH}, 'disabled'),
    [Input({'type': 'drop-1', 'index': dash.dependencies.MATCH}, 'value')],
    prevent_initial_callback=True,
)
def UpdateInputField(value):
    """Match a parameter row's input boxes to the selected distribution.

    Returns 8 values: four placeholders followed by four 'disabled' flags
    for the params / params_2 / min_val / max_val inputs.
    """
    show = False
    hide = True
    if value is None:
        # Fixed: the original returned only 7 values for the 8 outputs,
        # which makes Dash raise an invalid-callback-return error.
        return '...', '...', '...', '...', hide, hide, hide, hide
    elif value in MEAN_VAR_DIST:
        return 'Mean...', 'Variance...', ' ', ' ', show, show, hide, hide
    elif value in LOWER_UPPER_DIST:
        return '', '', 'Lower bound...', 'Upper bound...', hide, hide, show, show
    elif value in SHAPE_PARAM_DIST:
        return 'Shape parameter...', ' ', '', '', show, hide, hide, hide
    elif value in ALL_4:
        return 'Shape param. A...', 'Shape param. B...', 'Lower bound...', 'Upper bound...', show, show, show, show
# @app.callback(
# Output({'type':'radio_pdf','index': dash.dependencies.ALL},'disabled'),
# Input('AP_button','n_clicks'),
# prevent_intial_call=True
# )
# def Toggle(n_clicks):
# changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
# print(changed_id)
# if 'basis_button' in changed_id:
# return [{'disabled':False}]
# else:
# val={'disabled':True}
# return [val]*n_clicks
###################################################################
# Callback to create EQ Param Objects
###################################################################
def _any_missing(*values):
    # True if any required input has not been filled in yet.
    return any(v is None for v in values)

@app.callback(
    ServersideOutput('ParamsObject', 'data'),
    Output('ndims','data'),
    [
        Input({'type': 'params', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'params_2', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'drop-1', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'max_val', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'min_val', 'index': dash.dependencies.ALL}, 'value'),
        Input({'type': 'order', 'index': dash.dependencies.ALL}, 'value'),
        Input('basis_button','n_clicks'),
    ],
    prevent_intial_call=True
)
def ParamListUpload(shape_parameter_A, shape_parameter_B, distribution, max_val, min_val, order, basis_click):
    """Build the list of eq.Parameter objects from the parameter rows.

    Triggered by basis_button. Returns (parameter list, number of
    dimensions); returns (None, None) when any row is incomplete or invalid.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'basis_button' not in changed_id:
        raise PreventUpdate
    ndims = len(distribution)
    if ndims == 0:
        # Fixed: the original fell through with a single implicit None for
        # the callback's two outputs.
        return None, None
    param_list = []
    for j in range(ndims):
        if distribution[j] in MEAN_VAR_DIST:
            # Fixed: the original tested `(a and b and c) is None`, which
            # misses combinations such as a==0 followed by a None value and
            # then crashed later — every value is now checked individually.
            if _any_missing(shape_parameter_A[j], shape_parameter_B[j], order[j]):
                return None, None
            if order[j] < 0:
                return None, None
            param = eq.Parameter(distribution=distribution[j], shape_parameter_A=shape_parameter_A[j],
                                 shape_parameter_B=shape_parameter_B[j], lower=min_val[j],
                                 upper=max_val[j],
                                 order=order[j])
        elif distribution[j] in ALL_4:
            if _any_missing(shape_parameter_A[j], shape_parameter_B[j], min_val[j], max_val[j], order[j]):
                return None, None
            if min_val[j] > max_val[j]:
                return None, None
            param = eq.Parameter(distribution=distribution[j], shape_parameter_A=shape_parameter_A[j],
                                 shape_parameter_B=shape_parameter_B[j], lower=min_val[j], upper=max_val[j],
                                 order=order[j])
        elif distribution[j] in SHAPE_PARAM_DIST:
            if _any_missing(shape_parameter_A[j], order[j]):
                return None, None
            param = eq.Parameter(distribution=distribution[j], shape_parameter_A=shape_parameter_A[j],
                                 order=order[j])
        elif distribution[j] in LOWER_UPPER_DIST:
            if _any_missing(min_val[j], max_val[j], order[j]):
                return None, None
            param = eq.Parameter(distribution=distribution[j], lower=min_val[j], upper=max_val[j], order=order[j])
        # NOTE(review): a distribution outside the four known sets would
        # append a stale/unbound `param`; assumed unreachable given the
        # dropdown options.
        param_list.append(param)
    return param_list, len(param_list)
###################################################################
# Function to compute s_values and pdf
###################################################################
def CreateParam(distribution, shape_parameter_A, shape_parameter_B, min, max, order):
    """Construct an eq.Parameter and return it with its PDF samples.

    Returns (parameter object, abscissa values, pdf values).
    Note: `min`/`max` shadow the builtins but are part of the keyword
    interface used by callers, so they are kept.
    """
    param = eq.Parameter(
        distribution=distribution,
        shape_parameter_A=shape_parameter_A,
        shape_parameter_B=shape_parameter_B,
        lower=min,
        upper=max,
        order=order,
    )
    s_values, pdf = param.get_pdf()
    return param, s_values, pdf
###################################################################
# Misc Callbacks
###################################################################
# More info collapsable
# NOTE(review): this reuses the name `toggle_modal` defined above for the
# dataset modal; Dash registers each callback at decoration time, so both
# keep working even though the later def shadows the earlier module name.
@app.callback(
    Output("data-info", "is_open"),
    [Input("data-info-open", "n_clicks"), Input("data-info-close", "n_clicks")],
    [State("data-info", "is_open")],
)
def toggle_modal(n1, n2, is_open):
    """Toggle the 'more info' modal when either of its buttons is clicked."""
    if n1 or n2:
        return not is_open
    return is_open
###################################################################
# Callback to plot pdf
###################################################################
@app.callback(
    Output('plot_pdf', 'figure'),
    Input({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value'),
    [State({'type': 'params', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'params_2', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'drop-1', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'max_val', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'min_val', 'index': dash.dependencies.ALL}, 'value'),
     State({'type': 'order', 'index': dash.dependencies.ALL}, 'value'),
     ],
    prevent_initial_call=True
)
def PlotPdf(pdf_val, param1_val, params2_val, drop1_val, max_val, min_val, order):
    """Plot the PDF of the parameter whose toggle switch is active."""
    layout = {'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 0},
              'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True,
              "xaxis":{"title": r'$x$'}, "yaxis": {"title": 'PDF'}}
    fig = go.Figure(layout=layout)
    fig.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
    fig.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
    # Identify which toggle fired from its pattern-matching id.
    ctx = dash.callback_context
    id = ctx.triggered[0]['prop_id'].split('.')[0]
    idx = ast.literal_eval(id)['index']
    elem = [0, 'val_{}'.format(idx)]
    check = elem in pdf_val
    if check:
        i = pdf_val.index(elem)
        # NOTE(review): precedence — this reads as
        # `param1_val and (params2_val is None)`, which is probably not
        # what was intended; both branches call CreateParam identically and
        # differ only in the trace styling, so the impact is cosmetic.
        if param1_val and params2_val is None:
            param, s_values, pdf = CreateParam(distribution=drop1_val[i], shape_parameter_A=param1_val[i],
                                               shape_parameter_B=params2_val[i], min=min_val[i], max=max_val[i], order=order[i])
            fig.add_trace(go.Scatter(x=s_values, y=pdf, line=dict(color='rgb(0,176,246)'), fill='tonexty', mode='lines',
                                     name='Polyfit', line_width=4, line_color='black')),
        else:
            param, s_values, pdf = CreateParam(distribution=drop1_val[i], shape_parameter_A=param1_val[i],
                                               shape_parameter_B=params2_val[i], min=min_val[i], max=max_val[i],order=order[i])
            fig.add_trace(go.Scatter(x=s_values, y=pdf, line=dict(color='rgb(0,176,246)'), fill='tonexty')),
    return fig
###################################################################
# Callback to handle toggle switch in param definition card
###################################################################
@app.callback(
    Output({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value'),
    Input({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value'),
    prevent_initial_call=True
)
def setToggles(pdf_val):
    """Ensure at most one PDF toggle is active at a time.

    When a toggle is switched on, every other toggle is reset to ``[0]``;
    otherwise the values are returned unchanged.
    """
    ctx = dash.callback_context
    prop_id = ctx.triggered[0]['prop_id'].split('.')[0]
    idx = ast.literal_eval(prop_id)['index']
    elem = [0, 'val_{}'.format(idx)]
    if elem in pdf_val:
        active = pdf_val.index(elem)
        # Keep only the toggle that just fired; switch the rest off.
        # (Also removed the original's unused leftover `test` variable.)
        return [elem if j == active else [0] for j in range(len(pdf_val))]
    return pdf_val
###################################################################
# Callback to disable basis card input boxes based on basis selection
###################################################################
@app.callback(
    Output('q_val', 'disabled'),
    Output('levels', 'disabled'),
    Output('basis_growth_rule', 'disabled'),
    [Input('drop_basis', 'value')],
    prevent_initial_call=True
)
def BasisShow(value):
    """Enable only the basis-card inputs relevant to the chosen basis type.

    Returns 'disabled' flags for (q_val, levels, basis_growth_rule).
    """
    if value == 'sparse-grid':
        # Sparse grids need level and growth rule, not the q value.
        return True, False, False
    if value == 'hyperbolic-basis':
        # Hyperbolic bases need only the q value.
        return False, True, True
    # Any other (or no) selection: all three stay disabled.
    return True, True, True
def Set_Basis(basis_val, order, level, q_val, growth_rule):
    """Construct an eq.Basis from the UI selections."""
    return eq.Basis('{}'.format(basis_val), orders=order, level=level, q=q_val, growth_rule=growth_rule)
def Set_Polynomial(parameters, basis, method):
    """Construct an eq.Poly for the given parameters, basis and solver method."""
    return eq.Poly(parameters=parameters, basis=basis, method=method)
###################################################################
# Callback for automatic selection of solver method based on basis selection
###################################################################
@app.callback(
    Output('solver_method', 'value'),
    Input('drop_basis', 'value'),
    prevent_initial_call=True
)
def SetMethod(drop_basis):
    """Pick the default solver method for the selected basis type."""
    # Total-order bases are solved by least squares; everything else uses
    # numerical integration.
    return 'least-squares' if drop_basis == 'total-order' else 'numerical-integration'
###################################################################
# Callback for setting basis
###################################################################
@app.callback(
    Output('op_box', 'value'),
    ServersideOutput('BasisObject', 'data'),
    Output('compute-warning','is_open'),
    Output('compute-warning','children'),
    Input('ParamsObject', 'data'),
    Input('basis_button','n_clicks'),
    State('drop_basis', 'value'),
    State('q_val', 'value'),
    State('levels', 'value'),
    State('basis_growth_rule', 'value'),
    prevent_initial_call=True
)
def SetBasis(param_obj, n_clicks, basis_select, q_val, levels, growth_rule):
    """Build the basis when basis_button fires and report its cardinality.

    Returns (cardinality text, basis object, warning is_open, warning text).
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'basis_button' not in changed_id:
        raise PreventUpdate
    if param_obj is None:
        return 'ERROR...', None, True, 'Incorrect parameter values'
    if basis_select is None:
        return 'Error...', None, True, 'No basis value selected'
    # Fixed: `(levels or growth_rule) is None` only failed when growth_rule
    # was missing (and levels falsy); a sparse grid needs BOTH values.
    if basis_select == 'sparse-grid' and (levels is None or growth_rule is None):
        return 'ERROR...', None, True, 'Enter the required values'
    basis_ord = [elem.order for elem in param_obj]
    mybasis = Set_Basis(basis_val=basis_select, order=basis_ord, level=levels, q_val=q_val, growth_rule=growth_rule)
    return mybasis.get_cardinality(), mybasis, False, None
###################################################################
# Plotting Function: To plot basis 1D/2D/3D
###################################################################
@app.callback(
    Output('plot_basis', 'figure'),
    ServersideOutput('DOE','data'),
    Input('ParamsObject', 'data'),
    Input('BasisObject', 'data'),
    Input('solver_method', 'value'),
    Input('ndims','data')
)
def PlotBasis(params, mybasis, method, ndims):
    """Plot the DOE points of the current basis (1-D, 2-D or 3-D view).

    Also stores the DOE array serverside for later callbacks.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'ParamsObject' in changed_id:
        if mybasis is not None:
            # Fit a poly just to get points (this isn't used elsewhere)
            mypoly = Set_Polynomial(params, mybasis, method)
            DOE = mypoly.get_points()
            layout = {'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 0},
                      'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True,
                      "xaxis":{"title": r'$x_1$'}, "yaxis": {"title": r'$x_2$'}}
            fig = go.Figure(layout=layout)
            fig.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
            fig.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
            if ndims == 1:
                # 1-D: scatter the points along a hidden y axis.
                fig.add_trace(go.Scatter(x=DOE[:,0], y=np.zeros_like(DOE[:,0]), mode='markers',marker=dict(size=8, color="rgb(144, 238, 144)", opacity=1,
                                                                                                           line=dict(color='rgb(0,0,0)', width=1))))
                fig.update_yaxes(visible=False)
                return fig,DOE
            elif ndims == 2:
                fig.add_trace(go.Scatter(x=DOE[:, 0], y=DOE[:, 1],mode='markers',marker=dict(size=8, color="rgb(144, 238, 144)", opacity=0.6,
                                                                                             line=dict(color='rgb(0,0,0)', width=1))))
                return fig,DOE
            elif ndims>=3:
                # 3-D (or higher): plot the first three coordinates.
                fig.update_layout(dict(margin={'t': 0, 'r': 0, 'l': 0, 'b': 0, 'pad': 10}, autosize=True,
                                       scene=dict(
                                           aspectmode='cube',
                                           xaxis=dict(
                                               title=r'$x_1$',
                                               gridcolor="white",
                                               showbackground=False,
                                               linecolor='black',
                                               tickcolor='black',
                                               ticks='outside',
                                               zerolinecolor="white", ),
                                           yaxis=dict(
                                               title=r'$x_2$',
                                               gridcolor="white",
                                               showbackground=False,
                                               linecolor='black',
                                               tickcolor='black',
                                               ticks='outside',
                                               zerolinecolor="white"),
                                           zaxis=dict(
                                               title=r'$x_3$',
                                               backgroundcolor="rgb(230, 230,200)",
                                               gridcolor="white",
                                               showbackground=False,
                                               linecolor='black',
                                               tickcolor='black',
                                               ticks='outside',
                                               zerolinecolor="white", ),
                                       ),
                                       ))
                fig.add_trace(go.Scatter3d(x=DOE[:, 0], y=DOE[:, 1], z=DOE[:, 2], mode='markers',
                                           marker=dict(size=8, color="rgb(144, 238, 144)", opacity=0.6, line=dict(color='rgb(0,0,0)', width=1))))
                return fig,DOE
            else:
                raise PreventUpdate
        else:
            raise PreventUpdate
    else:
        raise PreventUpdate
###################################################################
# Callback to set Poly object, calculate mean, variance, r2_score
###################################################################
@app.callback(
    ServersideOutput('PolyObject', 'data'),
    Output('mean', 'value'),
    Output('variance', 'value'),
    Output('r2_score', 'value'),
    Output('input-warning','is_open'),
    Output('input-warning','children'),
    Output('poly-warning','is_open'),
    Output('poly-warning','children'),
    Trigger('CU_button', 'n_clicks'),
    Input('ParamsObject', 'data'),
    Input('BasisObject', 'data'),
    Input('solver_method', 'value'),
    Input('model_select','value'),
    Input('UploadedDF','data'),
    State('input_func', 'value'),
    State('ndims', 'data'),
    prevent_initial_call=True
)
def SetModel(params, mybasis, method, model, data, expr, ndims):
    """Fit the polynomial and report its mean, variance and R2 score.

    For analytical models the user expression ``expr`` is evaluated on the
    DOE points; for offline models the uploaded evaluations ``data`` are
    used. Returns 8 values matching the callback outputs:
    (poly, mean, variance, r2, input-warning open, input-warning text,
    poly-warning open, poly-warning text).
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'CU_button' not in changed_id:
        raise PreventUpdate
    mypoly = Set_Polynomial(params, mybasis, method)
    if model == 'analytical':
        # Bind x1..xn to the columns of an evaluation point, then evaluate
        # the user expression with numexpr.
        x = [r"x{} = op[{}]".format(j, j - 1) for j in range(1, ndims + 1)]

        def f(op):
            for i in range(ndims):
                exec(x[i])
            return ne.evaluate(expr)

        try:
            mypoly.set_model(f)
        # Fixed: `except KeyError or ValueError` evaluated to KeyError only,
        # so ValueError was never caught.
        except (KeyError, ValueError):
            # Fixed output order: the warning text belongs in the
            # input-warning 'children' slot, not the r2 slot.
            return None, None, None, None, True, "Incorrect variable naming", False, None
        y_true = mypoly._model_evaluations
    else:
        try:
            mypoly.set_model(data)
        except KeyError:
            # Fixed: the original returned only 7 values for the 8 outputs.
            return None, None, None, None, False, None, True, "Incorrect Model evaluations"
        y_true = data.squeeze()
    # Shared success path: statistics plus R2 of the polyfit on the DOE.
    mean, var = mypoly.get_mean_and_variance()
    DOE = mypoly.get_points()
    y_pred = mypoly.get_polyfit(DOE).squeeze().reshape(-1, 1)
    r2_score = eq.datasets.score(y_true, y_pred, metric='r2')
    return mypoly, mean, var, r2_score, False, None, False, None
###################################################################
# Callback to plot Sobol' indices
###################################################################
@app.callback(
    Output('sobol_order','options'),
    Input('CU_button','n_clicks'),
    State('ndims','data'),
    State('sobol_order','options')
    ,
    prevent_intial_call=True
)
def SobolCheck(n_clicks, ndims, options):
    """Enable the Sobol'-order choices compatible with the dimension count."""
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'CU_button' not in changed_id:
        raise PreventUpdate
    if ndims == 1:
        # One dimension: no interaction indices exist; leave options as-is.
        return options
    if ndims == 2:
        flags = (False, False, True)
    elif ndims >= 3:
        flags = (False, False, False)
    else:
        raise PreventUpdate
    for option, disabled in zip(options, flags):
        option['disabled'] = disabled
    return options
@app.callback(
    Output('Sobol_plot', 'figure'),
    Output('sobol_order','disabled'),
    Output('Sobol_plot','style'),
    Input('PolyObject', 'data'),
    Input('sobol_order', 'value'),
    Trigger('CU_button', 'n_clicks'),
    Input('ndims','data'),
    Input('model_select','value'),
    State('Sobol_plot', 'figure'),
    prevent_initial_call=True
)
def Plot_Sobol(mypoly, order, ndims, model,fig):
    """Bar-plot the Sobol' indices of the fitted polynomial.

    Returns (figure, order-dropdown disabled flag, graph style).
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'CU_button' in changed_id:
        layout = {'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 0},
                  'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True}
        fig=go.Figure(layout=layout)
        fig.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
        fig.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
        if ndims == 1:
            # Only one variable: no interaction orders to choose from.
            disabled = True
        else:
            disabled = False
        # NOTE(review): when mypoly is None this falls through without a
        # return value (Dash receives None for three outputs) — confirm
        # whether PreventUpdate was intended here.
        if mypoly is not None:
            sobol_indices=mypoly.get_sobol_indices(order=order)
            layout = {'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 0},
                      'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True}
            fig=go.Figure(layout=layout)
            if order==1:
                fig.update_yaxes(title=r'$S_{i}$')
                labels = [r'$S_%d$' % i for i in range(1,(ndims)+1)]
                to_plot = [sobol_indices[(i,)] for i in range((ndims))]
            elif order==2:
                # Pairwise interaction indices S_ij, i < j.
                fig.update_yaxes(title=r'$S_{ij}$')
                labels = [r'$S_{%d%d}$' % (i, j) for i in range(1,int(ndims)+1) for j in range(i + 1, int(ndims)+1)]
                to_plot = [sobol_indices[(i, j)] for i in range(int(ndims)) for j in range(i + 1, int(ndims))]
            elif order==3:
                # Triple interaction indices S_ijk, i < j < k.
                fig.update_yaxes(title=r'$S_{ijk}$')
                labels = [r'$S_{%d%d%d}$' % (i, j, k) for i in range(1,int(ndims)+1) for j in range(i + 1, int(ndims)+1) for k in
                          range(j + 1, int(ndims)+1)]
                to_plot = [sobol_indices[(i, j, k)] for i in range(int(ndims)) for j in range(i + 1, int(ndims)) for k in
                           range(j + 1, int(ndims))]
            # fig.update_xaxes(nticks=len(sobol_indices),tickvals=labels,tickangle=45)
            data=go.Bar(
                x=np.arange(len(sobol_indices)),
                y=to_plot,marker_color='LightSkyBlue',marker_line_width=2,marker_line_color='black')
            fig = go.Figure(layout=layout,data=data)
            fig.update_layout(
                xaxis=dict(
                    tickmode='array',
                    tickvals=np.arange(len(sobol_indices)),
                    ticktext=labels
                ),
                # uniformtext_minsize=8, uniformtext_mode='hide',
                xaxis_tickangle=-30
            )
            # NOTE(review): both branches currently produce the same style.
            if model=='analytical':
                style={'width': 'inherit', 'height':'35vh'}
            else:
                style = {'width': 'inherit', 'height': '35vh'}
            return fig, disabled, style
    else:
        raise PreventUpdate
#
# @app.callback(
# Output('sobol_order','options'),
# Input('ndims','data'),
# Trigger('CU_button','n_clicks'),
# State('sobol_order','options'),
# prevent_intial_call=True
# )
# def SobolDisplay(ndims,options):
# option_list=['Order 1','Order 2','Order 3'],
# if ndims==2:
# labels=[o for o in option_list[:1]]
# return labels
# elif ndims>2:
# labels=[o for o in option_list]
# return labels
# else:
# raise PreventUpdate
#
###################################################################
# Plotting Function: Polyfit plot
###################################################################
@app.callback(
    Output('plot_poly_3D', 'figure'),
    Output('plot_poly_3D','style'),
    Output('plot_poly_info','is_open'),
    Output('plot_poly_info','children'),
    Input('PolyObject', 'data'),
    Trigger('CU_button', 'n_clicks'),
    Input('ndims','data'),
    State('plot_poly_3D', 'figure'),
    prevent_initial_call=True
)
def Plot_poly_3D(mypoly, ndims, fig):
    """Refresh the polynomial-fit plot after the CU_button triggers a compute.

    For ``ndims == 2`` the fitted polynomial is drawn as a response surface
    over the design-of-experiment (DOE) bounding box; for ``ndims == 1`` it is
    drawn as a line plot with the training samples overlaid.  For any other
    dimensionality the plot is hidden and an explanatory Markdown note is
    returned instead.

    Parameters
    ----------
    mypoly : object or None
        Fitted polynomial from the 'PolyObject' store (an equadratures Poly,
        presumably — it must expose ``get_points`` and ``get_polyfit``).
    ndims : int
        Number of input dimensions, from the 'ndims' store.
    fig : dict
        Current state of the 'plot_poly_3D' figure.

    Returns
    -------
    tuple
        ``(figure, style, info_is_open, info_children)`` matching the four
        declared Outputs.

    Raises
    ------
    PreventUpdate
        When the callback was not fired by 'CU_button' or no polynomial has
        been computed yet.
    """
    hide = {'display': 'None'}
    default = {'width': '600px'}
    # Standard Dash pattern: identify which input actually fired the callback.
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'CU_button' in changed_id:
        if (mypoly is not None):
            # NOTE(review): reads the private attribute `_model_evaluations`
            # of the Poly object — confirm it is still present in the
            # equadratures version in use.
            y_true = mypoly._model_evaluations.ravel()
            if ndims == 2:
                # Evaluate the fitted polynomial on an N x N grid spanning
                # the DOE's corner points in each dimension.
                DOE = mypoly.get_points()
                N = 20
                s1_samples = np.linspace(DOE[0, 0], DOE[-1, 0], N)
                s2_samples = np.linspace(DOE[0, 1], DOE[-1, 1], N)
                [S1, S2] = np.meshgrid(s1_samples, s2_samples)
                S1_vec = np.reshape(S1, (N * N, 1))
                S2_vec = np.reshape(S2, (N * N, 1))
                samples = np.hstack([S1_vec, S2_vec])
                PolyDiscreet = mypoly.get_polyfit(samples)
                PolyDiscreet = np.reshape(PolyDiscreet, (N, N))
                fig = go.Figure(fig)
                # Keep only traces 0 (surface) and 1 (3D training scatter),
                # dropping any 2D traces left over from a previous 1D plot.
                fig.data = fig.data[0:2]
                fig.plotly_restyle({'x': S1, 'y': S2, 'z': PolyDiscreet}, 0)
                fig.plotly_restyle({'x': DOE[:, 0], 'y': DOE[:, 1], 'z': y_true}, 1)
                return fig, default, False, None
            elif ndims == 1:
                layout = {"xaxis": {"title": r'$x_1$'}, "yaxis": {"title": r'$f(x_1)$'},
                          'margin': {'t': 0, 'r': 0, 'l': 0, 'b': 60},
                          'paper_bgcolor': 'white', 'plot_bgcolor': 'white', 'autosize': True}
                # Sample the fitted 1D polynomial at N points across the DOE range.
                DOE = mypoly.get_points()
                N = 20
                s1_samples = np.linspace(DOE[0, 0], DOE[-1, -1], N)
                [S1] = np.meshgrid(s1_samples)
                S1_vec = np.reshape(S1, (N, 1))
                samples = np.hstack([S1_vec])
                PolyDiscreet = mypoly.get_polyfit(samples)
                PolyDiscreet = np.reshape(PolyDiscreet, (N))
                fig = go.Figure(fig)
                fig.update_xaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
                fig.update_yaxes(color='black', linecolor='black', showline=True, tickcolor='black', ticks='outside')
                fig.update_layout(layout)
                # Blank out the 3D surface/scatter traces (indices 0 and 1)
                # so only the 2D line/markers remain visible.
                fig.plotly_restyle({'x': [[]], 'y': [[]], 'z': [[]]}, 0)
                fig.plotly_restyle({'x': [[]], 'y': [[]], 'z': [[]]}, 1)
                if len(fig.data) == 4:
                    # The 2D traces (indices 2 and 3) already exist from a
                    # previous 1D plot — update them in place.
                    fig.plotly_restyle({'x': DOE[:, 0], 'y': y_true}, 2)
                    fig.plotly_restyle({'x': S1, 'y': PolyDiscreet}, 3)
                else:
                    # First 1D plot: append the training-sample scatter and
                    # the polynomial-approximation line as traces 2 and 3.
                    fig.add_trace(go.Scatter(x=DOE[:, 0], y=y_true, mode='markers', name='Training samples',
                                             marker=dict(color='rgb(135,206,250)', size=15, opacity=0.5,
                                                         line=dict(color='rgb(0,0,0)', width=1))))
                    fig.add_trace(go.Scatter(x=S1, y=PolyDiscreet, mode='lines', name='Polynomial approx.', line_color='rgb(178,34,34)'))
                return fig, default, False, None
            else:
                # ndims > 2: the polyfit cannot be visualised — hide the
                # plot and open the info alert with an explanation.
                added_text = '''
                The Polyfit Plot exists for only **1D** and **2D** Polynomials, as we move to higher dimensions,
                visualisation of data becomes computationally expensive and hence, we stick to 2D or 3D plots
                '''
                added_text = dcc.Markdown(convert_latex(added_text), dangerously_allow_html=True,
                                          style={'text-align': 'justify'})
                return fig, hide, True, added_text
        else:
            raise PreventUpdate
    else:
        raise PreventUpdate
| [
"dash_extensions.enrich.Input",
"numpy.hstack",
"dash_core_components.Input",
"io.BytesIO",
"numpy.column_stack",
"plotly.graph_objs.Scatter",
"dash_core_components.Store",
"numpy.array",
"utils.convert_latex",
"dash_bootstrap_components.Input",
"dash_core_components.send_data_frame",
"dash_bo... | [((927, 1133), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'options': "[{'label': 'Analytical Model', 'value': 'analytical'}, {'label':\n 'Offline Model', 'value': 'offline'}]", 'className': '"""m-1"""', 'id': '"""model_select"""', 'value': '"""analytical"""', 'clearable': '(False)'}), "(options=[{'label': 'Analytical Model', 'value': 'analytical'},\n {'label': 'Offline Model', 'value': 'offline'}], className='m-1', id=\n 'model_select', value='analytical', clearable=False)\n", (939, 1133), True, 'import dash_core_components as dcc\n'), ((4543, 4613), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot_pdf"""', 'style': "{'height': '50vh', 'width': 'inherit'}"}), "(id='plot_pdf', style={'height': '50vh', 'width': 'inherit'})\n", (4552, 4613), True, 'import dash_core_components as dcc\n'), ((5090, 5504), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'options': "[{'label': 'Total-order', 'value': 'total-order'}, {'label': 'Tensor-grid',\n 'value': 'tensor-grid'}, {'label': 'Sparse-grid', 'value':\n 'sparse-grid'}, {'label': 'Hyperbolic-basis', 'value':\n 'hyperbolic-basis'}, {'label': 'Euclidean-degree', 'value':\n 'euclidean-degree'}]", 'placeholder': '"""Select Basis"""', 'className': '"""m-1"""', 'id': '"""drop_basis"""', 'value': '"""tensor-grid"""', 'clearable': '(False)'}), "(options=[{'label': 'Total-order', 'value': 'total-order'}, {\n 'label': 'Tensor-grid', 'value': 'tensor-grid'}, {'label':\n 'Sparse-grid', 'value': 'sparse-grid'}, {'label': 'Hyperbolic-basis',\n 'value': 'hyperbolic-basis'}, {'label': 'Euclidean-degree', 'value':\n 'euclidean-degree'}], placeholder='Select Basis', className='m-1', id=\n 'drop_basis', value='tensor-grid', clearable=False)\n", (5102, 5504), True, 'import dash_core_components as dcc\n'), ((5575, 5795), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'options': "[{'label': 'Linear', 'value': 'linear'}, {'label': 'Exponential', 'value':\n 'exponential'}]", 'placeholder': 
'"""Growth Rule"""', 'clearable': '(False)', 'className': '"""m-1"""', 'id': '"""basis_growth_rule"""', 'disabled': '(True)'}), "(options=[{'label': 'Linear', 'value': 'linear'}, {'label':\n 'Exponential', 'value': 'exponential'}], placeholder='Growth Rule',\n clearable=False, className='m-1', id='basis_growth_rule', disabled=True)\n", (5587, 5795), True, 'import dash_core_components as dcc\n'), ((8429, 8453), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (8438, 8453), True, 'import plotly.graph_objs as go\n'), ((9078, 9351), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'options': "[{'label': 'Least-squares', 'value': 'least-squares'}, {'label':\n 'Numerical-integration', 'value': 'numerical-integration'}]", 'placeholder': '"""Solver method"""', 'clearable': '(False)', 'value': '"""numerical-integration"""', 'className': '"""m-1"""', 'id': '"""solver_method"""'}), "(options=[{'label': 'Least-squares', 'value': 'least-squares'},\n {'label': 'Numerical-integration', 'value': 'numerical-integration'}],\n placeholder='Solver method', clearable=False, value=\n 'numerical-integration', className='m-1', id='solver_method')\n", (9090, 9351), True, 'import dash_core_components as dcc\n'), ((11889, 11961), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""Sobol_plot"""', 'style': "{'width': 'inherit', 'height': '35vh'}"}), "(id='Sobol_plot', style={'width': 'inherit', 'height': '35vh'})\n", (11898, 11961), True, 'import dash_core_components as dcc\n'), ((8695, 8812), 'plotly.graph_objs.Surface', 'go.Surface', ([], {'x': '[]', 'y': '[]', 'z': '[]', 'showscale': '(False)', 'opacity': '(0.5)', 'colorscale': "[[0, 'rgb(178,34,34)'], [1, 'rgb(0,0,0)']]"}), "(x=[], y=[], z=[], showscale=False, opacity=0.5, colorscale=[[0,\n 'rgb(178,34,34)'], [1, 'rgb(0,0,0)']])\n", (8705, 8812), True, 'import plotly.graph_objs as go\n'), ((15250, 15281), 'dash_extensions.enrich.Output', 'Output', (['"""AP_button"""', 
'"""disabled"""'], {}), "('AP_button', 'disabled')\n", (15256, 15281), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((15288, 15318), 'dash_extensions.enrich.Input', 'Input', (['"""AP_button"""', '"""n_clicks"""'], {}), "('AP_button', 'n_clicks')\n", (15293, 15318), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((15325, 15358), 'dash_extensions.enrich.Input', 'Input', (['"""basis_button"""', '"""n_clicks"""'], {}), "('basis_button', 'n_clicks')\n", (15330, 15358), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((15494, 15523), 'dash_extensions.enrich.Output', 'Output', (['"""input_func"""', '"""style"""'], {}), "('input_func', 'style')\n", (15500, 15523), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((15529, 15559), 'dash_extensions.enrich.Output', 'Output', (['"""upload_data"""', '"""style"""'], {}), "('upload_data', 'style')\n", (15535, 15559), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((15565, 15595), 'dash_extensions.enrich.Output', 'Output', (['"""dataset-div"""', '"""style"""'], {}), "('dataset-div', 'style')\n", (15571, 15595), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((15601, 15631), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (15606, 15631), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((16546, 16952), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'options': "[{'label': 'Uniform', 'value': 'uniform'}, {'label': 'Gaussian', 'value':\n 'gaussian'}, {'label': 'Truncated Gaussian', 'value':\n 'truncated-gaussian'}, {'label': 'LogNormal', 'value': 'lognormal'}, {\n 'label': 'Beta', 
'value': 'beta'}]", 'placeholder': '"""Select a distribution"""', 'value': '"""uniform"""', 'clearable': '(False)', 'className': '"""m-1"""', 'id': "{'type': 'drop-1', 'index': n_clicks}"}), "(options=[{'label': 'Uniform', 'value': 'uniform'}, {'label':\n 'Gaussian', 'value': 'gaussian'}, {'label': 'Truncated Gaussian',\n 'value': 'truncated-gaussian'}, {'label': 'LogNormal', 'value':\n 'lognormal'}, {'label': 'Beta', 'value': 'beta'}], placeholder=\n 'Select a distribution', value='uniform', clearable=False, className=\n 'm-1', id={'type': 'drop-1', 'index': n_clicks})\n", (16558, 16952), True, 'import dash_core_components as dcc\n'), ((16338, 16369), 'dash_extensions.enrich.Output', 'Output', (['"""param_add"""', '"""children"""'], {}), "('param_add', 'children')\n", (16344, 16369), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((16376, 16409), 'dash_extensions.enrich.Output', 'Output', (['"""param_added"""', '"""children"""'], {}), "('param_added', 'children')\n", (16382, 16409), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((20177, 20208), 'dash_extensions.enrich.Output', 'Output', (['"""main_text"""', '"""children"""'], {}), "('main_text', 'children')\n", (20183, 20208), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((20214, 20245), 'dash_extensions.enrich.Output', 'Output', (['"""info_text"""', '"""children"""'], {}), "('info_text', 'children')\n", (20220, 20245), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((20251, 20281), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (20256, 20281), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21034, 21068), 'dash_extensions.enrich.Output', 'Output', (['"""basis_button"""', 
'"""disabled"""'], {}), "('basis_button', 'disabled')\n", (21040, 21068), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21350, 21387), 'dash_extensions.enrich.Output', 'Output', (['"""download_button"""', '"""disabled"""'], {}), "('download_button', 'disabled')\n", (21356, 21387), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21393, 21421), 'dash_extensions.enrich.Input', 'Input', (['"""BasisObject"""', '"""data"""'], {}), "('BasisObject', 'data')\n", (21398, 21421), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21555, 21590), 'dash_extensions.enrich.Output', 'Output', (['"""download_DOE_data"""', '"""data"""'], {}), "('download_DOE_data', 'data')\n", (21561, 21590), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21596, 21630), 'dash_extensions.enrich.Output', 'Output', (['"""download_button"""', '"""style"""'], {}), "('download_button', 'style')\n", (21602, 21630), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21636, 21672), 'dash_extensions.enrich.Input', 'Input', (['"""download_button"""', '"""n_clicks"""'], {}), "('download_button', 'n_clicks')\n", (21641, 21672), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21678, 21708), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (21683, 21708), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21714, 21743), 'dash_extensions.enrich.Input', 'Input', (['"""ParamsObject"""', '"""data"""'], {}), "('ParamsObject', 'data')\n", (21719, 21743), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21750, 21778), 
'dash_extensions.enrich.Input', 'Input', (['"""BasisObject"""', '"""data"""'], {}), "('BasisObject', 'data')\n", (21755, 21778), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21785, 21816), 'dash_extensions.enrich.Input', 'Input', (['"""solver_method"""', '"""value"""'], {}), "('solver_method', 'value')\n", (21790, 21816), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((23228, 23266), 'dash_extensions.enrich.ServersideOutput', 'ServersideOutput', (['"""UploadedDF"""', '"""data"""'], {}), "('UploadedDF', 'data')\n", (23244, 23266), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((23272, 23309), 'dash_extensions.enrich.Output', 'Output', (['"""filename_append"""', '"""children"""'], {}), "('filename_append', 'children')\n", (23278, 23309), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((23315, 23354), 'dash_extensions.enrich.Output', 'Output', (['"""dataset-info-open"""', '"""disabled"""'], {}), "('dataset-info-open', 'disabled')\n", (23321, 23354), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((23360, 23390), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (23365, 23390), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((23396, 23428), 'dash_extensions.enrich.Input', 'Input', (['"""upload_data"""', '"""filename"""'], {}), "('upload_data', 'filename')\n", (23401, 23428), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((23434, 23466), 'dash_extensions.enrich.Input', 'Input', (['"""upload_data"""', '"""contents"""'], {}), "('upload_data', 'contents')\n", (23439, 23466), False, 'from dash_extensions.enrich import Dash, 
ServersideOutput, Output, Input, State, Trigger\n'), ((23472, 23492), 'dash_extensions.enrich.Input', 'Input', (['"""DOE"""', '"""data"""'], {}), "('DOE', 'data')\n", (23477, 23492), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24022, 24055), 'dash_extensions.enrich.Output', 'Output', (['"""dataset-info"""', '"""is_open"""'], {}), "('dataset-info', 'is_open')\n", (24028, 24055), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24314, 24352), 'dash_extensions.enrich.Output', 'Output', (['"""dataset_filename"""', '"""children"""'], {}), "('dataset_filename', 'children')\n", (24320, 24352), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24358, 24393), 'dash_extensions.enrich.Output', 'Output', (['"""upload_data_table"""', '"""data"""'], {}), "('upload_data_table', 'data')\n", (24364, 24393), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24399, 24437), 'dash_extensions.enrich.Output', 'Output', (['"""upload_data_table"""', '"""columns"""'], {}), "('upload_data_table', 'columns')\n", (24405, 24437), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24443, 24479), 'dash_extensions.enrich.Input', 'Input', (['"""filename_append"""', '"""children"""'], {}), "('filename_append', 'children')\n", (24448, 24479), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24485, 24517), 'dash_extensions.enrich.Input', 'Input', (['"""dataset-info"""', '"""is_open"""'], {}), "('dataset-info', 'is_open')\n", (24490, 24517), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24524, 24551), 'dash_extensions.enrich.Input', 'Input', (['"""UploadedDF"""', '"""data"""'], {}), "('UploadedDF', 'data')\n", (24529, 
24551), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24557, 24577), 'dash_extensions.enrich.Input', 'Input', (['"""DOE"""', '"""data"""'], {}), "('DOE', 'data')\n", (24562, 24577), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((25778, 25809), 'dash_extensions.enrich.Output', 'Output', (['"""CU_button"""', '"""disabled"""'], {}), "('CU_button', 'disabled')\n", (25784, 25809), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((26968, 27043), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'params', 'index': dash.dependencies.MATCH}", '"""placeholder"""'], {}), "({'type': 'params', 'index': dash.dependencies.MATCH}, 'placeholder')\n", (26974, 27043), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27050, 27127), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'params_2', 'index': dash.dependencies.MATCH}", '"""placeholder"""'], {}), "({'type': 'params_2', 'index': dash.dependencies.MATCH}, 'placeholder')\n", (27056, 27127), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27134, 27210), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'min_val', 'index': dash.dependencies.MATCH}", '"""placeholder"""'], {}), "({'type': 'min_val', 'index': dash.dependencies.MATCH}, 'placeholder')\n", (27140, 27210), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27217, 27293), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'max_val', 'index': dash.dependencies.MATCH}", '"""placeholder"""'], {}), "({'type': 'max_val', 'index': dash.dependencies.MATCH}, 'placeholder')\n", (27223, 27293), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27300, 27372), 
'dash_extensions.enrich.Output', 'Output', (["{'type': 'params', 'index': dash.dependencies.MATCH}", '"""disabled"""'], {}), "({'type': 'params', 'index': dash.dependencies.MATCH}, 'disabled')\n", (27306, 27372), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27379, 27453), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'params_2', 'index': dash.dependencies.MATCH}", '"""disabled"""'], {}), "({'type': 'params_2', 'index': dash.dependencies.MATCH}, 'disabled')\n", (27385, 27453), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27460, 27533), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'min_val', 'index': dash.dependencies.MATCH}", '"""disabled"""'], {}), "({'type': 'min_val', 'index': dash.dependencies.MATCH}, 'disabled')\n", (27466, 27533), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27540, 27613), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'max_val', 'index': dash.dependencies.MATCH}", '"""disabled"""'], {}), "({'type': 'max_val', 'index': dash.dependencies.MATCH}, 'disabled')\n", (27546, 27613), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29032, 29072), 'dash_extensions.enrich.ServersideOutput', 'ServersideOutput', (['"""ParamsObject"""', '"""data"""'], {}), "('ParamsObject', 'data')\n", (29048, 29072), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29079, 29102), 'dash_extensions.enrich.Output', 'Output', (['"""ndims"""', '"""data"""'], {}), "('ndims', 'data')\n", (29085, 29102), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((32365, 32517), 'equadratures.Parameter', 'eq.Parameter', ([], {'distribution': 'distribution', 'shape_parameter_A': 'shape_parameter_A', 'shape_parameter_B': 
'shape_parameter_B', 'lower': 'min', 'upper': 'max', 'order': 'order'}), '(distribution=distribution, shape_parameter_A=shape_parameter_A,\n shape_parameter_B=shape_parameter_B, lower=min, upper=max, order=order)\n', (32377, 32517), True, 'import equadratures as eq\n'), ((32856, 32886), 'dash_extensions.enrich.Output', 'Output', (['"""data-info"""', '"""is_open"""'], {}), "('data-info', 'is_open')\n", (32862, 32886), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((34205, 34229), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (34214, 34229), True, 'import plotly.graph_objs as go\n'), ((33298, 33326), 'dash_extensions.enrich.Output', 'Output', (['"""plot_pdf"""', '"""figure"""'], {}), "('plot_pdf', 'figure')\n", (33304, 33326), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((33333, 33402), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'radio_pdf', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value')\n", (33338, 33402), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((35716, 35786), 'dash_extensions.enrich.Output', 'Output', (["{'type': 'radio_pdf', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value')\n", (35722, 35786), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((35793, 35862), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'radio_pdf', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'radio_pdf', 'index': dash.dependencies.ALL}, 'value')\n", (35798, 35862), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((36650, 36677), 'dash_extensions.enrich.Output', 'Output', (['"""q_val"""', 
'"""disabled"""'], {}), "('q_val', 'disabled')\n", (36656, 36677), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((36684, 36712), 'dash_extensions.enrich.Output', 'Output', (['"""levels"""', '"""disabled"""'], {}), "('levels', 'disabled')\n", (36690, 36712), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((36719, 36758), 'dash_extensions.enrich.Output', 'Output', (['"""basis_growth_rule"""', '"""disabled"""'], {}), "('basis_growth_rule', 'disabled')\n", (36725, 36758), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((37429, 37487), 'equadratures.Poly', 'eq.Poly', ([], {'parameters': 'parameters', 'basis': 'basis', 'method': 'method'}), '(parameters=parameters, basis=basis, method=method)\n', (37436, 37487), True, 'import equadratures as eq\n'), ((37746, 37778), 'dash_extensions.enrich.Output', 'Output', (['"""solver_method"""', '"""value"""'], {}), "('solver_method', 'value')\n", (37752, 37778), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((37785, 37813), 'dash_extensions.enrich.Input', 'Input', (['"""drop_basis"""', '"""value"""'], {}), "('drop_basis', 'value')\n", (37790, 37813), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38188, 38213), 'dash_extensions.enrich.Output', 'Output', (['"""op_box"""', '"""value"""'], {}), "('op_box', 'value')\n", (38194, 38213), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38220, 38259), 'dash_extensions.enrich.ServersideOutput', 'ServersideOutput', (['"""BasisObject"""', '"""data"""'], {}), "('BasisObject', 'data')\n", (38236, 38259), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38266, 38302), 'dash_extensions.enrich.Output', 'Output', 
(['"""compute-warning"""', '"""is_open"""'], {}), "('compute-warning', 'is_open')\n", (38272, 38302), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38308, 38345), 'dash_extensions.enrich.Output', 'Output', (['"""compute-warning"""', '"""children"""'], {}), "('compute-warning', 'children')\n", (38314, 38345), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38351, 38380), 'dash_extensions.enrich.Input', 'Input', (['"""ParamsObject"""', '"""data"""'], {}), "('ParamsObject', 'data')\n", (38356, 38380), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38387, 38420), 'dash_extensions.enrich.Input', 'Input', (['"""basis_button"""', '"""n_clicks"""'], {}), "('basis_button', 'n_clicks')\n", (38392, 38420), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38426, 38454), 'dash_extensions.enrich.State', 'State', (['"""drop_basis"""', '"""value"""'], {}), "('drop_basis', 'value')\n", (38431, 38454), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38461, 38484), 'dash_extensions.enrich.State', 'State', (['"""q_val"""', '"""value"""'], {}), "('q_val', 'value')\n", (38466, 38484), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38491, 38515), 'dash_extensions.enrich.State', 'State', (['"""levels"""', '"""value"""'], {}), "('levels', 'value')\n", (38496, 38515), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((38522, 38557), 'dash_extensions.enrich.State', 'State', (['"""basis_growth_rule"""', '"""value"""'], {}), "('basis_growth_rule', 'value')\n", (38527, 38557), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((39814, 39844), 
'dash_extensions.enrich.Output', 'Output', (['"""plot_basis"""', '"""figure"""'], {}), "('plot_basis', 'figure')\n", (39820, 39844), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((39851, 39882), 'dash_extensions.enrich.ServersideOutput', 'ServersideOutput', (['"""DOE"""', '"""data"""'], {}), "('DOE', 'data')\n", (39867, 39882), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((39888, 39917), 'dash_extensions.enrich.Input', 'Input', (['"""ParamsObject"""', '"""data"""'], {}), "('ParamsObject', 'data')\n", (39893, 39917), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((39924, 39952), 'dash_extensions.enrich.Input', 'Input', (['"""BasisObject"""', '"""data"""'], {}), "('BasisObject', 'data')\n", (39929, 39952), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((39959, 39990), 'dash_extensions.enrich.Input', 'Input', (['"""solver_method"""', '"""value"""'], {}), "('solver_method', 'value')\n", (39964, 39990), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((39997, 40019), 'dash_extensions.enrich.Input', 'Input', (['"""ndims"""', '"""data"""'], {}), "('ndims', 'data')\n", (40002, 40019), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43509, 43547), 'dash_extensions.enrich.ServersideOutput', 'ServersideOutput', (['"""PolyObject"""', '"""data"""'], {}), "('PolyObject', 'data')\n", (43525, 43547), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43554, 43577), 'dash_extensions.enrich.Output', 'Output', (['"""mean"""', '"""value"""'], {}), "('mean', 'value')\n", (43560, 43577), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43584, 43611), 
'dash_extensions.enrich.Output', 'Output', (['"""variance"""', '"""value"""'], {}), "('variance', 'value')\n", (43590, 43611), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43618, 43645), 'dash_extensions.enrich.Output', 'Output', (['"""r2_score"""', '"""value"""'], {}), "('r2_score', 'value')\n", (43624, 43645), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43652, 43686), 'dash_extensions.enrich.Output', 'Output', (['"""input-warning"""', '"""is_open"""'], {}), "('input-warning', 'is_open')\n", (43658, 43686), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43692, 43727), 'dash_extensions.enrich.Output', 'Output', (['"""input-warning"""', '"""children"""'], {}), "('input-warning', 'children')\n", (43698, 43727), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43733, 43766), 'dash_extensions.enrich.Output', 'Output', (['"""poly-warning"""', '"""is_open"""'], {}), "('poly-warning', 'is_open')\n", (43739, 43766), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43772, 43806), 'dash_extensions.enrich.Output', 'Output', (['"""poly-warning"""', '"""children"""'], {}), "('poly-warning', 'children')\n", (43778, 43806), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43812, 43844), 'dash_extensions.enrich.Trigger', 'Trigger', (['"""CU_button"""', '"""n_clicks"""'], {}), "('CU_button', 'n_clicks')\n", (43819, 43844), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43851, 43880), 'dash_extensions.enrich.Input', 'Input', (['"""ParamsObject"""', '"""data"""'], {}), "('ParamsObject', 'data')\n", (43856, 43880), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, 
Input, State, Trigger\n'), ((43887, 43915), 'dash_extensions.enrich.Input', 'Input', (['"""BasisObject"""', '"""data"""'], {}), "('BasisObject', 'data')\n", (43892, 43915), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43922, 43953), 'dash_extensions.enrich.Input', 'Input', (['"""solver_method"""', '"""value"""'], {}), "('solver_method', 'value')\n", (43927, 43953), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43960, 43990), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (43965, 43990), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((43996, 44023), 'dash_extensions.enrich.Input', 'Input', (['"""UploadedDF"""', '"""data"""'], {}), "('UploadedDF', 'data')\n", (44001, 44023), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((44029, 44057), 'dash_extensions.enrich.State', 'State', (['"""input_func"""', '"""value"""'], {}), "('input_func', 'value')\n", (44034, 44057), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((44064, 44086), 'dash_extensions.enrich.State', 'State', (['"""ndims"""', '"""data"""'], {}), "('ndims', 'data')\n", (44069, 44086), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((46502, 46534), 'dash_extensions.enrich.Output', 'Output', (['"""sobol_order"""', '"""options"""'], {}), "('sobol_order', 'options')\n", (46508, 46534), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((46540, 46570), 'dash_extensions.enrich.Input', 'Input', (['"""CU_button"""', '"""n_clicks"""'], {}), "('CU_button', 'n_clicks')\n", (46545, 46570), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, 
State, Trigger\n'), ((46576, 46598), 'dash_extensions.enrich.State', 'State', (['"""ndims"""', '"""data"""'], {}), "('ndims', 'data')\n", (46581, 46598), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((46604, 46635), 'dash_extensions.enrich.State', 'State', (['"""sobol_order"""', '"""options"""'], {}), "('sobol_order', 'options')\n", (46609, 46635), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47335, 47365), 'dash_extensions.enrich.Output', 'Output', (['"""Sobol_plot"""', '"""figure"""'], {}), "('Sobol_plot', 'figure')\n", (47341, 47365), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47372, 47405), 'dash_extensions.enrich.Output', 'Output', (['"""sobol_order"""', '"""disabled"""'], {}), "('sobol_order', 'disabled')\n", (47378, 47405), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47411, 47440), 'dash_extensions.enrich.Output', 'Output', (['"""Sobol_plot"""', '"""style"""'], {}), "('Sobol_plot', 'style')\n", (47417, 47440), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47446, 47473), 'dash_extensions.enrich.Input', 'Input', (['"""PolyObject"""', '"""data"""'], {}), "('PolyObject', 'data')\n", (47451, 47473), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47480, 47509), 'dash_extensions.enrich.Input', 'Input', (['"""sobol_order"""', '"""value"""'], {}), "('sobol_order', 'value')\n", (47485, 47509), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47516, 47548), 'dash_extensions.enrich.Trigger', 'Trigger', (['"""CU_button"""', '"""n_clicks"""'], {}), "('CU_button', 'n_clicks')\n", (47523, 47548), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, 
Input, State, Trigger\n'), ((47555, 47577), 'dash_extensions.enrich.Input', 'Input', (['"""ndims"""', '"""data"""'], {}), "('ndims', 'data')\n", (47560, 47577), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47583, 47613), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (47588, 47613), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((47619, 47648), 'dash_extensions.enrich.State', 'State', (['"""Sobol_plot"""', '"""figure"""'], {}), "('Sobol_plot', 'figure')\n", (47624, 47648), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51420, 51452), 'dash_extensions.enrich.Output', 'Output', (['"""plot_poly_3D"""', '"""figure"""'], {}), "('plot_poly_3D', 'figure')\n", (51426, 51452), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51459, 51490), 'dash_extensions.enrich.Output', 'Output', (['"""plot_poly_3D"""', '"""style"""'], {}), "('plot_poly_3D', 'style')\n", (51465, 51490), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51496, 51531), 'dash_extensions.enrich.Output', 'Output', (['"""plot_poly_info"""', '"""is_open"""'], {}), "('plot_poly_info', 'is_open')\n", (51502, 51531), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51537, 51573), 'dash_extensions.enrich.Output', 'Output', (['"""plot_poly_info"""', '"""children"""'], {}), "('plot_poly_info', 'children')\n", (51543, 51573), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51579, 51606), 'dash_extensions.enrich.Input', 'Input', (['"""PolyObject"""', '"""data"""'], {}), "('PolyObject', 'data')\n", (51584, 51606), False, 'from dash_extensions.enrich import Dash, 
ServersideOutput, Output, Input, State, Trigger\n'), ((51613, 51645), 'dash_extensions.enrich.Trigger', 'Trigger', (['"""CU_button"""', '"""n_clicks"""'], {}), "('CU_button', 'n_clicks')\n", (51620, 51645), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51652, 51674), 'dash_extensions.enrich.Input', 'Input', (['"""ndims"""', '"""data"""'], {}), "('ndims', 'data')\n", (51657, 51674), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((51680, 51711), 'dash_extensions.enrich.State', 'State', (['"""plot_poly_3D"""', '"""figure"""'], {}), "('plot_poly_3D', 'figure')\n", (51685, 51711), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((2460, 2550), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""More Information"""'], {'color': '"""primary"""', 'id': '"""data-info-open"""', 'className': '"""py-0"""'}), "('More Information', color='primary', id='data-info-open',\n className='py-0')\n", (2470, 2550), True, 'import dash_bootstrap_components as dbc\n'), ((5871, 5967), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Download DOE"""'], {'id': '"""download_button"""', 'style': "{'display': 'None'}", 'color': '"""primary"""'}), "('Download DOE', id='download_button', style={'display': 'None'},\n color='primary')\n", (5881, 5967), True, 'import dash_bootstrap_components as dbc\n'), ((5966, 6002), 'dash_core_components.Download', 'dcc.Download', ([], {'id': '"""download_DOE_data"""'}), "(id='download_DOE_data')\n", (5978, 6002), True, 'import dash_core_components as dcc\n'), ((9679, 9783), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""View Dataset"""'], {'color': '"""primary"""', 'id': '"""dataset-info-open"""', 'className': '"""py-0"""', 'disabled': '(True)'}), "('View Dataset', color='primary', id='dataset-info-open',\n className='py-0', disabled=True)\n", (9689, 9783), True, 'import 
dash_bootstrap_components as dbc\n'), ((10404, 10438), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Mean"""'], {'html_for': '"""mean"""'}), "('Mean', html_for='mean')\n", (10413, 10438), True, 'import dash_bootstrap_components as dbc\n'), ((10712, 10754), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Variance"""'], {'html_for': '"""variance"""'}), "('Variance', html_for='variance')\n", (10721, 10754), True, 'import dash_bootstrap_components as dbc\n'), ((11036, 11078), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""R2 score"""'], {'html_for': '"""r2_score"""'}), "('R2 score', html_for='r2_score')\n", (11045, 11078), True, 'import dash_bootstrap_components as dbc\n'), ((11362, 11417), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Senstivity Indices"""'], {'html_for': '"""sobol_order"""'}), "('Senstivity Indices', html_for='sobol_order')\n", (11371, 11417), True, 'import dash_bootstrap_components as dbc\n'), ((12613, 12641), 'dash_bootstrap_components.Col', 'dbc.Col', (['sobol_form'], {'width': '(6)'}), '(sobol_form, width=6)\n', (12620, 12641), True, 'import dash_bootstrap_components as dbc\n'), ((12656, 12684), 'dash_bootstrap_components.Col', 'dbc.Col', (['sobol_plot'], {'width': '(8)'}), '(sobol_plot, width=8)\n', (12663, 12684), True, 'import dash_bootstrap_components as dbc\n'), ((12733, 12829), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot_poly_3D"""', 'style': "{'width': 'inherit', 'height': '60vh'}", 'figure': 'polyfig3D'}), "(id='plot_poly_3D', style={'width': 'inherit', 'height': '60vh'},\n figure=polyfig3D)\n", (12742, 12829), True, 'import dash_core_components as dcc\n'), ((12834, 12896), 'dash_bootstrap_components.Alert', 'dbc.Alert', ([], {'id': '"""plot_poly_info"""', 'color': '"""primary"""', 'is_open': '(False)'}), "(id='plot_poly_info', color='primary', is_open=False)\n", (12843, 12896), True, 'import dash_bootstrap_components as dbc\n'), ((13392, 13450), 'dash_bootstrap_components.Tooltip', 
'dbc.Tooltip', (['"""Maximum of 5 parameters"""'], {'target': '"""AP_button"""'}), "('Maximum of 5 parameters', target='AP_button')\n", (13403, 13450), True, 'import dash_bootstrap_components as dbc\n'), ((13460, 13545), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['"""The variables should be of the form x1,x2..."""'], {'target': '"""input_func"""'}), "('The variables should be of the form x1,x2...', target='input_func'\n )\n", (13471, 13545), True, 'import dash_bootstrap_components as dbc\n'), ((13914, 13990), 'dash_html_components.H2', 'html.H2', (['"""Uncertainty quantification of an analytical model"""'], {'id': '"""main_text"""'}), "('Uncertainty quantification of an analytical model', id='main_text')\n", (13921, 13990), True, 'import dash_html_components as html\n'), ((14739, 14760), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""ndims"""'}), "(id='ndims')\n", (14748, 14760), True, 'import dash_core_components as dcc\n'), ((14824, 14852), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""ParamsObject"""'}), "(id='ParamsObject')\n", (14833, 14852), True, 'import dash_core_components as dcc\n'), ((14863, 14889), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""PolyObject"""'}), "(id='PolyObject')\n", (14872, 14889), True, 'import dash_core_components as dcc\n'), ((14900, 14927), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""BasisObject"""'}), "(id='BasisObject')\n", (14909, 14927), True, 'import dash_core_components as dcc\n'), ((14938, 14957), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""DOE"""'}), "(id='DOE')\n", (14947, 14957), True, 'import dash_core_components as dcc\n'), ((14968, 14994), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""UploadedDF"""'}), "(id='UploadedDF')\n", (14977, 14994), True, 'import dash_core_components as dcc\n'), ((20079, 20088), 'dash_bootstrap_components.Row', 'dbc.Row', ([], {}), '()\n', (20086, 20088), True, 'import dash_bootstrap_components as 
dbc\n'), ((16416, 16446), 'dash_extensions.enrich.Input', 'Input', (['"""AP_button"""', '"""n_clicks"""'], {}), "('AP_button', 'n_clicks')\n", (16421, 16446), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((16454, 16484), 'dash_extensions.enrich.State', 'State', (['"""param_add"""', '"""children"""'], {}), "('param_add', 'children')\n", (16459, 16484), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((21081, 21111), 'dash_extensions.enrich.Input', 'Input', (['"""AP_button"""', '"""n_clicks"""'], {}), "('AP_button', 'n_clicks')\n", (21086, 21111), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24063, 24101), 'dash_extensions.enrich.Input', 'Input', (['"""dataset-info-open"""', '"""n_clicks"""'], {}), "('dataset-info-open', 'n_clicks')\n", (24068, 24101), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24103, 24142), 'dash_extensions.enrich.Input', 'Input', (['"""dataset-info-close"""', '"""n_clicks"""'], {}), "('dataset-info-close', 'n_clicks')\n", (24108, 24142), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((24151, 24183), 'dash_extensions.enrich.State', 'State', (['"""dataset-info"""', '"""is_open"""'], {}), "('dataset-info', 'is_open')\n", (24156, 24183), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((25826, 25859), 'dash_extensions.enrich.Input', 'Input', (['"""basis_button"""', '"""n_clicks"""'], {}), "('basis_button', 'n_clicks')\n", (25831, 25859), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((25869, 25897), 'dash_extensions.enrich.Input', 'Input', (['"""input_func"""', '"""value"""'], {}), "('input_func', 'value')\n", (25874, 25897), False, 'from dash_extensions.enrich 
import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((25907, 25937), 'dash_extensions.enrich.Input', 'Input', (['"""AP_button"""', '"""n_clicks"""'], {}), "('AP_button', 'n_clicks')\n", (25912, 25937), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((25947, 25977), 'dash_extensions.enrich.Input', 'Input', (['"""model_select"""', '"""value"""'], {}), "('model_select', 'value')\n", (25952, 25977), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((25987, 26014), 'dash_extensions.enrich.Input', 'Input', (['"""UploadedDF"""', '"""data"""'], {}), "('UploadedDF', 'data')\n", (25992, 26014), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((27621, 27689), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'drop-1', 'index': dash.dependencies.MATCH}", '"""value"""'], {}), "({'type': 'drop-1', 'index': dash.dependencies.MATCH}, 'value')\n", (27626, 27689), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29119, 29185), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'params', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'params', 'index': dash.dependencies.ALL}, 'value')\n", (29124, 29185), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29196, 29264), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'params_2', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'params_2', 'index': dash.dependencies.ALL}, 'value')\n", (29201, 29264), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29275, 29341), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'drop-1', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'drop-1', 'index': dash.dependencies.ALL}, 'value')\n", (29280, 
29341), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29352, 29419), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'max_val', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'max_val', 'index': dash.dependencies.ALL}, 'value')\n", (29357, 29419), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29430, 29497), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'min_val', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'min_val', 'index': dash.dependencies.ALL}, 'value')\n", (29435, 29497), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29508, 29573), 'dash_extensions.enrich.Input', 'Input', (["{'type': 'order', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'order', 'index': dash.dependencies.ALL}, 'value')\n", (29513, 29573), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((29584, 29617), 'dash_extensions.enrich.Input', 'Input', (['"""basis_button"""', '"""n_clicks"""'], {}), "('basis_button', 'n_clicks')\n", (29589, 29617), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((32894, 32929), 'dash_extensions.enrich.Input', 'Input', (['"""data-info-open"""', '"""n_clicks"""'], {}), "('data-info-open', 'n_clicks')\n", (32899, 32929), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((32931, 32967), 'dash_extensions.enrich.Input', 'Input', (['"""data-info-close"""', '"""n_clicks"""'], {}), "('data-info-close', 'n_clicks')\n", (32936, 32967), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((32976, 33005), 'dash_extensions.enrich.State', 'State', (['"""data-info"""', '"""is_open"""'], {}), "('data-info', 'is_open')\n", (32981, 
33005), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((34542, 34562), 'ast.literal_eval', 'ast.literal_eval', (['id'], {}), '(id)\n', (34558, 34562), False, 'import ast\n'), ((33410, 33476), 'dash_extensions.enrich.State', 'State', (["{'type': 'params', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'params', 'index': dash.dependencies.ALL}, 'value')\n", (33415, 33476), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((33484, 33552), 'dash_extensions.enrich.State', 'State', (["{'type': 'params_2', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'params_2', 'index': dash.dependencies.ALL}, 'value')\n", (33489, 33552), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((33560, 33626), 'dash_extensions.enrich.State', 'State', (["{'type': 'drop-1', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'drop-1', 'index': dash.dependencies.ALL}, 'value')\n", (33565, 33626), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((33634, 33701), 'dash_extensions.enrich.State', 'State', (["{'type': 'max_val', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'max_val', 'index': dash.dependencies.ALL}, 'value')\n", (33639, 33701), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((33709, 33776), 'dash_extensions.enrich.State', 'State', (["{'type': 'min_val', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'min_val', 'index': dash.dependencies.ALL}, 'value')\n", (33714, 33776), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((33784, 33849), 'dash_extensions.enrich.State', 'State', (["{'type': 'order', 'index': dash.dependencies.ALL}", '"""value"""'], {}), "({'type': 'order', 
'index': dash.dependencies.ALL}, 'value')\n", (33789, 33849), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((36020, 36040), 'ast.literal_eval', 'ast.literal_eval', (['id'], {}), '(id)\n', (36036, 36040), False, 'import ast\n'), ((36766, 36794), 'dash_extensions.enrich.Input', 'Input', (['"""drop_basis"""', '"""value"""'], {}), "('drop_basis', 'value')\n", (36771, 36794), False, 'from dash_extensions.enrich import Dash, ServersideOutput, Output, Input, State, Trigger\n'), ((48020, 48044), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (48029, 48044), True, 'import plotly.graph_objs as go\n'), ((3158, 3226), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""**Parameter Definition**"""'], {'style': "{'color': '#000000'}"}), "('**Parameter Definition**', style={'color': '#000000'})\n", (3170, 3226), True, 'import dash_core_components as dcc\n'), ((4668, 4716), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""**Probability Density Function**"""'], {}), "('**Probability Density Function**')\n", (4680, 4716), True, 'import dash_core_components as dcc\n'), ((6060, 6095), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""**Basis Selection**"""'], {}), "('**Basis Selection**')\n", (6072, 6095), True, 'import dash_core_components as dcc\n'), ((11992, 12025), 'dash_bootstrap_components.Col', 'dbc.Col', (['method_dropdown'], {'width': '(6)'}), '(method_dropdown, width=6)\n', (11999, 12025), True, 'import dash_bootstrap_components as dbc\n'), ((12323, 12358), 'dash_bootstrap_components.Col', 'dbc.Col', (['dataset_info'], {'width': '"""auto"""'}), "(dataset_info, width='auto')\n", (12330, 12358), True, 'import dash_bootstrap_components as dbc\n'), ((12498, 12516), 'dash_bootstrap_components.Col', 'dbc.Col', (['mean_form'], {}), '(mean_form)\n', (12505, 12516), True, 'import dash_bootstrap_components as dbc\n'), ((12531, 12548), 'dash_bootstrap_components.Col', 
'dbc.Col', (['var_form'], {}), '(var_form)\n', (12538, 12548), True, 'import dash_bootstrap_components as dbc\n'), ((12563, 12579), 'dash_bootstrap_components.Col', 'dbc.Col', (['r2_form'], {}), '(r2_form)\n', (12570, 12579), True, 'import dash_bootstrap_components as dbc\n'), ((13014, 13052), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""**Compute Polynomial**"""'], {}), "('**Compute Polynomial**')\n", (13026, 13052), True, 'import dash_core_components as dcc\n'), ((14373, 14400), 'dash_bootstrap_components.Col', 'dbc.Col', (['TOP_CARD'], {'width': '(12)'}), '(TOP_CARD, width=12)\n', (14380, 14400), True, 'import dash_bootstrap_components as dbc\n'), ((14604, 14635), 'dash_bootstrap_components.Col', 'dbc.Col', (['COMPUTE_CARD'], {'width': '(12)'}), '(COMPUTE_CARD, width=12)\n', (14611, 14635), True, 'import dash_bootstrap_components as dbc\n'), ((17103, 17147), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Distribution"""'], {'html_for': '"""drop-1"""'}), "('Distribution', html_for='drop-1')\n", (17112, 17147), True, 'import dash_bootstrap_components as dbc\n'), ((17253, 17321), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Statistical moments/shape parameters"""'], {'html_for': '"""params"""'}), "('Statistical moments/shape parameters', html_for='params')\n", (17262, 17321), True, 'import dash_bootstrap_components as dbc\n'), ((18087, 18127), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Support"""'], {'html_for': '"""min_val"""'}), "('Support', html_for='min_val')\n", (18096, 18127), True, 'import dash_bootstrap_components as dbc\n'), ((18959, 18977), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Order"""'], {}), "('Order')\n", (18968, 18977), True, 'import dash_bootstrap_components as dbc\n'), ((18992, 19159), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': "{'type': 'order', 'index': n_clicks}", 'type': '"""number"""', 'value': 'np.nan', 'min': '(0)', 'placeholder': '"""Order"""', 
'debounce': '(True)', 'className': '"""ip_field"""'}), "(bs_size='sm', id={'type': 'order', 'index': n_clicks}, type=\n 'number', value=np.nan, min=0, placeholder='Order', debounce=True,\n className='ip_field')\n", (19001, 19159), True, 'import dash_bootstrap_components as dbc\n'), ((19316, 19337), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Plot PDF"""'], {}), "('Plot PDF')\n", (19325, 19337), True, 'import dash_bootstrap_components as dbc\n'), ((22081, 22098), 'pandas.DataFrame', 'pd.DataFrame', (['DOE'], {}), '(DOE)\n', (22093, 22098), True, 'import pandas as pd\n'), ((22668, 22700), 'base64.b64decode', 'base64.b64decode', (['content_string'], {}), '(content_string)\n', (22684, 22700), False, 'import base64\n'), ((24798, 24824), 'numpy.column_stack', 'np.column_stack', (['(df, DOE)'], {}), '((df, DOE))\n', (24813, 24824), True, 'import numpy as np\n'), ((40642, 40666), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (40651, 40666), True, 'import plotly.graph_objs as go\n'), ((45179, 45225), 'equadratures.datasets.score', 'eq.datasets.score', (['y_true', 'y_pred'], {'metric': '"""r2"""'}), "(y_true, y_pred, metric='r2')\n", (45196, 45225), True, 'import equadratures as eq\n'), ((46140, 46186), 'equadratures.datasets.score', 'eq.datasets.score', (['y_true', 'y_pred'], {'metric': '"""r2"""'}), "(y_true, y_pred, metric='r2')\n", (46157, 46186), True, 'import equadratures as eq\n'), ((4118, 4127), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (4125, 4127), True, 'import dash_html_components as html\n'), ((10478, 10606), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': '"""mean"""', 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""Mean..."""', 'className': '"""ip_field"""', 'disabled': '(True)'}), "(bs_size='sm', id='mean', type='number', value=np.nan, placeholder\n ='Mean...', className='ip_field', disabled=True)\n", (10487, 10606), True, 'import 
dash_bootstrap_components as dbc\n'), ((10794, 10929), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': '"""variance"""', 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""Variance..,"""', 'className': '"""ip_field"""', 'disabled': '(True)'}), "(bs_size='sm', id='variance', type='number', value=np.nan,\n placeholder='Variance..,', className='ip_field', disabled=True)\n", (10803, 10929), True, 'import dash_bootstrap_components as dbc\n'), ((11118, 11253), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': '"""r2_score"""', 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""R2 Score..,"""', 'className': '"""ip_field"""', 'disabled': '(True)'}), "(bs_size='sm', id='r2_score', type='number', value=np.nan,\n placeholder='R2 Score..,', className='ip_field', disabled=True)\n", (11127, 11253), True, 'import dash_bootstrap_components as dbc\n'), ((11457, 11688), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'options': "[{'label': 'Order 1', 'value': 1}, {'label': 'Order 2', 'value': 2}, {\n 'label': 'Order 3', 'value': 3}]", 'placeholder': '"""Order 1"""', 'value': '(1)', 'className': '"""m-1"""', 'id': '"""sobol_order"""', 'disabled': '(True)', 'clearable': '(False)'}), "(options=[{'label': 'Order 1', 'value': 1}, {'label': 'Order 2',\n 'value': 2}, {'label': 'Order 3', 'value': 3}], placeholder='Order 1',\n value=1, className='m-1', id='sobol_order', disabled=True, clearable=False)\n", (11469, 11688), True, 'import dash_core_components as dcc\n'), ((12065, 12132), 'dash_bootstrap_components.Spinner', 'dbc.Spinner', (['[Upload_region]'], {'show_initially': '(False)', 'color': '"""primary"""'}), "([Upload_region], show_initially=False, color='primary')\n", (12076, 12132), True, 'import dash_bootstrap_components as dbc\n'), ((12195, 12316), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Compute Polynomial"""'], {'id': '"""CU_button"""', 'n_clicks': '(0)', 'className': 
'"""ip_buttons"""', 'color': '"""primary"""', 'disabled': '(True)'}), "('Compute Polynomial', id='CU_button', n_clicks=0, className=\n 'ip_buttons', color='primary', disabled=True)\n", (12205, 12316), True, 'import dash_bootstrap_components as dbc\n'), ((12389, 12448), 'dash_bootstrap_components.Alert', 'dbc.Alert', ([], {'id': '"""poly-warning"""', 'color': '"""danger"""', 'is_open': '(False)'}), "(id='poly-warning', color='danger', is_open=False)\n", (12398, 12448), True, 'import dash_bootstrap_components as dbc\n'), ((13859, 13892), 'dash_bootstrap_components.Col', 'dbc.Col', (['model_selection'], {'width': '(4)'}), '(model_selection, width=4)\n', (13866, 13892), True, 'import dash_bootstrap_components as dbc\n'), ((14254, 14281), 'dash_bootstrap_components.Col', 'dbc.Col', (['info'], {'width': '"""auto"""'}), "(info, width='auto')\n", (14261, 14281), True, 'import dash_bootstrap_components as dbc\n'), ((14484, 14511), 'dash_bootstrap_components.Col', 'dbc.Col', (['PDF_GRAPH'], {'width': '(5)'}), '(PDF_GRAPH, width=5)\n', (14491, 14511), True, 'import dash_bootstrap_components as dbc\n'), ((14530, 14558), 'dash_bootstrap_components.Col', 'dbc.Col', (['BASIS_CARD'], {'width': '(7)'}), '(BASIS_CARD, width=7)\n', (14537, 14558), True, 'import dash_bootstrap_components as dbc\n'), ((19788, 19815), 'dash_bootstrap_components.Col', 'dbc.Col', (['dist_form'], {'width': '(2)'}), '(dist_form, width=2)\n', (19795, 19815), True, 'import dash_bootstrap_components as dbc\n'), ((19834, 19863), 'dash_bootstrap_components.Col', 'dbc.Col', (['params_form'], {'width': '(3)'}), '(params_form, width=3)\n', (19841, 19863), True, 'import dash_bootstrap_components as dbc\n'), ((19882, 19912), 'dash_bootstrap_components.Col', 'dbc.Col', (['min_max_form'], {'width': '(3)'}), '(min_max_form, width=3)\n', (19889, 19912), True, 'import dash_bootstrap_components as dbc\n'), ((19931, 19959), 'dash_bootstrap_components.Col', 'dbc.Col', (['order_form'], {'width': '(1)'}), '(order_form, 
width=1)\n', (19938, 19959), True, 'import dash_bootstrap_components as dbc\n'), ((19978, 20007), 'dash_bootstrap_components.Col', 'dbc.Col', (['toggle_form'], {'width': '(1)'}), '(toggle_form, width=1)\n', (19985, 20007), True, 'import dash_bootstrap_components as dbc\n'), ((23006, 23038), 'base64.b64decode', 'base64.b64decode', (['content_string'], {}), '(content_string)\n', (23022, 23038), False, 'import base64\n'), ((44618, 44635), 'numexpr.evaluate', 'ne.evaluate', (['expr'], {}), '(expr)\n', (44629, 44635), True, 'import numexpr as ne\n'), ((48668, 48692), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (48677, 48692), True, 'import plotly.graph_objs as go\n'), ((50037, 50072), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout', 'data': 'data'}), '(layout=layout, data=data)\n', (50046, 50072), True, 'import plotly.graph_objs as go\n'), ((52170, 52207), 'numpy.linspace', 'np.linspace', (['DOE[0, 0]', 'DOE[-1, 0]', 'N'], {}), '(DOE[0, 0], DOE[-1, 0], N)\n', (52181, 52207), True, 'import numpy as np\n'), ((52238, 52275), 'numpy.linspace', 'np.linspace', (['DOE[0, 1]', 'DOE[-1, 1]', 'N'], {}), '(DOE[0, 1], DOE[-1, 1], N)\n', (52249, 52275), True, 'import numpy as np\n'), ((52304, 52339), 'numpy.meshgrid', 'np.meshgrid', (['s1_samples', 's2_samples'], {}), '(s1_samples, s2_samples)\n', (52315, 52339), True, 'import numpy as np\n'), ((52366, 52392), 'numpy.reshape', 'np.reshape', (['S1', '(N * N, 1)'], {}), '(S1, (N * N, 1))\n', (52376, 52392), True, 'import numpy as np\n'), ((52419, 52445), 'numpy.reshape', 'np.reshape', (['S2', '(N * N, 1)'], {}), '(S2, (N * N, 1))\n', (52429, 52445), True, 'import numpy as np\n'), ((52473, 52500), 'numpy.hstack', 'np.hstack', (['[S1_vec, S2_vec]'], {}), '([S1_vec, S2_vec])\n', (52482, 52500), True, 'import numpy as np\n'), ((52593, 52625), 'numpy.reshape', 'np.reshape', (['PolyDiscreet', '(N, N)'], {}), '(PolyDiscreet, (N, N))\n', (52603, 52625), True, 'import numpy as 
np\n'), ((52651, 52665), 'plotly.graph_objs.Figure', 'go.Figure', (['fig'], {}), '(fig)\n', (52660, 52665), True, 'import plotly.graph_objs as go\n'), ((2601, 2637), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""**More Information**"""'], {}), "('**More Information**')\n", (2613, 2637), True, 'import dash_core_components as dcc\n'), ((2765, 2841), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Close"""'], {'id': '"""data-info-close"""', 'className': '"""py-0"""', 'color': '"""primary"""'}), "('Close', id='data-info-close', className='py-0', color='primary')\n", (2775, 2841), True, 'import dash_bootstrap_components as dbc\n'), ((4782, 4809), 'dash_bootstrap_components.Col', 'dbc.Col', (['PDF_PLOT'], {'width': '(12)'}), '(PDF_PLOT, width=12)\n', (4789, 4809), True, 'import dash_bootstrap_components as dbc\n'), ((9833, 9872), 'dash_core_components.Markdown', 'dcc.Markdown', (['""""""'], {'id': '"""dataset_filename"""'}), "('', id='dataset_filename')\n", (9845, 9872), True, 'import dash_core_components as dcc\n'), ((10164, 10243), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Close"""'], {'id': '"""dataset-info-close"""', 'className': '"""py-0"""', 'color': '"""primary"""'}), "('Close', id='dataset-info-close', className='py-0', color='primary')\n", (10174, 10243), True, 'import dash_bootstrap_components as dbc\n'), ((14049, 14232), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Define an analytical model, and its uncertain input parameters. Then, use polynomial chaos to compute output uncertainties and sensitivities."""'], {'id': '"""info_text"""'}), "(\n 'Define an analytical model, and its uncertain input parameters. 
Then, use polynomial chaos to compute output uncertainties and sensitivities.'\n , id='info_text')\n", (14061, 14232), True, 'import dash_core_components as dcc\n'), ((22257, 22299), 'dash_core_components.send_data_frame', 'dcc.send_data_frame', (['DOE.to_csv', '"""DOE.csv"""'], {}), "(DOE.to_csv, 'DOE.csv')\n", (22276, 22299), True, 'import dash_core_components as dcc\n'), ((22902, 22916), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (22910, 22916), True, 'import numpy as np\n'), ((25367, 25386), 'dash_table.Format.Format', 'Format', ([], {'precision': '(4)'}), '(precision=4)\n', (25373, 25386), False, 'from dash_table.Format import Format, Scheme, Trim\n'), ((53294, 53332), 'numpy.linspace', 'np.linspace', (['DOE[0, 0]', 'DOE[-1, -1]', 'N'], {}), '(DOE[0, 0], DOE[-1, -1], N)\n', (53305, 53332), True, 'import numpy as np\n'), ((53357, 53380), 'numpy.meshgrid', 'np.meshgrid', (['s1_samples'], {}), '(s1_samples)\n', (53368, 53380), True, 'import numpy as np\n'), ((53407, 53429), 'numpy.reshape', 'np.reshape', (['S1', '(N, 1)'], {}), '(S1, (N, 1))\n', (53417, 53429), True, 'import numpy as np\n'), ((53458, 53477), 'numpy.hstack', 'np.hstack', (['[S1_vec]'], {}), '([S1_vec])\n', (53467, 53477), True, 'import numpy as np\n'), ((53570, 53597), 'numpy.reshape', 'np.reshape', (['PolyDiscreet', 'N'], {}), '(PolyDiscreet, N)\n', (53580, 53597), True, 'import numpy as np\n'), ((53623, 53637), 'plotly.graph_objs.Figure', 'go.Figure', (['fig'], {}), '(fig)\n', (53632, 53637), True, 'import plotly.graph_objs as go\n'), ((2680, 2704), 'utils.convert_latex', 'convert_latex', (['info_text'], {}), '(info_text)\n', (2693, 2704), False, 'from utils import convert_latex\n'), ((4210, 4247), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""param_add"""', 'children': '[]'}), "(id='param_add', children=[])\n", (4218, 4247), True, 'import dash_html_components as html\n'), ((6190, 6222), 'dash_bootstrap_components.Col', 'dbc.Col', (['basis_dropdown'], {'width': '(3)'}), 
'(basis_dropdown, width=3)\n', (6197, 6222), True, 'import dash_bootstrap_components as dbc\n'), ((6713, 6746), 'dash_bootstrap_components.Col', 'dbc.Col', (['growth_dropdown'], {'width': '(3)'}), '(growth_dropdown, width=3)\n', (6720, 6746), True, 'import dash_bootstrap_components as dbc\n'), ((7346, 7376), 'dash_bootstrap_components.Col', 'dbc.Col', (['DOE_download'], {'width': '(3)'}), '(DOE_download, width=3)\n', (7353, 7376), True, 'import dash_bootstrap_components as dbc\n'), ((7458, 7555), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""plot_basis"""', 'style': "{'width': 'inherit', 'height': '40vh', 'margin-top': '5px'}"}), "(id='plot_basis', style={'width': 'inherit', 'height': '40vh',\n 'margin-top': '5px'})\n", (7467, 7555), True, 'import dash_core_components as dcc\n'), ((9546, 9615), 'dash_html_components.A', 'html.A', (['"""Select Files"""'], {'style': "{'color': 'blue'}", 'id': '"""filename_append"""'}), "('Select Files', style={'color': 'blue'}, id='filename_append')\n", (9552, 9615), True, 'import dash_html_components as html\n'), ((9902, 10096), 'dash_table.DataTable', 'dash_table.DataTable', ([], {'data': '[]', 'columns': '[]', 'id': '"""upload_data_table"""', 'style_table': "{'overflowX': 'auto', 'overflowY': 'auto', 'height': '35vh'}", 'editable': '(True)', 'fill_width': '(True)', 'page_size': '(20)'}), "(data=[], columns=[], id='upload_data_table',\n style_table={'overflowX': 'auto', 'overflowY': 'auto', 'height': '35vh'\n }, editable=True, fill_width=True, page_size=20)\n", (9922, 10096), False, 'import dash_table\n'), ((13167, 13194), 'dash_bootstrap_components.Col', 'dbc.Col', (['left_side'], {'width': '(6)'}), '(left_side, width=6)\n', (13174, 13194), True, 'import dash_bootstrap_components as dbc\n'), ((13220, 13248), 'dash_bootstrap_components.Col', 'dbc.Col', (['right_side'], {'width': '(6)'}), '(right_side, width=6)\n', (13227, 13248), True, 'import dash_bootstrap_components as dbc\n'), ((17410, 17567), 
'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': "{'type': 'params', 'index': n_clicks}", 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '""""""', 'debounce': '(True)', 'className': '"""ip_field"""'}), "(bs_size='sm', id={'type': 'params', 'index': n_clicks}, type=\n 'number', value=np.nan, placeholder='', debounce=True, className='ip_field'\n )\n", (17419, 17567), True, 'import dash_bootstrap_components as dbc\n'), ((17699, 17858), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': "{'type': 'params_2', 'index': n_clicks}", 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '""""""', 'debounce': '(True)', 'className': '"""ip_field"""'}), "(bs_size='sm', id={'type': 'params_2', 'index': n_clicks}, type=\n 'number', value=np.nan, placeholder='', debounce=True, className='ip_field'\n )\n", (17708, 17858), True, 'import dash_bootstrap_components as dbc\n'), ((19696, 19730), 'utils.convert_latex', 'convert_latex', (["('$x_%d$' % n_clicks)"], {}), "('$x_%d$' % n_clicks)\n", (19709, 19730), False, 'from utils import convert_latex\n'), ((30312, 30496), 'equadratures.Parameter', 'eq.Parameter', ([], {'distribution': 'distribution[j]', 'shape_parameter_A': 'shape_parameter_A[j]', 'shape_parameter_B': 'shape_parameter_B[j]', 'lower': 'min_val[j]', 'upper': 'max_val[j]', 'order': 'order[j]'}), '(distribution=distribution[j], shape_parameter_A=\n shape_parameter_A[j], shape_parameter_B=shape_parameter_B[j], lower=\n min_val[j], upper=max_val[j], order=order[j])\n', (30324, 30496), True, 'import equadratures as eq\n'), ((55116, 55141), 'utils.convert_latex', 'convert_latex', (['added_text'], {}), '(added_text)\n', (55129, 55141), False, 'from utils import convert_latex\n'), ((3378, 3472), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Add Parameter"""'], {'id': '"""AP_button"""', 'n_clicks': '(0)', 'color': '"""primary"""', 'className': '"""py-0"""'}), "('Add Parameter', id='AP_button', 
n_clicks=0, color='primary',\n className='py-0')\n", (3388, 3472), True, 'import dash_bootstrap_components as dbc\n'), ((3771, 3884), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input_func"""', 'type': '"""text"""', 'placeholder': '"""Input Function..."""', 'className': '"""ip_field"""', 'debounce': '(True)'}), "(id='input_func', type='text', placeholder='Input Function...',\n className='ip_field', debounce=True)\n", (3780, 3884), True, 'import dash_core_components as dcc\n'), ((3988, 4048), 'dash_bootstrap_components.Alert', 'dbc.Alert', ([], {'id': '"""input-warning"""', 'color': '"""danger"""', 'is_open': '(False)'}), "(id='input-warning', color='danger', is_open=False)\n", (3997, 4048), True, 'import dash_bootstrap_components as dbc\n'), ((6279, 6401), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': '"""q_val"""', 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""q"""', 'className': '"""ip_field"""', 'disabled': '(True)'}), "(bs_size='sm', id='q_val', type='number', value=np.nan,\n placeholder='q', className='ip_field', disabled=True)\n", (6288, 6401), True, 'import dash_bootstrap_components as dbc\n'), ((6511, 6638), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': '"""levels"""', 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""Level"""', 'className': '"""ip_field"""', 'disabled': '(True)'}), "(bs_size='sm', id='levels', type='number', value=np.nan,\n placeholder='Level', className='ip_field', disabled=True)\n", (6520, 6638), True, 'import dash_bootstrap_components as dbc\n'), ((6886, 7002), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Set basis"""'], {'id': '"""basis_button"""', 'n_clicks': '(0)', 'className': '"""ip_buttons"""', 'color': '"""primary"""', 'disabled': '(False)'}), "('Set basis', id='basis_button', n_clicks=0, className=\n 'ip_buttons', color='primary', disabled=False)\n", (6896, 7002), True, 'import dash_bootstrap_components 
as dbc\n'), ((7071, 7204), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': '"""op_box"""', 'type': '"""number"""', 'value': '""""""', 'placeholder': '"""Cardinality..."""', 'className': '"""ip_field"""', 'disabled': '(True)'}), "(bs_size='sm', id='op_box', type='number', value='', placeholder=\n 'Cardinality...', className='ip_field', disabled=True)\n", (7080, 7204), True, 'import dash_bootstrap_components as dbc\n'), ((7253, 7315), 'dash_bootstrap_components.Alert', 'dbc.Alert', ([], {'id': '"""compute-warning"""', 'color': '"""danger"""', 'is_open': '(False)'}), "(id='compute-warning', color='danger', is_open=False)\n", (7262, 7315), True, 'import dash_bootstrap_components as dbc\n'), ((18226, 18399), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': "{'type': 'min_val', 'index': n_clicks}", 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""Minimum value..."""', 'debounce': '(True)', 'className': '"""ip_field"""'}), "(bs_size='sm', id={'type': 'min_val', 'index': n_clicks}, type=\n 'number', value=np.nan, placeholder='Minimum value...', debounce=True,\n className='ip_field')\n", (18235, 18399), True, 'import dash_bootstrap_components as dbc\n'), ((18552, 18725), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'bs_size': '"""sm"""', 'id': "{'type': 'max_val', 'index': n_clicks}", 'type': '"""number"""', 'value': 'np.nan', 'placeholder': '"""Maximum value..."""', 'debounce': '(True)', 'className': '"""ip_field"""'}), "(bs_size='sm', id={'type': 'max_val', 'index': n_clicks}, type=\n 'number', value=np.nan, placeholder='Maximum value...', debounce=True,\n className='ip_field')\n", (18561, 18725), True, 'import dash_bootstrap_components as dbc\n'), ((23065, 23078), 'io.BytesIO', 'io.BytesIO', (['r'], {}), '(r)\n', (23075, 23078), False, 'import io\n'), ((40981, 41005), 'numpy.zeros_like', 'np.zeros_like', (['DOE[:, 0]'], {}), '(DOE[:, 0])\n', (40994, 41005), True, 'import numpy as 
np\n'), ((54624, 54730), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'S1', 'y': 'PolyDiscreet', 'mode': '"""lines"""', 'name': '"""Polynomial approx."""', 'line_color': '"""rgb(178,34,34)"""'}), "(x=S1, y=PolyDiscreet, mode='lines', name='Polynomial approx.',\n line_color='rgb(178,34,34)')\n", (54634, 54730), True, 'import plotly.graph_objs as go\n'), ((3624, 3650), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""param_added"""'}), "(id='param_added')\n", (3632, 3650), True, 'import dash_html_components as html\n'), ((30993, 31177), 'equadratures.Parameter', 'eq.Parameter', ([], {'distribution': 'distribution[j]', 'shape_parameter_A': 'shape_parameter_A[j]', 'shape_parameter_B': 'shape_parameter_B[j]', 'lower': 'min_val[j]', 'upper': 'max_val[j]', 'order': 'order[j]'}), '(distribution=distribution[j], shape_parameter_A=\n shape_parameter_A[j], shape_parameter_B=shape_parameter_B[j], lower=\n min_val[j], upper=max_val[j], order=order[j])\n', (31005, 31177), True, 'import equadratures as eq\n'), ((31477, 31580), 'equadratures.Parameter', 'eq.Parameter', ([], {'distribution': 'distribution[j]', 'shape_parameter_A': 'shape_parameter_A[j]', 'order': 'order[j]'}), '(distribution=distribution[j], shape_parameter_A=\n shape_parameter_A[j], order=order[j])\n', (31489, 31580), True, 'import equadratures as eq\n'), ((31851, 31950), 'equadratures.Parameter', 'eq.Parameter', ([], {'distribution': 'distribution[j]', 'lower': 'min_val[j]', 'upper': 'max_val[j]', 'order': 'order[j]'}), '(distribution=distribution[j], lower=min_val[j], upper=max_val[\n j], order=order[j])\n', (31863, 31950), True, 'import equadratures as eq\n')] |
import numpy as np
import pandas as pd
# Base-32 alphabet used by geohash: digits plus lowercase letters,
# excluding the easily-confused characters a, i, l and o.
__base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
# Reverse lookup table: character -> its 5-bit value.
__decodemap = {char: value for value, char in enumerate(__base32)}
def decode_exactly(geohash):
    """Decode a geohash string into its cell centre and half-widths.

    Args:
        geohash (str): geohash string made of base-32 characters.

    Returns:
        tuple: ``(lon, lat, lon_err, lat_err)`` — the centre longitude and
        latitude of the geohash cell together with the half-width of the
        cell in each direction.
    """
    lat_range, lon_range = (-90.0, 90.0), (-180.0, 180.0)
    lat_err, lon_err = 90.0, 180.0
    # Geohash bits alternate longitude / latitude, starting with longitude.
    refine_lon = True
    for ch in geohash:
        value = __decodemap[ch]
        for mask in (16, 8, 4, 2, 1):
            if refine_lon:
                lon_err /= 2
                mid = (lon_range[0] + lon_range[1]) / 2
                lon_range = (mid, lon_range[1]) if value & mask else (lon_range[0], mid)
            else:
                lat_err /= 2
                mid = (lat_range[0] + lat_range[1]) / 2
                lat_range = (mid, lat_range[1]) if value & mask else (lat_range[0], mid)
            refine_lon = not refine_lon
    lat = (lat_range[0] + lat_range[1]) / 2
    lon = (lon_range[0] + lon_range[1]) / 2
    return lon, lat, lon_err, lat_err
def decode(geohash):
    """Decode a geohash into (lon, lat) strings trimmed to the hash precision.

    Args:
        geohash (str): geohash string.

    Returns:
        tuple: ``(lons, lats)`` — longitude and latitude formatted as strings
        with only as many decimal places as the cell error justifies.
    """
    lon, lat, lon_err, lat_err = decode_exactly(geohash)
    # Keep at least one decimal; trailing zeros after the point are stripped.
    lat_digits = max(1, int(round(-np.log10(lat_err)))) - 1
    lon_digits = max(1, int(round(-np.log10(lon_err)))) - 1
    lats = "%.*f" % (lat_digits, lat)
    lons = "%.*f" % (lon_digits, lon)
    if '.' in lats:
        lats = lats.rstrip('0')
    if '.' in lons:
        lons = lons.rstrip('0')
    return lons, lats
def encode(longitude, latitude, precision=12):
    """Encode a (longitude, latitude) pair into a geohash string.

    Args:
        longitude: longitude in degrees, in [-180, 180].
        latitude: latitude in degrees, in [-90, 90].
        precision: number of base-32 characters in the output geohash.

    Returns:
        str: the geohash of length ``precision``.
    """
    lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)
    geohash = []
    bits = [16, 8, 4, 2, 1]
    bit = 0
    ch = 0
    # Geohash interleaves longitude and latitude bits, starting with longitude.
    even = True
    while len(geohash) < precision:
        if even:
            mid = (lon_interval[0] + lon_interval[1]) / 2
            if longitude > mid:
                ch |= bits[bit]
                lon_interval = (mid, lon_interval[1])
            else:
                lon_interval = (lon_interval[0], mid)
        else:
            mid = (lat_interval[0] + lat_interval[1]) / 2
            if latitude > mid:
                ch |= bits[bit]
                lat_interval = (mid, lat_interval[1])
            else:
                lat_interval = (lat_interval[0], mid)
        even = not even
        if bit < 4:
            bit += 1
        else:
            # Five bits accumulated: emit one base-32 character and reset.
            geohash.append(__base32[ch])
            bit = 0
            ch = 0
    return ''.join(geohash)
def geohash_encode(lon, lat, precision=12):
    """Encode longitude/latitude columns into geohash strings.

    Args:
        lon (Series): longitude column.
        lat (Series): latitude column.
        precision (int): number of characters in each geohash.

    Returns:
        Series: the geohash string for every (lon, lat) row, index-aligned
        with the input columns.
    """
    coords = pd.DataFrame()
    coords['lon'] = lon
    coords['lat'] = lat
    return coords.apply(lambda row: encode(row['lon'], row['lat'], precision), axis=1)
def geohash_decode(geohash):
    """Decode a column of geohash strings back into longitude/latitude.

    Args:
        geohash (Series): geohash string column.

    Returns:
        tuple: ``(lon, lat)`` — decoded longitude and latitude columns
        (as strings), index-aligned with the input.
    """
    pairs = geohash.apply(decode)
    lon = pairs.apply(lambda pair: pair[0])
    lat = pairs.apply(lambda pair: pair[1])
    return lon, lat
def geohash_togrid(geohash):
    """Convert a column of geohash strings into their grid-cell polygons.

    Args:
        geohash (Series): geohash string column.

    Returns:
        Series: a shapely ``Polygon`` covering each geohash cell.
    """
    # Import once here instead of inside the per-row helper, so the import
    # machinery is not exercised for every element of the Series.
    from shapely.geometry import Polygon

    lonslats = geohash.apply(decode_exactly)

    def topoly(item):
        (lon, lat, lon_err, lat_err) = item
        # Corners of the axis-aligned cell centred at (lon, lat); the first
        # corner is repeated to close the ring.
        return Polygon([[lon - lon_err, lat - lat_err],
                        [lon - lon_err, lat + lat_err],
                        [lon + lon_err, lat + lat_err],
                        [lon + lon_err, lat - lat_err],
                        [lon - lon_err, lat - lat_err],
                        ])

    return lonslats.apply(topoly)
"pandas.DataFrame",
"shapely.geometry.Polygon",
"numpy.log10"
] | [((2748, 2762), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2760, 2762), True, 'import pandas as pd\n'), ((3660, 3838), 'shapely.geometry.Polygon', 'Polygon', (['[[lon - lon_err, lat - lat_err], [lon - lon_err, lat + lat_err], [lon +\n lon_err, lat + lat_err], [lon + lon_err, lat - lat_err], [lon - lon_err,\n lat - lat_err]]'], {}), '([[lon - lon_err, lat - lat_err], [lon - lon_err, lat + lat_err], [\n lon + lon_err, lat + lat_err], [lon + lon_err, lat - lat_err], [lon -\n lon_err, lat - lat_err]])\n', (3667, 3838), False, 'from shapely.geometry import Polygon\n'), ((1262, 1279), 'numpy.log10', 'np.log10', (['lat_err'], {}), '(lat_err)\n', (1270, 1279), True, 'import numpy as np\n'), ((1332, 1349), 'numpy.log10', 'np.log10', (['lon_err'], {}), '(lon_err)\n', (1340, 1349), True, 'import numpy as np\n')] |
# Import tensorflow and numpy.
import tensorflow as tf
import numpy as np

# Read the data for deep learning.
data = np.loadtxt('./data.csv',delimiter=',',unpack=True,dtype='float32')

# Put columns 0 through 2 of the csv file (the features) into x_data.
# The remaining columns are the classification labels and go into y_data.
x_data = np.transpose(data[0:3])
y_data = np.transpose(data[3:])

# Step 1 Neural network
# Declare X and Y as placeholders that will be fed x_data and y_data.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Declare the weights of the 3 layers: (1 x 3) x (3 x 50) x (50 x 100) x (100 x 3) = (1 x 3)
W1 = tf.Variable(tf.random_uniform([3,50],-1.,1.))
W2 = tf.Variable(tf.random_uniform([50,100],-1.,1.))
W3 = tf.Variable(tf.random_uniform([100,3],-1.,1.))

# Declare the 3 layers. Since this is a classification task, use the
# sigmoid function so the hidden activations fall into the range 0~1.
L1 = tf.sigmoid(tf.matmul(X,W1))
L2 = tf.sigmoid(tf.matmul(L1,W2))
L3 = tf.matmul(L2,W3)

# One of the three grades A, B, C is chosen, so use softmax cross entropy
# as the cost function.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=L3,labels= Y))

# Search for the global minimum with a 0.001 learning rate.
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train = optimizer.minimize(cost)

# Open a session.
with tf.Session() as sess:
    # Initialize the variables.
    init = tf.global_variables_initializer()
    sess.run(init)

    # Train for a total of 20001 steps.
    for step in range(20001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})

        # Print the current cost every 1000 steps.
        if step % 1000 == 0:
            print (step, sess.run(cost, feed_dict={X: x_data, Y: y_data}))

    # After training, check what the model predicts on the training data.
    pred = tf.argmax(L3,1)
    real = tf.argmax(Y,1)
    print("Prediction:",sess.run(pred,feed_dict={X:x_data}))
    print("Real:",sess.run(real, feed_dict={Y:y_data}))

    # Show the predicted grade for scores of 80, 80, 80 in the three subjects.
    print("Grade: ",sess.run(pred,feed_dict={X:[[80,80,80]]}))
| [
"tensorflow.train.AdamOptimizer",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.random_uniform",
"tensorflow.argmax",
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"numpy.loadtxt",
"numpy.transpose"
] | [((118, 187), 'numpy.loadtxt', 'np.loadtxt', (['"""./data.csv"""'], {'delimiter': '""","""', 'unpack': '(True)', 'dtype': '"""float32"""'}), "('./data.csv', delimiter=',', unpack=True, dtype='float32')\n", (128, 187), True, 'import numpy as np\n'), ((272, 295), 'numpy.transpose', 'np.transpose', (['data[0:3]'], {}), '(data[0:3])\n', (284, 295), True, 'import numpy as np\n'), ((305, 327), 'numpy.transpose', 'np.transpose', (['data[3:]'], {}), '(data[3:])\n', (317, 327), True, 'import numpy as np\n'), ((411, 437), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (425, 437), True, 'import tensorflow as tf\n'), ((442, 468), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (456, 468), True, 'import tensorflow as tf\n'), ((845, 862), 'tensorflow.matmul', 'tf.matmul', (['L2', 'W3'], {}), '(L2, W3)\n', (854, 862), True, 'import tensorflow as tf\n'), ((1081, 1124), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1103, 1124), True, 'import tensorflow as tf\n'), ((563, 600), 'tensorflow.random_uniform', 'tf.random_uniform', (['[3, 50]', '(-1.0)', '(1.0)'], {}), '([3, 50], -1.0, 1.0)\n', (580, 600), True, 'import tensorflow as tf\n'), ((614, 653), 'tensorflow.random_uniform', 'tf.random_uniform', (['[50, 100]', '(-1.0)', '(1.0)'], {}), '([50, 100], -1.0, 1.0)\n', (631, 653), True, 'import tensorflow as tf\n'), ((667, 705), 'tensorflow.random_uniform', 'tf.random_uniform', (['[100, 3]', '(-1.0)', '(1.0)'], {}), '([100, 3], -1.0, 1.0)\n', (684, 705), True, 'import tensorflow as tf\n'), ((789, 805), 'tensorflow.matmul', 'tf.matmul', (['X', 'W1'], {}), '(X, W1)\n', (798, 805), True, 'import tensorflow as tf\n'), ((822, 839), 'tensorflow.matmul', 'tf.matmul', (['L1', 'W2'], {}), '(L1, W2)\n', (831, 839), True, 'import tensorflow as tf\n'), ((963, 1023), 'tensorflow.nn.softmax_cross_entropy_with_logits', 
'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'L3', 'labels': 'Y'}), '(logits=L3, labels=Y)\n', (1002, 1023), True, 'import tensorflow as tf\n'), ((1181, 1193), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1191, 1193), True, 'import tensorflow as tf\n'), ((1233, 1266), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1264, 1266), True, 'import tensorflow as tf\n'), ((1612, 1628), 'tensorflow.argmax', 'tf.argmax', (['L3', '(1)'], {}), '(L3, 1)\n', (1621, 1628), True, 'import tensorflow as tf\n'), ((1639, 1654), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (1648, 1654), True, 'import tensorflow as tf\n')] |
import torch
import numpy as np
from tqdm import tqdm
from typing import Union, List, Tuple, Any, Dict
from easydict import EasyDict
from .dataset import preprocess, InferenceDataset, InferenceDatasetWithKeypoints
from .network import build_spin
from .. import BasePose3dRunner, BasePose3dRefiner, ACTIONS
from iPERCore.tools.human_digitalizer.bodynets import SMPL
from iPERCore.tools.utils.dataloaders import build_inference_loader
from iPERCore.tools.utils.geometry.boxes import cal_head_bbox
from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm
from iPERCore.tools.utils.filesio.persistence import load_toml_file
__all__ = ["SPINRunner"]
class SPINRunner(BasePose3dRunner):
    """SPIN-based monocular 3D human body (SMPL) estimator runner."""

    def __init__(self,
                 cfg_or_path: Union[EasyDict, str],
                 device=torch.device("cpu")):
        """

        Args:
            cfg_or_path (EasyDict or str): the configuration EasyDict or the cfg_path with `toml` file.
                If it is an EasyDict instance, it must contain the followings,
                    --ckpt_path (str): the path of the pre-trained checkpoints;
                    --smpl_path (str): the path of the smpl model;
                    --smpl_mean_params (str): the path of the mean parameters of SMPL.

                Otherwise if it is a `toml` file, an example could be the followings,
                    ckpt_path = "./assets/pretrains/spin_ckpt.pth"
                    smpl_path = "./assets/pretrains/smpl_model.pkl"
                    smpl_mean_params = "./assets/pretrains/smpl_mean_params.npz"

            device (torch.device):
        """

        self.device = device

        # Per-channel normalization constants (RGB order), reshaped to
        # (1, 3, 1, 1) so they broadcast over NCHW image batches.
        self.MEAN = torch.as_tensor([0.485, 0.456, 0.406])[None, :, None, None].to(self.device)
        self.STD = torch.as_tensor([0.229, 0.224, 0.225])[None, :, None, None].to(self.device)

        # A string argument is treated as the path of a toml config file.
        if isinstance(cfg_or_path, str):
            cfg = EasyDict(load_toml_file(cfg_or_path))
        else:
            cfg = cfg_or_path

        self.model = build_spin(pretrained=False)
        checkpoint = torch.load(cfg["ckpt_path"])
        self.model.load_state_dict(checkpoint, strict=True)
        self.model.eval()

        self._smpl = SMPL(cfg["smpl_path"]).to(self.device)
        self.model = self.model.to(self.device)

    def __call__(self, image: np.ndarray,
                 boxes: Union[np.ndarray, List, Tuple, Any],
                 action: ACTIONS = ACTIONS.SPLIT) -> Dict[str, Any]:
        """
        Args:
            image (np.ndarray): (H, W, C), color intensity [0, 255] with BGR color channel;
            boxes (np.ndarray or List, or Tuple or None): (N, 4)
            action:
                -- 0: only return `cams`, `pose` and `shape` of SMPL;
                -- 1: return `cams`, `pose`, `shape` and `verts`.
                -- 2: return `cams`, `pose`, `shape`, `verts`, `j2d` and `j3d`.

        Returns:
            result (dict):
        """

        image = np.copy(image)

        # Crop/resize the image around the boxes; proc_info records the scale
        # and offset needed to map predictions back onto the original image.
        proc_img, proc_info = preprocess(image, boxes)
        proc_img = torch.tensor(proc_img).to(device=self.device)[None]

        with torch.no_grad():
            proc_img = (proc_img - self.MEAN) / self.STD

            smpls = self.model(proc_img)

            # Map the predicted camera from the cropped frame back to the
            # original image, then normalize it by the image height.
            cams_orig = cam_init2orig(smpls[:, 0:3], proc_info["scale"],
                                      torch.tensor(proc_info["start_pt"], device=self.device).float())
            cams = cam_norm(cams_orig, proc_info["im_shape"][0])
            smpls[:, 0:3] = cams

        # Post-process the raw theta vector according to the requested action.
        if action == ACTIONS.SPLIT:
            result = self.body_model.split(smpls)

        elif action == ACTIONS.SKIN:
            result = self.body_model.skinning(smpls)

        elif action == ACTIONS.SMPL:
            result = {"theta": smpls}

        else:
            result = self.body_model.get_details(smpls)

        result["proc_info"] = proc_info

        return result

    def run_with_smplify(self, image_paths: List[str], boxes: List[Union[List, Tuple, np.ndarray]],
                         keypoints_info: Dict, smplify_runner: BasePose3dRefiner,
                         batch_size: int = 16, num_workers: int = 4,
                         filter_invalid: bool = True, temporal: bool = True):
        """
        Args:
            image_paths (list of str): the image paths;
            boxes (list of Union[np.np.ndarray, list, tuple)): the bounding boxes of each image;
            keypoints_info (Dict): the keypoints information of each image;
            smplify_runner (BasePose3dRefiner): the simplify instance, it must contains the keypoint_formater;
            batch_size (int): the mini-batch size;
            num_workers (int): the number of processes;
            filter_invalid (bool): the flag to control whether filter invalid frames or not;
            temporal (bool): use temporal smooth optimization or not.

        Returns:
            smpl_infos (dict): the estimated smpl information, it contains,
                --all_init_smpls (torch.Tensor): (num, 85), the initialized smpls;
                --all_opt_smpls (torch.Tensor): (num, 85), the optimized smpls;
                --all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes.
        """

        def head_is_valid(head_boxes):
            # A frame is kept only if its projected head box exceeds 10 x 10 px.
            return (head_boxes[:, 1] - head_boxes[:, 0]) * (head_boxes[:, 3] - head_boxes[:, 2]) > 10 * 10

        dataset = InferenceDatasetWithKeypoints(image_paths, boxes, keypoints_info,
                                                smplify_runner.keypoint_formater, image_size=224, temporal=temporal)

        data_loader = build_inference_loader(dataset, batch_size=batch_size, num_workers=num_workers)

        """
        sample (dict): the sample information, it contains,
            --image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized
                by MEAN and STD, RGB channel;
            --orig_image (torch.Tensor): (3, height, width) is the in rage of [0, 1], RGB channel;
            --im_shape (torch.Tensor): (height, width)
            --keypoints (dict): (num_joints, 3), and num_joints could be [75,].
            --center (torch.Tensor): (2,);
            --start_pt (torch.Tensor): (2,);
            --scale (torch.Tensor): (1,);
            --img_path (str): the image path.
        """

        all_init_smpls = []
        all_opt_smpls = []
        all_pose3d_img_ids = []
        for sample in tqdm(data_loader):
            images = sample["image"].to(self.device)
            start_pt = sample["start_pt"].to(self.device)
            scale = sample["scale"][:, None].to(self.device).float()
            im_shape = sample["im_shape"][:, 0:1].to(self.device)
            keypoints_info = sample["keypoints"].to(self.device)

            img_ids = sample["img_id"]

            # Initial SPIN prediction (no gradients needed for the network pass).
            with torch.no_grad():
                init_smpls = self.model(images)
                cams_orig = cam_init2orig(init_smpls[:, 0:3], scale, start_pt)
                cams = cam_norm(cams_orig, im_shape)
                init_smpls[:, 0:3] = cams

            # Refine pose/shape with SMPLify using the 2D keypoints; the theta
            # layout here is [cams (3), pose, betas (10)].
            smplify_results = smplify_runner(
                keypoints_info, cams, init_smpls[:, -10:], init_smpls[:, 3:-10], proc_kps=False, temporal=temporal
            )
            opt_smpls = torch.cat([cams, smplify_results["new_opt_pose"], smplify_results["new_opt_betas"]], dim=1)

            if filter_invalid:
                # Drop frames whose rendered head region is too small to be reliable.
                opt_smpls_info = self.get_details(opt_smpls)
                head_boxes = cal_head_bbox(opt_smpls_info["j2d"], image_size=512)
                valid = head_is_valid(head_boxes).nonzero(as_tuple=False)
                valid.squeeze_(-1)
                img_ids = img_ids[valid]

            all_init_smpls.append(init_smpls.cpu())
            all_opt_smpls.append(opt_smpls.cpu())
            all_pose3d_img_ids.append(img_ids.cpu())

        all_init_smpls = torch.cat(all_init_smpls, dim=0)
        all_opt_smpls = torch.cat(all_opt_smpls, dim=0)
        all_valid_ids = torch.cat(all_pose3d_img_ids, dim=0)

        smpl_infos = {
            "all_init_smpls": all_init_smpls,
            "all_opt_smpls": all_opt_smpls,
            "all_valid_ids": all_valid_ids
        }
        return smpl_infos

    def run(self, image_paths: List[str], boxes: List[List],
            batch_size: int = 16, num_workers: int = 4,
            filter_invalid: bool = True, temporal: bool = True):
        """
        Args:
            image_paths (list of str): the image paths;
            boxes (list of list): the bounding boxes of each image;
            batch_size (int): the mini-batch size;
            num_workers (int): the number of processes;
            filter_invalid (bool): the flag to control whether filter invalid frames or not;
            temporal (bool): use temporal smooth optimization or not.

        Returns:
            smpl_infos (dict): the estimated smpl information, it contains,
                --all_init_smpls (torch.Tensor): (num, 85), the initialized smpls;
                --all_opt_smpls (torch.Tensor): None
                --all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes.
        """

        def head_is_valid(head_boxes):
            # A frame is kept only if its projected head box exceeds 10 x 10 px.
            return (head_boxes[:, 1] - head_boxes[:, 0]) * (head_boxes[:, 3] - head_boxes[:, 2]) > 10 * 10

        dataset = InferenceDataset(image_paths, boxes, image_size=224)
        data_loader = build_inference_loader(dataset, batch_size=batch_size, num_workers=num_workers)

        """
        sample (dict): the sample information, it contains,
            --image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized
                by MEAN and STD, RGB channel;
            --orig_image (torch.Tensor): (3, height, width) is the in rage of [0, 1], RGB channel;
            --im_shape (torch.Tensor): (height, width)
            --keypoints (dict): (num_joints, 3), and num_joints could be [75,].
            --center (torch.Tensor): (2,);
            --start_pt (torch.Tensor): (2,);
            --scale (torch.Tensor): (1,);
            --img_path (str): the image path.
        """

        all_init_smpls = []
        all_pose3d_img_ids = []
        for sample in tqdm(data_loader):
            images = sample["image"].to(self.device)
            start_pt = sample["start_pt"].to(self.device)
            scale = sample["scale"][:, None].to(self.device).float()
            im_shape = sample["im_shape"][:, 0:1].to(self.device)

            img_ids = sample["img_id"]

            # Initial SPIN prediction (no SMPLify refinement in this variant).
            with torch.no_grad():
                init_smpls = self.model(images)
                cams_orig = cam_init2orig(init_smpls[:, 0:3], scale, start_pt)
                cams = cam_norm(cams_orig, im_shape)
                init_smpls[:, 0:3] = cams

            if filter_invalid:
                # Drop frames whose rendered head region is too small to be reliable.
                init_smpls_info = self.get_details(init_smpls)
                head_boxes = cal_head_bbox(init_smpls_info["j2d"], image_size=512)
                valid = head_is_valid(head_boxes).nonzero(as_tuple=False)
                valid.squeeze_(-1)
                img_ids = img_ids[valid]

            all_init_smpls.append(init_smpls.cpu())
            all_pose3d_img_ids.append(img_ids.cpu())

        all_init_smpls = torch.cat(all_init_smpls, dim=0)
        all_valid_ids = torch.cat(all_pose3d_img_ids, dim=0)

        smpl_infos = {
            "all_init_smpls": all_init_smpls,
            "all_opt_smpls": None,
            "all_valid_ids": all_valid_ids
        }
        return smpl_infos

    def get_details(self, smpls):
        # Delegates to the SMPL body model; see `SMPL.get_details` for the
        # returned keys (it includes at least "j2d", used above).
        return self._smpl.get_details(smpls)

    @property
    def mean_theta(self):
        # Concatenate the network's initial camera, pose and shape into a
        # single mean theta vector (85-dim per the run*() docstrings).
        mean_cam = self.model.init_cam
        mean_pose = self.model.init_pose
        mean_shape = self.model.init_shape
        mean_theta = torch.cat([mean_cam, mean_pose, mean_shape], dim=-1)[0]
        return mean_theta

    @property
    def body_model(self):
        # The underlying SMPL body model used for splitting/skinning thetas.
        return self._smpl
| [
"iPERCore.tools.utils.dataloaders.build_inference_loader",
"numpy.copy",
"torch.as_tensor",
"iPERCore.tools.utils.geometry.boxes.cal_head_bbox",
"iPERCore.tools.utils.filesio.persistence.load_toml_file",
"torch.load",
"tqdm.tqdm",
"iPERCore.tools.human_digitalizer.bodynets.SMPL",
"torch.tensor",
"... | [((811, 830), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (823, 830), False, 'import torch\n'), ((2079, 2107), 'torch.load', 'torch.load', (["cfg['ckpt_path']"], {}), "(cfg['ckpt_path'])\n", (2089, 2107), False, 'import torch\n'), ((2970, 2984), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2977, 2984), True, 'import numpy as np\n'), ((5641, 5720), 'iPERCore.tools.utils.dataloaders.build_inference_loader', 'build_inference_loader', (['dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, num_workers=num_workers)\n', (5663, 5720), False, 'from iPERCore.tools.utils.dataloaders import build_inference_loader\n'), ((6495, 6512), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (6499, 6512), False, 'from tqdm import tqdm\n'), ((7909, 7941), 'torch.cat', 'torch.cat', (['all_init_smpls'], {'dim': '(0)'}), '(all_init_smpls, dim=0)\n', (7918, 7941), False, 'import torch\n'), ((7966, 7997), 'torch.cat', 'torch.cat', (['all_opt_smpls'], {'dim': '(0)'}), '(all_opt_smpls, dim=0)\n', (7975, 7997), False, 'import torch\n'), ((8022, 8058), 'torch.cat', 'torch.cat', (['all_pose3d_img_ids'], {'dim': '(0)'}), '(all_pose3d_img_ids, dim=0)\n', (8031, 8058), False, 'import torch\n'), ((9432, 9511), 'iPERCore.tools.utils.dataloaders.build_inference_loader', 'build_inference_loader', (['dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, num_workers=num_workers)\n', (9454, 9511), False, 'from iPERCore.tools.utils.dataloaders import build_inference_loader\n'), ((10259, 10276), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (10263, 10276), False, 'from tqdm import tqdm\n'), ((11268, 11300), 'torch.cat', 'torch.cat', (['all_init_smpls'], {'dim': '(0)'}), '(all_init_smpls, dim=0)\n', (11277, 11300), False, 'import torch\n'), ((11325, 11361), 'torch.cat', 'torch.cat', (['all_pose3d_img_ids'], {'dim': '(0)'}), 
'(all_pose3d_img_ids, dim=0)\n', (11334, 11361), False, 'import torch\n'), ((3126, 3141), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3139, 3141), False, 'import torch\n'), ((3438, 3483), 'iPERCore.tools.utils.geometry.cam_pose_utils.cam_norm', 'cam_norm', (['cams_orig', "proc_info['im_shape'][0]"], {}), "(cams_orig, proc_info['im_shape'][0])\n", (3446, 3483), False, 'from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm\n'), ((6972, 7022), 'iPERCore.tools.utils.geometry.cam_pose_utils.cam_init2orig', 'cam_init2orig', (['init_smpls[:, 0:3]', 'scale', 'start_pt'], {}), '(init_smpls[:, 0:3], scale, start_pt)\n', (6985, 7022), False, 'from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm\n'), ((7042, 7071), 'iPERCore.tools.utils.geometry.cam_pose_utils.cam_norm', 'cam_norm', (['cams_orig', 'im_shape'], {}), '(cams_orig, im_shape)\n', (7050, 7071), False, 'from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm\n'), ((7310, 7406), 'torch.cat', 'torch.cat', (["[cams, smplify_results['new_opt_pose'], smplify_results['new_opt_betas']]"], {'dim': '(1)'}), "([cams, smplify_results['new_opt_pose'], smplify_results[\n 'new_opt_betas']], dim=1)\n", (7319, 7406), False, 'import torch\n'), ((10670, 10720), 'iPERCore.tools.utils.geometry.cam_pose_utils.cam_init2orig', 'cam_init2orig', (['init_smpls[:, 0:3]', 'scale', 'start_pt'], {}), '(init_smpls[:, 0:3], scale, start_pt)\n', (10683, 10720), False, 'from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm\n'), ((10740, 10769), 'iPERCore.tools.utils.geometry.cam_pose_utils.cam_norm', 'cam_norm', (['cams_orig', 'im_shape'], {}), '(cams_orig, im_shape)\n', (10748, 10769), False, 'from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm\n'), ((11813, 11865), 'torch.cat', 'torch.cat', (['[mean_cam, mean_pose, mean_shape]'], {'dim': '(-1)'}), '([mean_cam, mean_pose, mean_shape], dim=-1)\n', (11822, 
11865), False, 'import torch\n'), ((1934, 1961), 'iPERCore.tools.utils.filesio.persistence.load_toml_file', 'load_toml_file', (['cfg_or_path'], {}), '(cfg_or_path)\n', (1948, 1961), False, 'from iPERCore.tools.utils.filesio.persistence import load_toml_file\n'), ((2216, 2238), 'iPERCore.tools.human_digitalizer.bodynets.SMPL', 'SMPL', (["cfg['smpl_path']"], {}), "(cfg['smpl_path'])\n", (2220, 2238), False, 'from iPERCore.tools.human_digitalizer.bodynets import SMPL\n'), ((6883, 6898), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6896, 6898), False, 'import torch\n'), ((7524, 7576), 'iPERCore.tools.utils.geometry.boxes.cal_head_bbox', 'cal_head_bbox', (["opt_smpls_info['j2d']"], {'image_size': '(512)'}), "(opt_smpls_info['j2d'], image_size=512)\n", (7537, 7576), False, 'from iPERCore.tools.utils.geometry.boxes import cal_head_bbox\n'), ((10581, 10596), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10594, 10596), False, 'import torch\n'), ((10932, 10985), 'iPERCore.tools.utils.geometry.boxes.cal_head_bbox', 'cal_head_bbox', (["init_smpls_info['j2d']"], {'image_size': '(512)'}), "(init_smpls_info['j2d'], image_size=512)\n", (10945, 10985), False, 'from iPERCore.tools.utils.geometry.boxes import cal_head_bbox\n'), ((1694, 1732), 'torch.as_tensor', 'torch.as_tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1709, 1732), False, 'import torch\n'), ((1789, 1827), 'torch.as_tensor', 'torch.as_tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1804, 1827), False, 'import torch\n'), ((3060, 3082), 'torch.tensor', 'torch.tensor', (['proc_img'], {}), '(proc_img)\n', (3072, 3082), False, 'import torch\n'), ((3354, 3409), 'torch.tensor', 'torch.tensor', (["proc_info['start_pt']"], {'device': 'self.device'}), "(proc_info['start_pt'], device=self.device)\n", (3366, 3409), False, 'import torch\n')] |
import os
import gzip
import time
import copy
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.optim import Adam, lr_scheduler
class Net(nn.Module):
    """Small CNN for 28x28 single-channel images (e.g. MNIST), 10 classes.

    Feature-map sizes (N = (W - F + 2P) / S + 1):
        conv1 -> [32, 26, 26], conv2 -> [64, 24, 24], max_pool2d -> [64, 12, 12]
    """

    def __init__(self):
        super().__init__()
        # N = (W - F + 2P) / S + 1
        self.conv1 = nn.Conv2d(1, 32, 3, 1)  # [32, 26, 26]
        self.conv2 = nn.Conv2d(32, 64, 3, 1)  # [64, 24, 24]
        # max_pool2d [64, 12, 12]
        self.dropout1 = nn.Dropout(0.25)
        self.fc1 = nn.Linear(64 * 12 * 12, 128)  # [128]
        self.dropout2 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(128, 10)  # [10]

    def forward(self, x):
        """Return raw class logits of shape [batch, 10]."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, (2, 2))
        x = self.dropout1(x)
        x = torch.flatten(x, 1)  # flatten all dims except batch
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        # Bug fix: no ReLU on the output layer. Clamping logits at zero
        # before CrossEntropyLoss discards the negative half of the scores
        # and hampers training; return raw logits instead.
        return self.fc2(x)

    def num_flat_features(self, x):
        """Number of features per sample (all dims except batch).

        Kept for backward compatibility; forward() now uses torch.flatten.
        """
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
class MyDataset(Dataset):
    """Dataset over parallel sample/label sequences with optional transforms.

    Each item is the pair ``(X[idx], Y[idx])``, passed through ``transform``
    and ``target_transform`` respectively when those are supplied.
    """

    def __init__(self, X, Y, transform=None, target_transform=None):
        self.X = X
        self.Y = Y
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        sample, label = self.X[idx], self.Y[idx]
        if self.transform:
            sample = self.transform(sample)
        if self.target_transform:
            label = self.target_transform(label)
        return sample, label
def extract_data(data_dir, x_name, y_name):
    """Parse a pair of gzipped IDX files (MNIST-style images and labels).

    Parameters
    ----------
    data_dir : str
        Directory containing the two gzip files.
    x_name : str
        Image file name; 16-byte header of big-endian uint32 fields
        (magic, count, rows, cols) followed by raw uint8 pixels.
    y_name : str
        Label file name; 8-byte header (magic, count) followed by uint8 labels.

    Returns
    -------
    X : np.ndarray of uint8, shape (n, height, width, 1)
    Y : np.ndarray of int64, shape (n,)
    """
    x_path = os.path.join(data_dir, x_name)
    y_path = os.path.join(data_dir, y_name)
    with gzip.open(x_path, 'rb') as f:
        X_content = f.read()
    with gzip.open(y_path, 'rb') as f:
        Y_content = f.read()
    # Image dimensions come from the IDX header (big-endian uint32 fields).
    height = int.from_bytes(X_content[8:12], 'big')
    width = int.from_bytes(X_content[12:16], 'big')
    pixels = height * width
    X = []
    for i in range(16, len(X_content), pixels):
        # Bug fix: reshape with the header's dimensions instead of the
        # hard-coded (28, 28, 1), so non-MNIST-sized IDX files parse too.
        X.append(np.frombuffer(X_content[i:i + pixels], dtype=np.uint8)
                 .reshape(height, width, 1))
    # Labels are the raw bytes after the 8-byte header.
    Y = np.frombuffer(Y_content, dtype=np.uint8, offset=8)
    return np.array(X), Y.astype(np.int64)
if __name__ == '__main__':
    # Make CUDA errors surface at the failing call instead of asynchronously.
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    mnist_dir = "./data"  # folder holding the downloaded MNIST gzip files
    X_train, Y_train = extract_data(mnist_dir, "train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz")
    X_test, Y_test = extract_data(mnist_dir, "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz")
    # Hold out 30% of the training images for validation.
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.3, random_state=123)
    print(X_train.shape, X_val.shape)
    print(Y_train.shape, Y_val.shape)
    net = Net()
    print(net)
    # ToTensor converts HWC uint8 arrays to CHW float tensors in [0, 1].
    transform = transforms.ToTensor()
    datasets = {
        'train': MyDataset(X_train, Y_train, transform),
        'val': MyDataset(X_val, Y_val, transform)
    }
    dataset_sizes = {
        'train': len(datasets['train']),
        'val': len(datasets['val'])
    }
    batch_size = 512
    workers = 4
    dataloaders = {
        'train': DataLoader(datasets['train'], batch_size=batch_size, num_workers=workers, shuffle=True, pin_memory=True),
        'val': DataLoader(datasets['val'], batch_size=batch_size, num_workers=workers, shuffle=False, pin_memory=True)
    }
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=1e-3)
    epochs = 10
    since = time.time()
    # Track the best validation accuracy and the weights that achieved it.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch, epochs - 1))
        print('-' * 10)
        # One training pass followed by one validation pass per epoch.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # forward; gradients are only needed during training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: loss.item() is a per-batch mean, so re-weight
                # by the batch size before averaging over the dataset
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # Snapshot the weights whenever validation accuracy improves.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # Evaluate the best checkpoint on the held-out test set.
    test_set = MyDataset(X_test, Y_test, transform)
    test_dataloader = DataLoader(test_set, batch_size=batch_size, num_workers=workers, pin_memory=True)
    total = len(test_set)
    correct = 0
    model.load_state_dict(best_model_wts)
    model.eval()
    with torch.no_grad():
        for inputs, labels in test_dataloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            correct += torch.sum(preds == labels.data)
    print()
    print("Test Acc: {:.4f}".format(correct.double() / total))
    # Persist the best weights (state_dict only, not the full module).
    path = "mnist.pt"
    torch.save(best_model_wts, path)
| [
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"gzip.open",
"torch.max",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"numpy.frombuffer",
"torchvision.transforms.ToTensor",
"sklearn.model_selection.train_test_split",
"torch.save",
"torch.no_grad",
"torch.nn.functional.max_pool2d"... | [((1911, 1941), 'os.path.join', 'os.path.join', (['data_dir', 'x_name'], {}), '(data_dir, x_name)\n', (1923, 1941), False, 'import os\n'), ((1955, 1985), 'os.path.join', 'os.path.join', (['data_dir', 'y_name'], {}), '(data_dir, y_name)\n', (1967, 1985), False, 'import os\n'), ((2528, 2539), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2536, 2539), True, 'import numpy as np\n'), ((2548, 2575), 'numpy.array', 'np.array', (['Y'], {'dtype': 'np.int64'}), '(Y, dtype=np.int64)\n', (2556, 2575), True, 'import numpy as np\n'), ((2958, 3025), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(0.3)', 'random_state': '(123)'}), '(X_train, Y_train, test_size=0.3, random_state=123)\n', (2974, 3025), False, 'from sklearn.model_selection import train_test_split\n'), ((3151, 3172), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3170, 3172), False, 'from torchvision import transforms\n'), ((3837, 3858), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3856, 3858), True, 'import torch.nn as nn\n'), ((3938, 3949), 'time.time', 'time.time', ([], {}), '()\n', (3947, 3949), False, 'import time\n'), ((5740, 5826), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'batch_size', 'num_workers': 'workers', 'pin_memory': '(True)'}), '(test_set, batch_size=batch_size, num_workers=workers, pin_memory\n =True)\n', (5750, 5826), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6315, 6347), 'torch.save', 'torch.save', (['best_model_wts', 'path'], {}), '(best_model_wts, path)\n', (6325, 6347), False, 'import torch\n'), ((491, 513), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)', '(3)', '(1)'], {}), '(1, 32, 3, 1)\n', (500, 513), True, 'import torch.nn as nn\n'), ((551, 574), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3)', '(1)'], {}), '(32, 64, 3, 1)\n', (560, 574), True, 'import torch.nn as nn\n'), 
((649, 665), 'torch.nn.Dropout', 'nn.Dropout', (['(0.25)'], {}), '(0.25)\n', (659, 665), True, 'import torch.nn as nn\n'), ((685, 713), 'torch.nn.Linear', 'nn.Linear', (['(64 * 12 * 12)', '(128)'], {}), '(64 * 12 * 12, 128)\n', (694, 713), True, 'import torch.nn as nn\n'), ((747, 762), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (757, 762), True, 'import torch.nn as nn\n'), ((782, 800), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(10)'], {}), '(128, 10)\n', (791, 800), True, 'import torch.nn as nn\n'), ((916, 939), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2, 2)'], {}), '(x, (2, 2))\n', (928, 939), True, 'import torch.nn.functional as F\n'), ((1996, 2019), 'gzip.open', 'gzip.open', (['x_path', '"""rb"""'], {}), "(x_path, 'rb')\n", (2005, 2019), False, 'import gzip\n'), ((2064, 2087), 'gzip.open', 'gzip.open', (['y_path', '"""rb"""'], {}), "(y_path, 'rb')\n", (2073, 2087), False, 'import gzip\n'), ((3486, 3594), 'torch.utils.data.DataLoader', 'DataLoader', (["datasets['train']"], {'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(True)', 'pin_memory': '(True)'}), "(datasets['train'], batch_size=batch_size, num_workers=workers,\n shuffle=True, pin_memory=True)\n", (3496, 3594), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3607, 3714), 'torch.utils.data.DataLoader', 'DataLoader', (["datasets['val']"], {'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(False)', 'pin_memory': '(True)'}), "(datasets['val'], batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n", (3617, 3714), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5499, 5510), 'time.time', 'time.time', ([], {}), '()\n', (5508, 5510), False, 'import time\n'), ((5934, 5949), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5947, 5949), False, 'import torch\n'), ((3756, 3781), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3779, 3781), False, 'import 
torch\n'), ((6135, 6156), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (6144, 6156), False, 'import torch\n'), ((6180, 6211), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (6189, 6211), False, 'import torch\n'), ((5051, 5082), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (5060, 5082), False, 'import torch\n'), ((2381, 2435), 'numpy.frombuffer', 'np.frombuffer', (['X_content[i:i + pixels]'], {'dtype': 'np.uint8'}), '(X_content[i:i + pixels], dtype=np.uint8)\n', (2394, 2435), True, 'import numpy as np\n'), ((4578, 4618), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (4600, 4618), False, 'import torch\n'), ((4695, 4716), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (4704, 4716), False, 'import torch\n')] |
'''
Experiment:
Classify using ngram-derived features.
"ngram" is the model trained one sample at a time;
"ngram-batch" is the ngram model trained with batches.
Training ran for 10 epochs.
After training, the feature vectors obtained by passing the original data
through the ngram model were saved to ./data/ngram_featrue_x.npy
Observed training accuracy: 0.5219466871716006
'''
import numpy as np
from sklearn.linear_model import LogisticRegression
import pandas as pd

# Load the pre-computed ngram feature vectors and their labels.
x_train = np.array(np.load('./data/ngram_featrue_x.npy'))
y_train = np.load('./data/y_train.npy')
print(x_train.shape)
print(y_train.shape)

# Fit a logistic-regression classifier on the ngram features.
logist = LogisticRegression()
logist.fit(x_train,y_train)

# Evaluate on the same data (training accuracy, matching the docstring figure).
predicted = logist.predict(x_train)
print(np.mean(predicted == y_train))
| [
"numpy.mean",
"numpy.load",
"sklearn.linear_model.LogisticRegression"
] | [((327, 356), 'numpy.load', 'np.load', (['"""./data/y_train.npy"""'], {}), "('./data/y_train.npy')\n", (334, 356), True, 'import numpy as np\n'), ((413, 433), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (431, 433), False, 'from sklearn.linear_model import LogisticRegression\n'), ((277, 314), 'numpy.load', 'np.load', (['"""./data/ngram_featrue_x.npy"""'], {}), "('./data/ngram_featrue_x.npy')\n", (284, 314), True, 'import numpy as np\n'), ((509, 538), 'numpy.mean', 'np.mean', (['(predicted == y_train)'], {}), '(predicted == y_train)\n', (516, 538), True, 'import numpy as np\n')] |
"""
Figure 3. Heterovalent bispecific
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from .figureCommon import getSetup, subplotLabel, setFontSize, heatmap, cellPopulations, overlapCellPopulation
from valentbind import polyc, polyfc
# Receptor-abundance population pairs (target, off-target) compared in the
# selectivity line plots; the names index into cellPopulations.
pairs = [(r"$R_1^{hi}R_2^{lo}$", r"$R_1^{med}R_2^{lo}$"), (r"$R_1^{hi}R_2^{hi}$", r"$R_1^{med}R_2^{med}$"),
         (r"$R_1^{med}R_2^{hi}$", r"$R_1^{hi}R_2^{med}$"), (r"$R_1^{hi}R_2^{lo}$", r"$R_1^{lo}R_2^{hi}$")]
def makeFigure():
    """ main function for Figure 3 """
    # 4x3 panel grid; subplot labels skip the placeholder panels 1 and 2
    ax, f = getSetup((10, 12), (4, 3))
    subplotLabel(ax, [0, 3, 4, 5, 6, 7, 8, 9, 10])
    # top row is left blank (schematic panels are added outside matplotlib)
    ax[0].axis("off")
    ax[1].axis("off")
    ax[2].axis("off")
    L0 = 1e-8  # ligand concentration passed to the binding models
    Kav = [[1e7, 1e5], [1e5, 1e6]]  # Kav matrix used by polyc/polyfc
    KxStar = 1e-12
    # Row 2: ligand-bound (Lbound) heatmaps comparing the bispecific against
    # monovalent and homo-bivalent mixtures at matched total binding sites
    heatmap(ax[3], L0, KxStar, Kav, [1.0], Cplx=[[1, 1]], vrange=(2, 12), fully=False,
            title="Bispecific Lbound, $K_x^*$={} cell·M".format(KxStar), cbar=False, layover=1)
    heatmap(ax[4], L0 * 2, KxStar, Kav, [0.5, 0.5], f=1, vrange=(2, 12), fully=False,
            title="Monovalents mixture Lbound, $K_x^*$={} cell·M".format(KxStar), cbar=False, layover=1)
    heatmap(ax[5], L0, KxStar, Kav, [0.5, 0.5], Cplx=[[2, 0], [0, 2]], vrange=(2, 12), fully=False,
            title="Bivalents mixture Lbound, $K_x^*$={} cell·M".format(KxStar), cbar=True, layover=1)
    # Row 3: fully-bound bispecific at three crosslinking constants Kx*;
    # only the last panel carries the colorbar
    for i, KxStar in enumerate([1e-10, 1e-12, 1e-14]):
        heatmap(ax[i + 6], L0, KxStar, Kav, [1.0], Cplx=[[1, 1]], vrange=(2, 12), fully=True,
                title="Bispecific log fully bound, $K_x^*$={} cell·M".format(KxStar), cbar=(i == 2), layover=1)
    for i in range(3, 9):
        ax[i].set(xlabel="Receptor 1 Abundance [#/cell]", ylabel='Receptor 2 Abundance [#/cell]')
    # Row 4: selectivity-ratio line plots across Kx*
    KxStarVary(ax[9], L0, Kav, ylim=(-9, 9), compare="tether")
    KxStarVary(ax[10], L0, Kav, ylim=(-9, 9), compare="bisp", fully=True)
    ax[11].axis("off")
    setFontSize(ax, 9.4, xsci=[3, 4, 5, 6, 7, 8, 9, 10], ysci=[3, 4, 5, 6, 7, 8])
    return f
def tetheredYN(L0, KxStar, Rtot, Kav, fully=True):
    """ Compare tethered (bispecific) vs monovalent """
    # polyc output index 2 holds the fully-bound quantity, index 0 Lbound.
    row = 2 if fully else 0
    bispecific = polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[row][0]
    monovalent_mix = polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0]
    return bispecific / monovalent_mix
def mixBispecYN(L0, KxStar, Rtot, Kav, fully=True):
    """ Compare bispecific to mixture of bivalent """
    # polyc output index 2 holds the fully-bound quantity, index 0 Lbound.
    row = 2 if fully else 0
    bispecific = polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[row][0]
    bivalent_mix = np.sum(polyc(L0, KxStar, Rtot, [[2, 0], [0, 2]], [0.5, 0.5], Kav)[row])
    return bispecific / bivalent_mix
def normHeatmap(ax, L0, KxStar, Kav, vrange=(-4, 2), title="", cbar=False, fully=True, layover=True, normby=tetheredYN):
    """ Make a heatmap normalized by another binding value

    Parameters
    ----------
    ax : matplotlib axis to draw on.
    L0, KxStar, Kav : binding-model parameters forwarded to `normby`.
    vrange : (min, max) of the log-ratio color scale.
    title : panel title.
    cbar : attach a colorbar when True.
    fully : forwarded to `normby` (fully-bound vs ligand-bound comparison).
    layover : overlay the named cell populations on the heatmap.
    normby : function returning the ratio to plot (e.g. tetheredYN,
        mixBispecYN).
    """
    nAbdPts = 70
    abundRange = (1.5, 4.5)  # log10 receptor-abundance range scanned on both axes
    abundScan = np.logspace(abundRange[0], abundRange[1], nAbdPts)
    func = np.vectorize(lambda abund1, abund2: normby(L0, KxStar, [abund1, abund2], Kav, fully=fully))
    X, Y = np.meshgrid(abundScan, abundScan)
    logZ = np.log(func(X, Y))
    # contour lines every half log-unit of the ratio
    contours = ax.contour(X, Y, logZ, levels=np.arange(-20, 20, 0.5), colors="black", linewidths=0.5)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_title(title)
    plt.clabel(contours, inline=True, fontsize=6)
    ax.pcolor(X, Y, logZ, cmap='RdYlGn', vmin=vrange[0], vmax=vrange[1])
    norm = plt.Normalize(vmin=vrange[0], vmax=vrange[1])
    if cbar:
        # pcolor is drawn without its own mappable limits exposed, so build
        # a ScalarMappable with the same norm/cmap for the colorbar
        cbar = ax.figure.colorbar(cm.ScalarMappable(norm=norm, cmap='RdYlGn'), ax=ax)
        cbar.set_label("Log ratio")
    if layover:
        overlapCellPopulation(ax, abundRange)
def selectivity(pop1name, pop2name, L0, KxStar, Cplx, Ctheta, Kav, fully=True, untethered=False):
    """ Always calculate the full binding of the 1st kind of complex """
    # Convert the named populations' log10 abundances to receptor counts.
    abund1 = np.power(10, (cellPopulations[pop1name][0], cellPopulations[pop1name][1]))
    abund2 = np.power(10, (cellPopulations[pop2name][0], cellPopulations[pop2name][1]))
    if untethered:  # mixture of monovalent ligands instead of a complex
        num = polyfc(L0, KxStar, 1, abund1, [0.5, 0.5], Kav)[0]
        den = polyfc(L0, KxStar, 1, abund2, [0.5, 0.5], Kav)[0]
        return num / den
    # polyc output index 2 holds fully-bound quantities, index 0 Lbound.
    row = 2 if fully else 0
    num = np.sum(polyc(L0, KxStar, abund1, Cplx, Ctheta, Kav)[row])
    den = np.sum(polyc(L0, KxStar, abund2, Cplx, Ctheta, Kav)[row])
    return num / den
def KxStarVary(ax, L0, Kav, ylim=(-7, 5), fully=True, compare=None):
    """ Line plot for selectivity with different KxStar

    Parameters
    ----------
    ax : matplotlib axis to draw on.
    L0 : ligand concentration.
    Kav : association-constant matrix forwarded to selectivity().
    ylim : y-axis limits.
    fully : use fully-bound (True) or ligand-bound (False) quantities.
    compare : None plots the raw log selectivity of the [1, 1] bispecific;
        "tether" plots its ratio to a monovalent mixture, "bisp" to a
        homo-bivalent mixture, and "fully" the fully-bound / ligand-bound
        selectivity ratio.
    """
    nPoints = 50
    Kxaxis = np.logspace(-15, -7, nPoints)
    colors = ["royalblue", "orange", "limegreen", "orangered"]
    sHolder = np.zeros((nPoints))
    # One curve per (target, off-target) population pair.
    for i, pair in enumerate(pairs):
        for j, KxStar in enumerate(Kxaxis):
            if compare == "tether":
                sHolder[j] = selectivity(pair[0], pair[1], L0, KxStar, [[1, 1]], [1], Kav, fully=fully, untethered=False) \
                    / selectivity(pair[0], pair[1], L0 * 2, KxStar, None, None, Kav, untethered=True)
            elif compare == "bisp":
                sHolder[j] = selectivity(pair[0], pair[1], L0, KxStar, [[1, 1]], [1], Kav, fully=fully, untethered=False) \
                    / selectivity(pair[0], pair[1], L0, KxStar, [[2, 0], [0, 2]], [0.5, 0.5], Kav, fully=fully)
            elif compare == "fully":
                # Bug fix: this previously compared against " fully" (stray
                # leading space), so the branch was unreachable even though
                # the title block below tests for "fully".
                sHolder[j] = np.log(selectivity(pair[0], pair[1], L0, KxStar, [[1, 1]], [1], Kav, fully=True, untethered=False)) \
                    / np.log(selectivity(pair[0], pair[1], L0, KxStar, [[1, 1]], [1], Kav, fully=False, untethered=False))
            else:
                sHolder[j] = np.log(selectivity(pair[0], pair[1], L0, KxStar, [[1, 1]], [1], Kav, fully=fully, untethered=False))
        ax.plot(Kxaxis, sHolder, color=colors[i], label=pair[0] + " to " + pair[1], linestyle="-")
    ax.set(xlim=(1e-15, 1e-7), ylim=ylim,
           xlabel="$K_x^*$")
    ax.set_xscale('log')
    if compare == "tether":
        ax.set_ylabel("Bispecific selectivity / Monovalent selectivity")
        ax.set_title("Bispecific advantage over monovalent mix")
    elif compare == "bisp":
        ax.set_ylabel("Bispecific selectivity / Bivalent selectivity")
        ax.set_title("Bispecific advantage over homo-bivalent mix")
    elif compare == "fully":
        ax.set_ylabel("Ratio of selectivity")
        ax.set_title("Fully bound selectivity / Ligand bound selectivity")
    else:
        ax.set_ylabel("Log selectivity of [1, 1]")
        if fully:
            ax.set_title("Log selectivity varies with $K_x^*$ for Lfbnd")
        else:
            ax.set_title("Log selectivity varies with $K_x^*$ for Lbound")
    ax.legend(loc='lower right', fancybox=True, framealpha=1)
| [
"numpy.power",
"matplotlib.pyplot.Normalize",
"numpy.zeros",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.clabel",
"numpy.meshgrid",
"numpy.logspace",
"valentbind.polyfc",
"valentbind.polyc",
"numpy.arange"
] | [((3081, 3131), 'numpy.logspace', 'np.logspace', (['abundRange[0]', 'abundRange[1]', 'nAbdPts'], {}), '(abundRange[0], abundRange[1], nAbdPts)\n', (3092, 3131), True, 'import numpy as np\n'), ((3247, 3280), 'numpy.meshgrid', 'np.meshgrid', (['abundScan', 'abundScan'], {}), '(abundScan, abundScan)\n', (3258, 3280), True, 'import numpy as np\n'), ((3492, 3537), 'matplotlib.pyplot.clabel', 'plt.clabel', (['contours'], {'inline': '(True)', 'fontsize': '(6)'}), '(contours, inline=True, fontsize=6)\n', (3502, 3537), True, 'import matplotlib.pyplot as plt\n'), ((3622, 3667), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': 'vrange[0]', 'vmax': 'vrange[1]'}), '(vmin=vrange[0], vmax=vrange[1])\n', (3635, 3667), True, 'import matplotlib.pyplot as plt\n'), ((4896, 4925), 'numpy.logspace', 'np.logspace', (['(-15)', '(-7)', 'nPoints'], {}), '(-15, -7, nPoints)\n', (4907, 4925), True, 'import numpy as np\n'), ((5004, 5021), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (5012, 5021), True, 'import numpy as np\n'), ((3357, 3380), 'numpy.arange', 'np.arange', (['(-20)', '(20)', '(0.5)'], {}), '(-20, 20, 0.5)\n', (3366, 3380), True, 'import numpy as np\n'), ((3715, 3758), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': '"""RdYlGn"""'}), "(norm=norm, cmap='RdYlGn')\n", (3732, 3758), True, 'import matplotlib.cm as cm\n'), ((2198, 2246), 'valentbind.polyfc', 'polyfc', (['(L0 * 2)', 'KxStar', '(1)', 'Rtot', '[0.5, 0.5]', 'Kav'], {}), '(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)\n', (2204, 2246), False, 'from valentbind import polyc, polyfc\n'), ((2343, 2391), 'valentbind.polyfc', 'polyfc', (['(L0 * 2)', 'KxStar', '(1)', 'Rtot', '[0.5, 0.5]', 'Kav'], {}), '(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)\n', (2349, 2391), False, 'from valentbind import polyc, polyfc\n'), ((2130, 2175), 'valentbind.polyc', 'polyc', (['L0', 'KxStar', 'Rtot', '[[1, 1]]', '[1.0]', 'Kav'], {}), '(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)\n', (2135, 
2175), False, 'from valentbind import polyc, polyfc\n'), ((2275, 2320), 'valentbind.polyc', 'polyc', (['L0', 'KxStar', 'Rtot', '[[1, 1]]', '[1.0]', 'Kav'], {}), '(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)\n', (2280, 2320), False, 'from valentbind import polyc, polyfc\n'), ((2533, 2578), 'valentbind.polyc', 'polyc', (['L0', 'KxStar', 'Rtot', '[[1, 1]]', '[1.0]', 'Kav'], {}), '(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)\n', (2538, 2578), False, 'from valentbind import polyc, polyfc\n'), ((2608, 2666), 'valentbind.polyc', 'polyc', (['L0', 'KxStar', 'Rtot', '[[2, 0], [0, 2]]', '[0.5, 0.5]', 'Kav'], {}), '(L0, KxStar, Rtot, [[2, 0], [0, 2]], [0.5, 0.5], Kav)\n', (2613, 2666), False, 'from valentbind import polyc, polyfc\n'), ((2696, 2741), 'valentbind.polyc', 'polyc', (['L0', 'KxStar', 'Rtot', '[[1, 1]]', '[1.0]', 'Kav'], {}), '(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)\n', (2701, 2741), False, 'from valentbind import polyc, polyfc\n'), ((2771, 2829), 'valentbind.polyc', 'polyc', (['L0', 'KxStar', 'Rtot', '[[2, 0], [0, 2]]', '[0.5, 0.5]', 'Kav'], {}), '(L0, KxStar, Rtot, [[2, 0], [0, 2]], [0.5, 0.5], Kav)\n', (2776, 2829), False, 'from valentbind import polyc, polyfc\n'), ((4259, 4277), 'numpy.power', 'np.power', (['(10)', 'pop1'], {}), '(10, pop1)\n', (4267, 4277), True, 'import numpy as np\n'), ((4337, 4355), 'numpy.power', 'np.power', (['(10)', 'pop2'], {}), '(10, pop2)\n', (4345, 4355), True, 'import numpy as np\n'), ((4431, 4449), 'numpy.power', 'np.power', (['(10)', 'pop1'], {}), '(10, pop1)\n', (4439, 4449), True, 'import numpy as np\n'), ((4515, 4533), 'numpy.power', 'np.power', (['(10)', 'pop2'], {}), '(10, pop2)\n', (4523, 4533), True, 'import numpy as np\n'), ((4608, 4626), 'numpy.power', 'np.power', (['(10)', 'pop1'], {}), '(10, pop1)\n', (4616, 4626), True, 'import numpy as np\n'), ((4692, 4710), 'numpy.power', 'np.power', (['(10)', 'pop2'], {}), '(10, pop2)\n', (4700, 4710), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ============================================================================ #
from matplotlib.ticker import ScalarFormatter
import numpy as np
import matplotlib.pyplot as plt
# ============================================================================ #
# prepare figure: four stacked time-series axes (jerk / accel / velocity / position)
fig_t, ax_t = plt.subplots(4, 1, figsize=(6, 8))
# ============================================================================ #
# plot: each accel_<i>.csv holds one trajectory segment with columns t,j,a,v,x
filebase = f'./accel'  # t,j,a,v,x
for i in range(8):
    raw = np.loadtxt(f"{filebase}_{i}.csv", delimiter=',', ndmin=2)
    if raw.size == 0:
        # empty segment file: substitute a 0-row, 5-column array so the
        # slicing below still works and the segment just plots nothing
        raw = np.empty(shape=(0, 5))
    t = raw[:, 0]
    value = raw[:, 1:1+4]
    # theta
    for k in range(4):
        ax_t[k].plot(t, value[:, k], lw=4)
# ============================================================================ #
# t style: label each axis; scientific y-notation on all but the bottom panel
ylabels = ['jerk [m/s/s/s]', 'accel. [m/s/s]',
           'velocity [m/s]', 'position [m]']
titles = ['Jerk', 'Acceleration', 'Velocity', 'Position']
for i, ax in enumerate(ax_t):
    ax.grid(which='both')
    ax.set_ylabel(ylabels[i])
    ax.set_title(titles[i])
for ax in ax_t[0:-1]:
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
ax_t[-1].set_xlabel('time [s]')
# ============================================================================ #
# fit
fig_t.tight_layout()
# ============================================================================ #
# save both raster (.png) and vector (.svg) versions next to the input files
for ext in ['.png', '.svg']:
    fig_t.savefig(filebase + '_t' + ext)
# ============================================================================ #
# show
plt.show()
| [
"matplotlib.ticker.ScalarFormatter",
"numpy.empty",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((337, 371), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(6, 8)'}), '(4, 1, figsize=(6, 8))\n', (349, 371), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1674), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1672, 1674), True, 'import matplotlib.pyplot as plt\n'), ((525, 582), 'numpy.loadtxt', 'np.loadtxt', (['f"""{filebase}_{i}.csv"""'], {'delimiter': '""","""', 'ndmin': '(2)'}), "(f'{filebase}_{i}.csv', delimiter=',', ndmin=2)\n", (535, 582), True, 'import numpy as np\n'), ((619, 641), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 5)'}), '(shape=(0, 5))\n', (627, 641), True, 'import numpy as np\n'), ((1175, 1208), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (1190, 1208), False, 'from matplotlib.ticker import ScalarFormatter\n')] |
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.svm import SVC
import data_processing
import globe
import word2vec_gensim_train as train
# Classification pipeline (liyu)
def run_li():
    """Train a logistic-regression classifier (SGD, L1 penalty) on document
    vectors split by data_processing, print its accuracy, and plot the ROC
    curve."""
    # Read in the data
    # pos_file_path = '/Users/li/Kunyan/MyRepository/DeepNaturalLanguageProcessing/DeepNLP/data/test3.txt'
    # neg_file_path = '/Users/li/Kunyan/MyRepository/DeepNaturalLanguageProcessing/DeepNLP/data/test2.txt'
    pos_file_path = globe.file_pos
    neg_file_path = globe.file_neg
    tmp = data_processing.read_data(pos_file_path, neg_file_path)
    res = data_processing.data_split(tmp[0], tmp[1])
    train_vecs = res[0]
    test_vecs = res[1]
    label_train = res[2]
    label_test = res[3]
    # Train the classifier
    lr = SGDClassifier(loss='log', penalty='l1')
    lr.fit(train_vecs, label_train)
    print('Test Accuracy: %.2f' % lr.score(test_vecs, label_test))
    # ROC curve from the positive-class probabilities
    pred_probas = lr.predict_proba(test_vecs)[:, 1]
    fpr, tpr, _ = roc_curve(label_test, pred_probas)
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, label='area = %.2f' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.legend(loc='lower right')
    plt.show()
def run_zx():
    """Train an SVM on document vectors produced by word2vec_gensim_train,
    print its accuracy, and plot the ROC curve."""
    # Read and preprocess the data (train module)
    doc_vec_label = train.text_vecs_zx()
    # Split the data and attach labels
    doc_vec_label_split = train.train_test(doc_vec_label)
    train_data = doc_vec_label_split[0]
    test_data = doc_vec_label_split[1]
    y_train = np.array([r[0] for r in train_data])
    x_train = np.array([r[1] for r in train_data])
    y_test = np.array([r[0] for r in test_data])
    x_test = np.array([r[1] for r in test_data])
    # Collapse 3-D (2836 x 1 x 200) feature arrays to 2-D -- revisit this!!!
    nsamples, nx, ny = x_train.shape
    d2_train_dataset = x_train.reshape((nsamples, nx * ny))
    nsamples, nx, ny = x_test.shape
    d2_test_dataset = x_test.reshape((nsamples, nx * ny))
    # Train the classifier
    # lr = SGDClassifier(loss='log', penalty='l1')
    # lr =LR() # Logistics
    lr = SVC()
    lr.fit(d2_train_dataset, y_train)
    # Test accuracy
    print('Test Accuracy: %.2f' % lr.score(d2_test_dataset, y_test))
    # Visualize the output
    # pred_probas = lr.predict_proba(d2_test_dataset)[:, 1]
    # NOTE(review): hard class predictions (not probabilities) are fed to
    # roc_curve here, which yields a degenerate two-point ROC -- confirm intent.
    pred = lr.predict(d2_test_dataset)  # [:, 1]
    fpr, tpr, _ = roc_curve(y_test, pred)  # labels y must be 0 or 1
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, label='area = %.2f' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.legend(loc='lower right')
    plt.show()
if __name__ == "__main__":
run_zx()
| [
"data_processing.data_split",
"sklearn.linear_model.SGDClassifier",
"word2vec_gensim_train.text_vecs_zx",
"sklearn.svm.SVC",
"sklearn.metrics.auc",
"matplotlib.pyplot.plot",
"word2vec_gensim_train.train_test",
"numpy.array",
"sklearn.metrics.roc_curve",
"data_processing.read_data",
"matplotlib.p... | [((594, 649), 'data_processing.read_data', 'data_processing.read_data', (['pos_file_path', 'neg_file_path'], {}), '(pos_file_path, neg_file_path)\n', (619, 649), False, 'import data_processing\n'), ((660, 702), 'data_processing.data_split', 'data_processing.data_split', (['tmp[0]', 'tmp[1]'], {}), '(tmp[0], tmp[1])\n', (686, 702), False, 'import data_processing\n'), ((820, 859), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'penalty': '"""l1"""'}), "(loss='log', penalty='l1')\n", (833, 859), False, 'from sklearn.linear_model import SGDClassifier\n'), ((1036, 1070), 'sklearn.metrics.roc_curve', 'roc_curve', (['label_test', 'pred_probas'], {}), '(label_test, pred_probas)\n', (1045, 1070), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1085, 1098), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1088, 1098), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1103, 1152), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'label': "('area = %.2f' % roc_auc)"}), "(fpr, tpr, label='area = %.2f' % roc_auc)\n", (1111, 1152), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1188), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (1165, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1213), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1201, 1213), True, 'import matplotlib.pyplot as plt\n'), ((1218, 1239), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (1226, 1239), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1273), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1254, 1273), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1287, 1289), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1368), 
'word2vec_gensim_train.text_vecs_zx', 'train.text_vecs_zx', ([], {}), '()\n', (1366, 1368), True, 'import word2vec_gensim_train as train\n'), ((1412, 1443), 'word2vec_gensim_train.train_test', 'train.train_test', (['doc_vec_label'], {}), '(doc_vec_label)\n', (1428, 1443), True, 'import word2vec_gensim_train as train\n'), ((1538, 1574), 'numpy.array', 'np.array', (['[r[0] for r in train_data]'], {}), '([r[0] for r in train_data])\n', (1546, 1574), True, 'import numpy as np\n'), ((1589, 1625), 'numpy.array', 'np.array', (['[r[1] for r in train_data]'], {}), '([r[1] for r in train_data])\n', (1597, 1625), True, 'import numpy as np\n'), ((1640, 1675), 'numpy.array', 'np.array', (['[r[0] for r in test_data]'], {}), '([r[0] for r in test_data])\n', (1648, 1675), True, 'import numpy as np\n'), ((1689, 1724), 'numpy.array', 'np.array', (['[r[1] for r in test_data]'], {}), '([r[1] for r in test_data])\n', (1697, 1724), True, 'import numpy as np\n'), ((2055, 2060), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (2058, 2060), False, 'from sklearn.svm import SVC\n'), ((2322, 2345), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'pred'], {}), '(y_test, pred)\n', (2331, 2345), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2377, 2390), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2380, 2390), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2395, 2444), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'label': "('area = %.2f' % roc_auc)"}), "(fpr, tpr, label='area = %.2f' % roc_auc)\n", (2403, 2444), True, 'import matplotlib.pyplot as plt\n'), ((2449, 2480), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (2457, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2505), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2493, 2505), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2531), 'matplotlib.pyplot.ylim', 'plt.ylim', 
(['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2518, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2536, 2565), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2546, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2571, 2581), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2579, 2581), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
#
# Tests the basic methods of the DREAM MCMC method.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import unittest
import numpy as np
import pints
import pints.toy as toy
from shared import StreamCapture
# Consistent unit testing in Python 2 and 3: Python 2's unittest only offers
# assertRaisesRegexp, so alias it under the Python 3 name when missing.
try:
    unittest.TestCase.assertRaisesRegex
except AttributeError:
    unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
# Module-level flag, presumably toggled for verbose/diagnostic output while
# developing these tests (its use is outside this excerpt).
debug = False
class TestDreamMCMC(unittest.TestCase):
    """
    Tests the basic methods of the DREAM MCMC method.
    """
    @classmethod
    def setUpClass(cls):
        """ Prepare a problem for testing. """
        # Random seed
        np.random.seed(1)
        # Create toy model
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 1000)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)
        # Add noise
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)
        # Create an object with links to the model and time series
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.values)
        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400, cls.noise * 0.1],
            [0.02, 600, cls.noise * 100]
        )
        # Create a log likelihood
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)
        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior = pints.LogPosterior(
            cls.log_likelihood, cls.log_prior)
    def test_method(self):
        """ Run short chains with and without constant crossover and check
        the returned sample array shapes. """
        # Create mcmc
        xs = [
            self.real_parameters * 1.1,
            self.real_parameters * 1.05,
            self.real_parameters * 0.9,
            self.real_parameters * 0.95,
        ]
        mcmc = pints.DreamMCMC(4, xs)
        self.assertFalse(mcmc.constant_crossover())
        # Starts in initial phase
        self.assertTrue(mcmc.needs_initial_phase())
        self.assertTrue(mcmc.in_initial_phase())
        # Perform short run
        chains = []
        for i in range(100):
            xs = mcmc.ask()
            fxs = [self.log_posterior(x) for x in xs]
            samples = mcmc.tell(fxs)
            if i == 20:
                mcmc.set_initial_phase(False)
            if i >= 50:
                chains.append(samples)
            # When every proposal was accepted, the stored log-pdfs must
            # match the values we just told the sampler.
            if np.all(samples == xs):
                self.assertTrue(np.all(mcmc.current_log_pdfs() == fxs))
        chains = np.array(chains)
        self.assertEqual(chains.shape[0], 50)
        self.assertEqual(chains.shape[1], len(xs))
        self.assertEqual(chains.shape[2], len(xs[0]))
        # Repeat with constant crossover
        mcmc = pints.DreamMCMC(4, xs)
        mcmc.set_constant_crossover(True)
        self.assertTrue(mcmc.constant_crossover())
        # Perform short run
        chains = []
        for i in range(100):
            xs = mcmc.ask()
            fxs = [self.log_posterior(x) for x in xs]
            samples = mcmc.tell(fxs)
            if i == 20:
                mcmc.set_initial_phase(False)
            if i >= 50:
                chains.append(samples)
        chains = np.array(chains)
        self.assertEqual(chains.shape[0], 50)
        self.assertEqual(chains.shape[1], len(xs))
        self.assertEqual(chains.shape[2], len(xs[0]))
    def test_flow(self):
        """ Check the ask/tell state machine raises on misuse. """
        # Test we have at least 3 chains
        n = 2
        x0 = [self.real_parameters] * n
        self.assertRaises(ValueError, pints.DreamMCMC, n, x0)
        # Test initial proposal is first point
        n = 3
        x0 = [self.real_parameters] * n
        mcmc = pints.DreamMCMC(n, x0)
        self.assertTrue(np.all(mcmc.ask() == mcmc._x0))
        # Double initialisation
        mcmc = pints.DreamMCMC(n, x0)
        mcmc.ask()
        self.assertRaises(RuntimeError, mcmc._initialise)
        # Tell without ask
        mcmc = pints.DreamMCMC(n, x0)
        self.assertRaises(RuntimeError, mcmc.tell, 0)
        # Repeated asks should return same point
        mcmc = pints.DreamMCMC(n, x0)
        # Get into accepting state
        for i in range(100):
            mcmc.tell([self.log_posterior(x) for x in mcmc.ask()])
        x = mcmc.ask()
        for i in range(10):
            self.assertTrue(x is mcmc.ask())
        # Repeated tells should fail
        mcmc.tell([1, 1, 1])
        self.assertRaises(RuntimeError, mcmc.tell, [1, 1, 1])
        # Bad starting point
        mcmc = pints.DreamMCMC(n, x0)
        mcmc.ask()
        self.assertRaises(ValueError, mcmc.tell, float('-inf'))
    def test_set_hyper_parameters(self):
        """
        Tests the hyper-parameter interface for this sampler.
        """
        n = 5
        x0 = [self.real_parameters] * n
        mcmc = pints.DreamMCMC(n, x0)
        self.assertEqual(mcmc.n_hyper_parameters(), 8)
        # Test getting/setting b
        b = mcmc.b()
        self.assertEqual(mcmc.b(), b)
        b += 0.01
        self.assertNotEqual(mcmc.b(), b)
        mcmc.set_b(b)
        self.assertEqual(mcmc.b(), b)
        mcmc.set_b(0)
        self.assertRaises(ValueError, mcmc.set_b, -1)
        # B star
        x = mcmc.b_star() + 1
        mcmc.set_b_star(x)
        self.assertEqual(mcmc.b_star(), x)
        self.assertRaises(ValueError, mcmc.set_b_star, -5)
        # p_g
        x = mcmc.p_g() + 0.1
        mcmc.set_p_g(x)
        self.assertEqual(mcmc._p_g, x)
        self.assertRaises(ValueError, mcmc.set_p_g, -0.5)
        self.assertRaises(ValueError, mcmc.set_p_g, 1.5)
        # delta max
        x = mcmc.delta_max() - 1
        mcmc.set_delta_max(x)
        self.assertEqual(mcmc.delta_max(), x)
        self.assertRaises(ValueError, mcmc.set_delta_max, -1)
        self.assertRaises(ValueError, mcmc.set_delta_max, 1000)
        # CR
        x = mcmc.CR() * 0.9
        mcmc.set_CR(x)
        self.assertEqual(mcmc.CR(), x)
        self.assertRaises(ValueError, mcmc.set_CR, -0.5)
        self.assertRaises(ValueError, mcmc.set_CR, 1.5)
        # nCR
        x = mcmc.nCR() + 1
        mcmc.set_nCR(x)
        self.assertEqual(mcmc.nCR(), 4)
        self.assertRaises(ValueError, mcmc.set_nCR, 1)
        # should implicitly convert floats to int
        mcmc.set_nCR(2.1)
        self.assertEqual(mcmc.nCR(), 2)
        # Set all eight hyper-parameters at once and verify each one landed
        # on the corresponding private attribute.
        mcmc.set_hyper_parameters([0.50, 1.25, 0.45, 1,
                                   0, 1, 0.32, 5])
        self.assertEqual(mcmc._b, 0.50)
        self.assertEqual(mcmc._b_star, 1.25)
        self.assertEqual(mcmc._p_g, 0.45)
        self.assertEqual(mcmc._delta_max, 1)
        self.assertEqual(mcmc._initial_phase, False)
        self.assertEqual(mcmc._constant_crossover, True)
        self.assertEqual(mcmc._CR, 0.32)
        self.assertEqual(mcmc._nCR, 5)
    def test_logging(self):
        """
        Test logging includes name and custom fields.
        """
        x = [self.real_parameters] * 3
        mcmc = pints.MCMCController(
            self.log_posterior, 3, x, method=pints.DreamMCMC)
        mcmc.set_max_iterations(5)
        with StreamCapture() as c:
            mcmc.run()
        text = c.text()
        self.assertIn('DREAM', text)
if __name__ == '__main__':
    # Run the suite directly; a -v flag on the command line enables debug mode.
    print('Add -v for more debug output')
    import sys
    debug = '-v' in sys.argv or debug
    unittest.main()
| [
"numpy.random.normal",
"pints.GaussianLogLikelihood",
"pints.UniformLogPrior",
"pints.DreamMCMC",
"pints.toy.LogisticModel",
"shared.StreamCapture",
"pints.LogPosterior",
"numpy.array",
"numpy.linspace",
"pints.MCMCController",
"numpy.random.seed",
"unittest.main",
"numpy.all",
"pints.Sing... | [((7693, 7708), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7706, 7708), False, 'import unittest\n'), ((806, 823), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (820, 823), True, 'import numpy as np\n'), ((872, 891), 'pints.toy.LogisticModel', 'toy.LogisticModel', ([], {}), '()\n', (889, 891), True, 'import pints.toy as toy\n'), ((955, 981), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(1000)'], {}), '(0, 1000, 1000)\n', (966, 981), True, 'import numpy as np\n'), ((1120, 1168), 'numpy.random.normal', 'np.random.normal', (['(0)', 'cls.noise', 'cls.values.shape'], {}), '(0, cls.noise, cls.values.shape)\n', (1136, 1168), True, 'import numpy as np\n'), ((1245, 1274), 'numpy.array', 'np.array', (['cls.real_parameters'], {}), '(cls.real_parameters)\n', (1253, 1274), True, 'import numpy as np\n'), ((1365, 1424), 'pints.SingleOutputProblem', 'pints.SingleOutputProblem', (['cls.model', 'cls.times', 'cls.values'], {}), '(cls.model, cls.times, cls.values)\n', (1390, 1424), False, 'import pints\n'), ((1558, 1643), 'pints.UniformLogPrior', 'pints.UniformLogPrior', (['[0.01, 400, cls.noise * 0.1]', '[0.02, 600, cls.noise * 100]'], {}), '([0.01, 400, cls.noise * 0.1], [0.02, 600, cls.noise *\n 100])\n', (1579, 1643), False, 'import pints\n'), ((1738, 1778), 'pints.GaussianLogLikelihood', 'pints.GaussianLogLikelihood', (['cls.problem'], {}), '(cls.problem)\n', (1765, 1778), False, 'import pints\n'), ((1885, 1938), 'pints.LogPosterior', 'pints.LogPosterior', (['cls.log_likelihood', 'cls.log_prior'], {}), '(cls.log_likelihood, cls.log_prior)\n', (1903, 1938), False, 'import pints\n'), ((2205, 2227), 'pints.DreamMCMC', 'pints.DreamMCMC', (['(4)', 'xs'], {}), '(4, xs)\n', (2220, 2227), False, 'import pints\n'), ((2874, 2890), 'numpy.array', 'np.array', (['chains'], {}), '(chains)\n', (2882, 2890), True, 'import numpy as np\n'), ((3099, 3121), 'pints.DreamMCMC', 'pints.DreamMCMC', (['(4)', 'xs'], {}), '(4, xs)\n', (3114, 3121), False, 
'import pints\n'), ((3562, 3578), 'numpy.array', 'np.array', (['chains'], {}), '(chains)\n', (3570, 3578), True, 'import numpy as np\n'), ((4031, 4053), 'pints.DreamMCMC', 'pints.DreamMCMC', (['n', 'x0'], {}), '(n, x0)\n', (4046, 4053), False, 'import pints\n'), ((4158, 4180), 'pints.DreamMCMC', 'pints.DreamMCMC', (['n', 'x0'], {}), '(n, x0)\n', (4173, 4180), False, 'import pints\n'), ((4301, 4323), 'pints.DreamMCMC', 'pints.DreamMCMC', (['n', 'x0'], {}), '(n, x0)\n', (4316, 4323), False, 'import pints\n'), ((4443, 4465), 'pints.DreamMCMC', 'pints.DreamMCMC', (['n', 'x0'], {}), '(n, x0)\n', (4458, 4465), False, 'import pints\n'), ((4867, 4889), 'pints.DreamMCMC', 'pints.DreamMCMC', (['n', 'x0'], {}), '(n, x0)\n', (4882, 4889), False, 'import pints\n'), ((5172, 5194), 'pints.DreamMCMC', 'pints.DreamMCMC', (['n', 'x0'], {}), '(n, x0)\n', (5187, 5194), False, 'import pints\n'), ((7319, 7389), 'pints.MCMCController', 'pints.MCMCController', (['self.log_posterior', '(3)', 'x'], {'method': 'pints.DreamMCMC'}), '(self.log_posterior, 3, x, method=pints.DreamMCMC)\n', (7339, 7389), False, 'import pints\n'), ((2761, 2782), 'numpy.all', 'np.all', (['(samples == xs)'], {}), '(samples == xs)\n', (2767, 2782), True, 'import numpy as np\n'), ((7451, 7466), 'shared.StreamCapture', 'StreamCapture', ([], {}), '()\n', (7464, 7466), False, 'from shared import StreamCapture\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 12 09:57:11 2016
@author: smrak
"""
import numpy as np
from pandas import read_hdf
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy import signal
#fs = 10
#order = 5
#highcut = 0.025
#nyq = 0.5 * fs
#high = highcut / nyq
#b, a = signal.butter(order, high, btype='highpass', analog=True)
#w, h = signal.freqs(b, a)
#plt.plot(w*nyq, 20 * np.log10(abs(h)))
#plt.ylim([-50, 1])
#plt.xscale('log')
def butter_hp(highcut, fs, order=3):
    """Design a digital high-pass Butterworth filter.

    Parameters
    ----------
    highcut : float
        Cut-off frequency in the same units as ``fs``.
    fs : float
        Sampling frequency.
    order : int
        Filter order (default 3).

    Returns
    -------
    (b, a) : ndarray, ndarray
        Numerator and denominator polynomials of the IIR filter.
    """
    nyquist = 0.5 * fs
    # signal.butter expects the critical frequency normalised to Nyquist.
    return signal.butter(order, highcut / nyquist, btype='highpass',
                         analog=False)
# Receiver id, satellite PRN and number of leading epochs to skip.
rx = 8
sv = 23
skip = 100
# Local folder with the RINEX-derived HDF5 observation files (Windows path).
rinex = 'C:\\Users\\smrak\\Google Drive\\BU\\software\\gsit\\paper\\'
# Map of receiver id -> observation file name.
receiver = {2:'mah22800.h5', 3:'mah32800.h5', 4:'mah42800.h5', 5:'mah52800.h5',
            6:'mah62800.h5', 7:'mah72800.h5', 8:'mah82800.h5', 9:'mah92800.h5',
            13:'ma132800.h5'}
# GPS L1 carrier frequency [Hz] and speed of light [m/s].
f1 = 1575.42E6
c0 = 3E8
fn = rinex + receiver.get(rx)
data = read_hdf(fn)
obstimes = data.major_axis
# L1 carrier phase converted from cycles to metres; C1 pseudorange [m].
L1 = np.array(data['L1', sv, skip:, 'data'])*c0/f1
P1 = np.array(data['C1', sv, skip:, 'data'])
# Keep only epochs with a finite phase observation.
idx = np.where(np.isfinite(L1))[0]
L1 = L1[idx]
P1 = P1[idx]
# Carrier-minus-code combination.
y = L1-P1
x = np.arange(0, y.shape[0])
# Resample the series 100x denser by linear interpolation.
f = interpolate.interp1d(x, y)
x_new = np.arange(0, y.shape[0]-1, 0.01)
y_new = f(x_new)
# High-pass both the raw and the resampled series to remove the slow trend.
b, a = butter_hp(0.005, 1)
Y1 = signal.lfilter(b, a, y)
Y2 = signal.lfilter(b, a, y_new)
# Plot the filtered series converted back to phase [rad]; the first 500
# samples are dropped to skip the filter transient.
plt.plot(Y2[500:]/c0*f1*2*np.pi)
# Sliding-window (N samples) standard deviation of the filtered series.
N = 60
sp = []
Y2 = Y2[500:]
for i in range(len(Y2)-N):
    sp.append(np.std(Y2[i:i+N]))
#plt.plot(Y2) | [
"numpy.arange",
"matplotlib.pyplot.plot",
"scipy.signal.butter",
"scipy.interpolate.interp1d",
"numpy.array",
"scipy.signal.lfilter",
"numpy.isfinite",
"numpy.std",
"pandas.read_hdf"
] | [((984, 996), 'pandas.read_hdf', 'read_hdf', (['fn'], {}), '(fn)\n', (992, 996), False, 'from pandas import read_hdf\n'), ((1080, 1119), 'numpy.array', 'np.array', (["data['C1', sv, skip:, 'data']"], {}), "(data['C1', sv, skip:, 'data'])\n", (1088, 1119), True, 'import numpy as np\n'), ((1195, 1219), 'numpy.arange', 'np.arange', (['(0)', 'y.shape[0]'], {}), '(0, y.shape[0])\n', (1204, 1219), True, 'import numpy as np\n'), ((1224, 1250), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (1244, 1250), False, 'from scipy import interpolate\n'), ((1260, 1294), 'numpy.arange', 'np.arange', (['(0)', '(y.shape[0] - 1)', '(0.01)'], {}), '(0, y.shape[0] - 1, 0.01)\n', (1269, 1294), True, 'import numpy as np\n'), ((1343, 1366), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'y'], {}), '(b, a, y)\n', (1357, 1366), False, 'from scipy import signal\n'), ((1372, 1399), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'y_new'], {}), '(b, a, y_new)\n', (1386, 1399), False, 'from scipy import signal\n'), ((1400, 1440), 'matplotlib.pyplot.plot', 'plt.plot', (['(Y2[500:] / c0 * f1 * 2 * np.pi)'], {}), '(Y2[500:] / c0 * f1 * 2 * np.pi)\n', (1408, 1440), True, 'import matplotlib.pyplot as plt\n'), ((561, 619), 'scipy.signal.butter', 'signal.butter', (['order', 'high'], {'btype': '"""highpass"""', 'analog': '(False)'}), "(order, high, btype='highpass', analog=False)\n", (574, 619), False, 'from scipy import signal\n'), ((1029, 1068), 'numpy.array', 'np.array', (["data['L1', sv, skip:, 'data']"], {}), "(data['L1', sv, skip:, 'data'])\n", (1037, 1068), True, 'import numpy as np\n'), ((1135, 1150), 'numpy.isfinite', 'np.isfinite', (['L1'], {}), '(L1)\n', (1146, 1150), True, 'import numpy as np\n'), ((1503, 1522), 'numpy.std', 'np.std', (['Y2[i:i + N]'], {}), '(Y2[i:i + N])\n', (1509, 1522), True, 'import numpy as np\n')] |
import time
import multiprocessing as mp
from multiprocessing import Pool as ProcessPool
import numpy as np
import pandas as pd
from floris.utils.tools import valid_ops as vops
from floris.utils.tools import farm_config as fconfig
from floris.utils.visualization import wflo_eval as vweval
from floris.utils.visualization import wflo_opt as vwopt
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# MAIN #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class LayoutPower(object):
    """Evaluate the power output and cost of a wind-farm layout.

    Wraps wake model (``wm``), wake-superposition model (``wsm``), added
    turbulence model (``tim``) and cost model selected by name in ``configs``,
    and computes per-turbine power over a discretised wind rose.
    """
    def __init__(self, configs, **kwargs):
        self.config = configs
        # self.wtnum = None
        # self.layout = None
        # self.yawed = None
        # self.speed = None
        self.params = configs["param"]
        self.vbins = configs["vbins"]
        self.wbins = configs["wbins"]
        self.turb = configs["turb"]
        self.bins = (self.vbins, self.wbins)
        self.uniform = self.uniform_check(configs["param"])
        # Second element of wind_speed_dist() is the speed CDF (Weibull-like).
        self.wdcdf = vops.wind_speed_dist()[1]
        self.windn = configs["wind"]
        self.wmn = configs["wm"]
        self.wm = self.models(configs["wm"], "wm")
        self.wsmn = configs["wsm"]
        self.wsm = self.models(configs["wsm"], "wsm")
        self.timn = configs["tim"]
        self.tim = self.models(configs["tim"], "tim")
        self.costn = configs["cost"]
        self.cost = self.models(configs["cost"], "cost")
        self.wdepth = configs["wdepth"]
        # Worker pool for per-wind-direction wake evaluation.
        self.pool = ProcessPool(int(mp.cpu_count()))
    def initial(self, layout, **kwargs):
        """Bind a layout and derive per-turbine parameters and wind bins."""
        self.layout = layout
        self.wtnum = layout.shape[0]
        self.yawed = kwargs.get("yawed", None)
        if not kwargs.get("params", None):
            self.param = self.params_uniform(self.wtnum)
            self.pow_curve = vops.params_loader(self.param["power_curve"][0]).pow_curve
        else:
            self.param = self.params_nonuniform(kwargs["params"])
        self.speed = (np.min(self.param["v_in"]), np.max(self.param["v_out"]))
        self.v_bin, self.v_point, self.w_bin, self.w_point = \
            self.discretization(self.bins, self.speed)
        self.wind, self.wind_pdf = self.data_load('wind'), self.data_load('pdf').values
        assert self.wind.shape[0] == self.w_point.shape[0]
        self.capacity = self.param["P_rated"].values
    def uniform_check(self, params):
        """Return True when a single parameter set applies to all turbines."""
        if isinstance(params, list):
            return False if len(params) > 1 else True
        else:
            return True
    def layout_check(self):
        # Placeholder: layout validation not implemented yet.
        pass
    def params_uniform(self, num):
        """Replicate the single turbine parameter row ``num`` times."""
        params = vops.params_loader(self.params).params().values
        cols = vops.params_loader(self.params).params().columns
        return pd.DataFrame(np.repeat(params, num, axis=0), columns=cols)
    def params_nonuniform(self, params):  # TODO
        # Placeholder: per-turbine (non-uniform) parameters not implemented.
        self.uniform = False
        return None
    def data_load(self, data):
        """Load wind-rose data ('wind' or 'pdf') for the configured site."""
        return vops.winds_loader(data, self.windn, self.bins, self.speed)
    def models(self, name, model):
        """Resolve a model implementation by name for the given category."""
        return vops.find_and_load_model(name, model)
    def discretization(self, bins, speeds):
        """Discretise the wind rose into speed/direction bins and midpoints."""
        return vops.winds_discretization(bins, speeds)
    def unpack_nonuniform(self, ):
        # Placeholder: unpack a non-uniform layout/parameter solution vector.
        pass
    def plot_layout(self, layout, theta=0, annotate=False):
        """Plot the layout rotated into the wind frame of direction theta."""
        return vweval.layout_plot(
            vops.coordinate_transform(layout, theta), annotate)
    def wakes(self, mprocess=False):
        """Compute wake deficits for every (speed, turbine, direction) point.

        With ``mprocess`` the module-level ``deficits`` worker is farmed out
        over wind directions; otherwise directions are evaluated serially.
        Returns an array shaped (n_speeds, n_turbines, n_directions).
        """
        wd_num, ws_num = self.w_point.shape[0], self.v_point.shape[0]
        if mprocess:
            args = list(zip(list(self.w_point), [self.v_point] * wd_num,
                            [self.wm] * wd_num, [self.wsm] * wd_num,
                            [self.tim] * wd_num, [self.turb] * wd_num,
                            [self.param] * wd_num, [self.layout] * wd_num,))
            result = self.pool.map_async(deficits, args); result.wait()
            wt_deficits = np.transpose(np.array(result.get()), (1, 2, 0))
        else:
            wt_deficits = np.zeros((wd_num, ws_num, self.wtnum))
            for i, wd in enumerate(self.w_point):
                wt_deficits[i, :, :] = self.deficits(wd, self.layout)
            # wt_deficits = np.vectorize(self.deficits(wd, self.layout))
            wt_deficits = np.transpose(wt_deficits, (1, 2, 0))
        return wt_deficits
    def one_wakes(self, layout, theta, mprocess=False, **kwargs):
        """Compute wake deficits for a single wind direction ``theta``."""
        self.initial(layout, **kwargs)
        if mprocess:
            args = list(zip([theta], [self.v_point], [self.wm], [self.wsm],
                            [self.tim], [self.turb], [self.param], [self.layout]))
            result = self.pool.map_async(deficits, args); result.wait()
            return result
        else:
            return self.deficits(theta, self.layout)
    def powers_old(self, deficits, params):
        """Legacy bin-integrated power computation (kept for reference)."""
        v_in, v_out, power_curve = params["v_in"], params["v_out"], \
            vops.params_loader(params["power_curve"]).pow_curve
        v_bins = vops.winds_discretization(self.bins, (v_in, v_out))[0]
        v_bins_j_1, v_bins_j, wind_freq_bins = v_bins[:-1], v_bins[1:], self.wind["w_l-1"]
        c_list, k_list = self.wind["c"], self.wind["k"]
        power_cdf_bins = np.zeros(len(self.wind["l-1"]))
        no_wake_power_cdf_bins = np.zeros(len(self.wind["l-1"]))
        for i in range(len(self.wind["l-1"])):
            # Probability mass of each speed bin from the Weibull CDF.
            pr_v_bins = self.wdcdf(v_bins_j, c_list[i], k_list[i]) - self.wdcdf(v_bins_j_1, c_list[i], k_list[i])
            power_bins = np.vectorize(power_curve)(((v_bins_j_1 + v_bins_j) / 2) * (1 - deficits[:, i]))
            no_wake_power_bins = np.vectorize(power_curve)((v_bins_j_1 + v_bins_j) / 2)
            power_cdf_bins[i] = np.dot(power_bins, pr_v_bins)
            no_wake_power_cdf_bins[i] = np.dot(no_wake_power_bins, pr_v_bins)
        return np.array([np.dot(power_cdf_bins, wind_freq_bins),
                         np.dot(no_wake_power_cdf_bins, wind_freq_bins)])
    def powers(self, deficits, params, **kwargs):
        """Expected (waked, no-wake) power of one turbine over the wind rose.

        ``deficits`` is (n_speeds, n_directions) for this turbine. Returns a
        length-2 array plus the per-direction breakdown (zeros unless the
        ``wd_output`` flag is set).
        """
        pow_curve = vops.params_loader(params["power_curve"]).pow_curve \
            if not self.uniform else self.pow_curve
        wt_power = np.vectorize(pow_curve)(self.v_point[:, None] * (1. - deficits))
        no_wake_wt_power = \
            np.vectorize(pow_curve)(self.v_point[:, None] * np.ones(deficits.shape))
        wd_powers = np.zeros((2, self.w_point.shape[0]))
        if kwargs.get("wd_output", False):
            wds_fs = self.wind_pdf / self.wind.values[:, -1][None, :]
            wd_power, no_wake_wd_power = \
                np.sum(wt_power * wds_fs, axis=0), np.sum(no_wake_wt_power * wds_fs, axis=0)
            wd_powers = np.concatenate((wd_power[None, :], no_wake_wd_power[None, :]), axis=0)
        wt_power, no_wake_wt_power = \
            np.sum(wt_power * self.wind_pdf), np.sum(no_wake_wt_power * self.wind_pdf)
        # NOTE: np.float was removed in NumPy 1.24; use the builtin float.
        return np.array([wt_power, no_wake_wt_power], dtype=float), wd_powers
    def output(self, deficits, **kwargs):
        """Aggregate per-turbine powers from the full deficit tensor."""
        assert deficits.shape == (self.v_point.shape[0], self.wtnum, self.w_point.shape[0])
        powers, wd_powers = \
            np.zeros((self.wtnum, 2)), np.zeros((self.wtnum, 2, self.w_point.shape[0]))
        for i in range(self.wtnum):
            powers[i, :], wd_powers[i, :, :] = \
                self.powers(deficits[:, i, :], self.param.iloc[i], **kwargs)
        return powers, np.sum(wd_powers, axis=0).transpose(1, 0)
    def run(self, layout, **kwargs):
        """Full evaluation of one layout: wakes -> powers -> cost."""
        self.initial(layout, **kwargs)
        powers, wd_powers = self.output(self.wakes(mprocess=True), **kwargs)
        cost = self.cost(layout, powers, self.capacity, wdepth=self.wdepth, **kwargs)
        return cost, powers, wd_powers
    def test(self, layout, baseline=None, verbose=True, **kwargs):
        """Evaluate ``layout`` (and optionally a baseline) and report results."""
        start = time.time()
        cost, powers, wd_powers = self.run(layout, **kwargs)
        end = time.time()
        if verbose:
            power, no_wake_power = np.sum(powers, axis=0)
            cf, eff = power * 100 / np.sum(self.capacity), power * 100 / no_wake_power
            print(f"Interactive time: {end - start:.3f} s")
            print(f"Optimal({self.costn}[€/MWh] / Power[MW] / No-wake[MW] / " +
                  f"CF[%] / Eff[%] / Loss[%]):\n ==> {cost:.3f} / {power:.3f} / " +
                  f"{no_wake_power:.3f} / {cf:.2f} / {eff:.2f} / {100. - eff:.2f}\n")
        if baseline is not None:
            bcost, bpowers, _ = self.run(baseline, **kwargs)
            bpower, bno_wake_power = np.sum(bpowers, axis=0)[0], np.sum(bpowers, axis=0)[1]
            bcf, beff = bpower * 100 / np.sum(self.capacity), bpower * 100 / bno_wake_power
            print(f"Baseline({self.costn}[€/MWh] / Power[MW] / No-wake[MW] / " +
                  f"CF[%] / Eff[%] / Loss[%]):\n ==> {bcost:.3f} / {bpower:.3f} / " +
                  f"{bno_wake_power:.3f} / {bcf:.2f} / {beff:.2f} / {100. - beff:.2f}\n")
        if kwargs.get("wd_output", False):
            assert wd_powers.all() != 0.
            vwopt.wd_power_plot(self.w_point, wd_powers, self.capacity, **kwargs)
        if kwargs.get("wt_output", False):
            vwopt.wt_power_plot(powers, self.capacity, **kwargs)
        return cost, powers
    def deficits(self, theta, layout):
        """Serial wake-deficit table for one wind direction ``theta``.

        Turbines are sorted upwind-to-downwind in the rotated frame; each
        turbine's wake is cascaded onto all downwind turbines and combined
        by the superposition model. Returns (n_speeds, n_turbines) deficits
        in the original turbine order.
        """
        wt_loc = vops.coordinate_transform(layout, theta)
        wt_index = vops.wind_turbines_sort(wt_loc)
        assert wt_index.shape[0] == wt_loc.shape[0]
        deficits = np.zeros((len(self.v_point), len(wt_index)))
        # Columns 0..n-1: pairwise contributions; -2: total deficit; -1: speed.
        deficit_tab = np.full((len(self.v_point), len(wt_index), len(wt_index) + 2), None)
        turbulence_tab = np.full((len(self.v_point), len(wt_index), len(wt_index) + 2), None)
        v_start = time.time()
        for z, v_i in enumerate(self.v_point):
            deficit_tab[z, 0, -2], deficit_tab[z, 0, -1] = 0., v_i
            if self.tim is not None:
                turbulence_tab[z, 0, -2], turbulence_tab[z, 0, -1] = 0., self.turb
            for i, t in enumerate(wt_index):
                # wt_start = time.time()
                ct_curve = vops.params_loader(self.param.iloc[t]["ct_curve"]).ct_curve
                wake = self.wm(wt_loc[t, :], ct_curve(deficit_tab[z, i, -1]),
                               self.param.iloc[t]["D_r"],
                               self.param.iloc[t]["z_hub"], T_m=self.tim,
                               I_w=turbulence_tab[z, i, -1], I_a=self.turb)
                if i < len(wt_index) - 1:
                    for j, wt in enumerate(wt_index[i+1:]):
                        deficit_tab[z, i, i + j + 1], turbulence_tab[z, i, i + j + 1] = \
                            wake.wake_loss(wt_loc[wt, :], self.param.iloc[wt]["D_r"], debug=None)
                    total_deficit = self.wsm(deficit_tab[z, :, :], i + 1, inflow=v_i)
                    if self.tim is not None:
                        # Added turbulence: worst upstream wake combined with ambient.
                        turbulence_tab[z, i + 1, -2] = np.max(turbulence_tab[z, :i+1, i+1])
                        turbulence_tab[z, i + 1, -1] = np.sqrt(
                            np.max(turbulence_tab[z, :i+1, i+1])**2 + self.turb**2)
                    deficit_tab[z, i + 1, -2] = total_deficit
                    deficit_tab[z, i + 1, -1] = v_i * (1 - total_deficit)
                else:
                    break
                # wt_end = time.time()
                # print(f"WT: {i} || Time: {wt_end - wt_start}")
            deficits[z, :] = vops.wt_power_reorder(wt_index, deficit_tab[z, :, -2])
        v_end = time.time()
        print(f"Wind: {theta} | Time: {v_end - v_start}")
        return deficits
def deficits(args):
    """Multiprocessing worker: wake-deficit table for one wind direction.

    ``args`` is a tuple (theta, speeds, wm, wsm, tim, turb, params, layout)
    mirroring LayoutPower.deficits; returns an (n_speeds, n_turbines) array
    of total deficits in the original turbine order.
    """
    theta, speeds, wm, wsm, tim, turb, params, layout = args
    wt_loc = vops.coordinate_transform(layout, theta)
    wt_index = vops.wind_turbines_sort(wt_loc)
    assert wt_index.shape[0] == wt_loc.shape[0]
    deficits = np.zeros((len(speeds), len(wt_index)))
    # Columns 0..n-1: pairwise wake contributions; -2: total deficit; -1: speed.
    deficit_tab = np.full((len(speeds), len(wt_index), len(wt_index) + 2), None)
    turbulence_tab = np.full((len(speeds), len(wt_index), len(wt_index) + 2), None)
    start = time.time()
    for z, v_i in enumerate(speeds):
        # Most-upwind turbine sees the undisturbed inflow.
        deficit_tab[z, 0, -2], deficit_tab[z, 0, -1] = 0., v_i
        if tim is not None:
            turbulence_tab[z, 0, -2], turbulence_tab[z, 0, -1] = 0., turb
        for i, t in enumerate(wt_index):
            ct_curve = vops.params_loader(params.iloc[t]["ct_curve"]).ct_curve
            wake = wm(wt_loc[t, :], ct_curve(deficit_tab[z, i, -1]),
                      params.iloc[t]["D_r"],
                      params.iloc[t]["z_hub"], T_m=tim,
                      I_w=turbulence_tab[z, i, -1], I_a=turb)
            if i < len(wt_index) - 1:
                # Cascade this turbine's wake onto every downwind turbine.
                for j, wt in enumerate(wt_index[i+1:]):
                    deficit_tab[z, i, i + j + 1], turbulence_tab[z, i, i + j + 1] = \
                        wake.wake_loss(wt_loc[wt, :], params.iloc[wt]["D_r"], debug=None)
                total_deficit = wsm(deficit_tab[z, :, :], i + 1, inflow=v_i)
                if tim is not None:
                    # Added turbulence: worst upstream wake combined with ambient.
                    turbulence_tab[z, i + 1, -2] = np.max(turbulence_tab[z, :i+1, i+1])
                    turbulence_tab[z, i + 1, -1] = np.sqrt(
                        np.max(turbulence_tab[z, :i+1, i+1])**2 + turb**2)
                deficit_tab[z, i + 1, -2] = total_deficit
                deficit_tab[z, i + 1, -1] = v_i * (1 - total_deficit)
            else:
                break
        deficits[z, :] = vops.wt_power_reorder(wt_index, deficit_tab[z, :, -2])
    end = time.time()
    # print(f"Wind: {theta} | Time: {end - start:.3f}")
    return deficits
def analysis(path="solution", baseline="horns", result=None, config=None,
             **kwargs):
    """Re-evaluate a saved (or given) optimisation result against a baseline.

    ``result`` may be a dict or the stem of a JSON file under ``path``;
    ``config`` defaults to the one stored in the result. Optional kwargs
    trigger layout/curve/per-direction/per-turbine plots.
    """
    result = result if isinstance(result, dict) else \
        vops.json_load(f"{path}/{result}.json")
    config = config or result['config']
    wf = LayoutPower(config)
    if wf.uniform:
        # Two-stage runs store a layout history; take the final layout.
        layout = np.array(result['layout'][-1]) if config['stage'] == 2 \
            else np.array(result['layout'])
        param = None
    else:
        print("NOTE: Nonuniform Wind Farm Configuration")
        layout, param = wf.unpack_nonuniform(result['layout'])
    # print(layout.shape)
    if layout.shape[0] == config['num']:
        wt_num = layout.shape[0]
    else:
        # Flat (x, y, x, y, ...) vector: reshape into (n, 2) coordinates.
        wt_num = layout.shape[0] // 2
        layout = layout.reshape((wt_num, 2))
    assert wt_num == config['num'], \
        'WTs number is not matching. Please check!'
    print("\nWind Turbine Num: ", wt_num)
    if baseline in ['horns', ]:
        baseline = vops.params_loader(baseline).baseline(wt_num)
    if (config["opt"] == "ga" and config["stage"] != 2) and config["grid"]:
        # Grid-encoded GA solutions must be mapped back to coordinates.
        _, grids = vops.layout2grids([0, 0], [63, 48.89], config["grid"])
        layout = vops.grids2layout(layout, grids)
    # Sort south-to-north, scale to metres (80 m units) and shift the origin.
    layout = layout[np.argsort(layout[:, 1]), :] * 80.
    layout = layout - np.array([0, 589])
    cost, _ = wf.test(layout, baseline, param=param, path=path, **kwargs)
    if cost is not None:
        if kwargs.get("layout_output", False):
            vwopt.wf_layout_plot(layout, baseline, path=path, **kwargs)
        if kwargs.get("curve_output", False):
            vwopt.opt_curve_plot(result, path=path, **kwargs)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# MISCELLANEOUS #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def power_debug():
    """Sanity-check the expected power integral by direct quadrature.

    Integrates a normalised power curve against a Weibull wind-speed pdf
    (partial-load region 4-15 m/s, rated region 15-25 m/s) and then over a
    uniform wind-direction distribution.

    Returns
    -------
    (integral_a, integral_b, total) : tuple of float
        Partial-load contribution, rated contribution, and their sum.
    """
    from scipy import integrate

    def weibull(v, shape=2.3, scale=10.59):
        # Weibull wind-speed probability density.
        return (shape / scale) * (v / scale)**(shape - 1) * np.exp(-(v / scale) ** shape)

    def linear(v):
        # Linear normalised power curve between cut-in and rated speed.
        return ((2 / 11) * v + (8 / 11))

    def directions(theta):
        # Uniform wind-direction density over 360 degrees.
        return 1 / 360

    # The original inlined rounded constants (0.217 for 2.3/10.59, 0.18/0.73
    # for 2/11 and 8/11) while leaving these helpers unused; call the exact
    # helpers instead.
    a, _ = integrate.quad(lambda v: linear(v) * weibull(v), 4, 15)
    b, _ = integrate.quad(lambda v: 2 * weibull(v), 15, 25)
    # Integrate out the (uniform) wind direction.
    integral_a, _ = integrate.quad(lambda t: directions(t) * a, 0, 360)
    integral_b, _ = integrate.quad(lambda t: directions(t) * b, 0, 360)
    return integral_a, integral_b, integral_a + integral_b
if __name__ == "__main__":
    # Example configuration: two-stage GA+PSO optimisation of a 25-turbine
    # layout with the Jensen wake model on the Horns Rev site data.
    config = {
        "stage":2,
        "opt":['ga', 'pso'],
        "tag":'25',
        # "pop": 40,
        "pop": [20, 20],
        # "maxg": 5,
        "maxg": [20, 20],
        "grid": 5,
        "num": 25,
        "param": "horns",
        "wind": "horns",
        "vbins": 3,
        "wbins": 15,
        "wdepth": "linear_x",
        "cost": "LCOE",
        "turb": 0.077,
        "wm": "Jensen",
        # "wm": "Bastankhah",
        "wsm": "SS",
        "tim": "Frandsen",
        # "tim": None,
        }
    # Baseline Horns Rev layout, flattened and expressed in 80 m units.
    layout = (fconfig.Horns.baseline(25) / 80.).ravel()
    # layout = None
    # path = "output/21_6_30/Jen_49_mos"
    # path = "solution"
    # analysis(path=path,
    #          baseline="horns",
    #          result="eapso_results_49",
    #          # result={'layout': layout},
    #          # config=config,
    #          layout_output=True,
    #          layout_name="layout_49",
    #          curve_output=True,
    #          curve_name="curve_25",
    #          wd_output=False,
    #          wd_name="wds_25",
    #          wt_output=False,
    #          wt_name="wts_25",
    #          )
    # LayoutPower(config).one_wakes(layout, 105., mprocess=True)
| [
"multiprocessing.cpu_count",
"numpy.argsort",
"floris.utils.tools.valid_ops.winds_discretization",
"numpy.array",
"floris.utils.tools.valid_ops.grids2layout",
"floris.utils.tools.valid_ops.wind_turbines_sort",
"floris.utils.tools.valid_ops.wt_power_reorder",
"floris.utils.visualization.wflo_opt.wt_pow... | [((11682, 11722), 'floris.utils.tools.valid_ops.coordinate_transform', 'vops.coordinate_transform', (['layout', 'theta'], {}), '(layout, theta)\n', (11707, 11722), True, 'from floris.utils.tools import valid_ops as vops\n'), ((11738, 11769), 'floris.utils.tools.valid_ops.wind_turbines_sort', 'vops.wind_turbines_sort', (['wt_loc'], {}), '(wt_loc)\n', (11761, 11769), True, 'from floris.utils.tools import valid_ops as vops\n'), ((12049, 12060), 'time.time', 'time.time', ([], {}), '()\n', (12058, 12060), False, 'import time\n'), ((13479, 13490), 'time.time', 'time.time', ([], {}), '()\n', (13488, 13490), False, 'import time\n'), ((16029, 16074), 'scipy.integrate.quad', 'integrate.quad', (['(lambda t: 1 / 360 * a)', '(0)', '(360)'], {}), '(lambda t: 1 / 360 * a, 0, 360)\n', (16043, 16074), False, 'from scipy import integrate\n'), ((16097, 16142), 'scipy.integrate.quad', 'integrate.quad', (['(lambda t: 1 / 360 * b)', '(0)', '(360)'], {}), '(lambda t: 1 / 360 * b, 0, 360)\n', (16111, 16142), False, 'from scipy import integrate\n'), ((3013, 3071), 'floris.utils.tools.valid_ops.winds_loader', 'vops.winds_loader', (['data', 'self.windn', 'self.bins', 'self.speed'], {}), '(data, self.windn, self.bins, self.speed)\n', (3030, 3071), True, 'from floris.utils.tools import valid_ops as vops\n'), ((3123, 3160), 'floris.utils.tools.valid_ops.find_and_load_model', 'vops.find_and_load_model', (['name', 'model'], {}), '(name, model)\n', (3147, 3160), True, 'from floris.utils.tools import valid_ops as vops\n'), ((3221, 3260), 'floris.utils.tools.valid_ops.winds_discretization', 'vops.winds_discretization', (['bins', 'speeds'], {}), '(bins, speeds)\n', (3246, 3260), True, 'from floris.utils.tools import valid_ops as vops\n'), ((6397, 6433), 'numpy.zeros', 'np.zeros', (['(2, self.w_point.shape[0])'], {}), '((2, self.w_point.shape[0]))\n', (6405, 6433), True, 'import numpy as np\n'), ((7828, 7839), 'time.time', 'time.time', ([], {}), '()\n', 
(7837, 7839), False, 'import time\n'), ((7915, 7926), 'time.time', 'time.time', ([], {}), '()\n', (7924, 7926), False, 'import time\n'), ((9323, 9363), 'floris.utils.tools.valid_ops.coordinate_transform', 'vops.coordinate_transform', (['layout', 'theta'], {}), '(layout, theta)\n', (9348, 9363), True, 'from floris.utils.tools import valid_ops as vops\n'), ((9383, 9414), 'floris.utils.tools.valid_ops.wind_turbines_sort', 'vops.wind_turbines_sort', (['wt_loc'], {}), '(wt_loc)\n', (9406, 9414), True, 'from floris.utils.tools import valid_ops as vops\n'), ((9734, 9745), 'time.time', 'time.time', ([], {}), '()\n', (9743, 9745), False, 'import time\n'), ((11490, 11501), 'time.time', 'time.time', ([], {}), '()\n', (11499, 11501), False, 'import time\n'), ((13414, 13468), 'floris.utils.tools.valid_ops.wt_power_reorder', 'vops.wt_power_reorder', (['wt_index', 'deficit_tab[z, :, -2]'], {}), '(wt_index, deficit_tab[z, :, -2])\n', (13435, 13468), True, 'from floris.utils.tools import valid_ops as vops\n'), ((13731, 13770), 'floris.utils.tools.valid_ops.json_load', 'vops.json_load', (['f"""{path}/{result}.json"""'], {}), "(f'{path}/{result}.json')\n", (13745, 13770), True, 'from floris.utils.tools import valid_ops as vops\n'), ((14646, 14700), 'floris.utils.tools.valid_ops.layout2grids', 'vops.layout2grids', (['[0, 0]', '[63, 48.89]', "config['grid']"], {}), "([0, 0], [63, 48.89], config['grid'])\n", (14663, 14700), True, 'from floris.utils.tools import valid_ops as vops\n'), ((14718, 14750), 'floris.utils.tools.valid_ops.grids2layout', 'vops.grids2layout', (['layout', 'grids'], {}), '(layout, grids)\n', (14735, 14750), True, 'from floris.utils.tools import valid_ops as vops\n'), ((14828, 14846), 'numpy.array', 'np.array', (['[0, 589]'], {}), '([0, 589])\n', (14836, 14846), True, 'import numpy as np\n'), ((1084, 1106), 'floris.utils.tools.valid_ops.wind_speed_dist', 'vops.wind_speed_dist', ([], {}), '()\n', (1104, 1106), True, 'from floris.utils.tools import valid_ops as 
vops\n'), ((2043, 2069), 'numpy.min', 'np.min', (["self.param['v_in']"], {}), "(self.param['v_in'])\n", (2049, 2069), True, 'import numpy as np\n'), ((2071, 2098), 'numpy.max', 'np.max', (["self.param['v_out']"], {}), "(self.param['v_out'])\n", (2077, 2098), True, 'import numpy as np\n'), ((2820, 2850), 'numpy.repeat', 'np.repeat', (['params', 'num'], {'axis': '(0)'}), '(params, num, axis=0)\n', (2829, 2850), True, 'import numpy as np\n'), ((3418, 3458), 'floris.utils.tools.valid_ops.coordinate_transform', 'vops.coordinate_transform', (['layout', 'theta'], {}), '(layout, theta)\n', (3443, 3458), True, 'from floris.utils.tools import valid_ops as vops\n'), ((4075, 4113), 'numpy.zeros', 'np.zeros', (['(wd_num, ws_num, self.wtnum)'], {}), '((wd_num, ws_num, self.wtnum))\n', (4083, 4113), True, 'import numpy as np\n'), ((4333, 4369), 'numpy.transpose', 'np.transpose', (['wt_deficits', '(1, 2, 0)'], {}), '(wt_deficits, (1, 2, 0))\n', (4345, 4369), True, 'import numpy as np\n'), ((5044, 5095), 'floris.utils.tools.valid_ops.winds_discretization', 'vops.winds_discretization', (['self.bins', '(v_in, v_out)'], {}), '(self.bins, (v_in, v_out))\n', (5069, 5095), True, 'from floris.utils.tools import valid_ops as vops\n'), ((5754, 5783), 'numpy.dot', 'np.dot', (['power_bins', 'pr_v_bins'], {}), '(power_bins, pr_v_bins)\n', (5760, 5783), True, 'import numpy as np\n'), ((5824, 5861), 'numpy.dot', 'np.dot', (['no_wake_power_bins', 'pr_v_bins'], {}), '(no_wake_power_bins, pr_v_bins)\n', (5830, 5861), True, 'import numpy as np\n'), ((6196, 6219), 'numpy.vectorize', 'np.vectorize', (['pow_curve'], {}), '(pow_curve)\n', (6208, 6219), True, 'import numpy as np\n'), ((6302, 6325), 'numpy.vectorize', 'np.vectorize', (['pow_curve'], {}), '(pow_curve)\n', (6314, 6325), True, 'import numpy as np\n'), ((6707, 6777), 'numpy.concatenate', 'np.concatenate', (['(wd_power[None, :], no_wake_wd_power[None, :])'], {'axis': '(0)'}), '((wd_power[None, :], no_wake_wd_power[None, :]), axis=0)\n', (6721, 
6777), True, 'import numpy as np\n'), ((6829, 6861), 'numpy.sum', 'np.sum', (['(wt_power * self.wind_pdf)'], {}), '(wt_power * self.wind_pdf)\n', (6835, 6861), True, 'import numpy as np\n'), ((6863, 6903), 'numpy.sum', 'np.sum', (['(no_wake_wt_power * self.wind_pdf)'], {}), '(no_wake_wt_power * self.wind_pdf)\n', (6869, 6903), True, 'import numpy as np\n'), ((6919, 6973), 'numpy.array', 'np.array', (['[wt_power, no_wake_wt_power]'], {'dtype': 'np.float'}), '([wt_power, no_wake_wt_power], dtype=np.float)\n', (6927, 6973), True, 'import numpy as np\n'), ((7162, 7187), 'numpy.zeros', 'np.zeros', (['(self.wtnum, 2)'], {}), '((self.wtnum, 2))\n', (7170, 7187), True, 'import numpy as np\n'), ((7189, 7237), 'numpy.zeros', 'np.zeros', (['(self.wtnum, 2, self.w_point.shape[0])'], {}), '((self.wtnum, 2, self.w_point.shape[0]))\n', (7197, 7237), True, 'import numpy as np\n'), ((7983, 8005), 'numpy.sum', 'np.sum', (['powers'], {'axis': '(0)'}), '(powers, axis=0)\n', (7989, 8005), True, 'import numpy as np\n'), ((9059, 9128), 'floris.utils.visualization.wflo_opt.wd_power_plot', 'vwopt.wd_power_plot', (['self.w_point', 'wd_powers', 'self.capacity'], {}), '(self.w_point, wd_powers, self.capacity, **kwargs)\n', (9078, 9128), True, 'from floris.utils.visualization import wflo_opt as vwopt\n'), ((9184, 9236), 'floris.utils.visualization.wflo_opt.wt_power_plot', 'vwopt.wt_power_plot', (['powers', 'self.capacity'], {}), '(powers, self.capacity, **kwargs)\n', (9203, 9236), True, 'from floris.utils.visualization import wflo_opt as vwopt\n'), ((11419, 11473), 'floris.utils.tools.valid_ops.wt_power_reorder', 'vops.wt_power_reorder', (['wt_index', 'deficit_tab[z, :, -2]'], {}), '(wt_index, deficit_tab[z, :, -2])\n', (11440, 11473), True, 'from floris.utils.tools import valid_ops as vops\n'), ((13876, 13906), 'numpy.array', 'np.array', (["result['layout'][-1]"], {}), "(result['layout'][-1])\n", (13884, 13906), True, 'import numpy as np\n'), ((13950, 13976), 'numpy.array', 'np.array', 
(["result['layout']"], {}), "(result['layout'])\n", (13958, 13976), True, 'import numpy as np\n'), ((15005, 15064), 'floris.utils.visualization.wflo_opt.wf_layout_plot', 'vwopt.wf_layout_plot', (['layout', 'baseline'], {'path': 'path'}), '(layout, baseline, path=path, **kwargs)\n', (15025, 15064), True, 'from floris.utils.visualization import wflo_opt as vwopt\n'), ((15123, 15172), 'floris.utils.visualization.wflo_opt.opt_curve_plot', 'vwopt.opt_curve_plot', (['result'], {'path': 'path'}), '(result, path=path, **kwargs)\n', (15143, 15172), True, 'from floris.utils.visualization import wflo_opt as vwopt\n'), ((15577, 15606), 'numpy.exp', 'np.exp', (['(-(v / scale) ** shape)'], {}), '(-(v / scale) ** shape)\n', (15583, 15606), True, 'import numpy as np\n'), ((1581, 1595), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1593, 1595), True, 'import multiprocessing as mp\n'), ((1882, 1930), 'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (["self.param['power_curve'][0]"], {}), "(self.param['power_curve'][0])\n", (1900, 1930), True, 'from floris.utils.tools import valid_ops as vops\n'), ((4975, 5016), 'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (["params['power_curve']"], {}), "(params['power_curve'])\n", (4993, 5016), True, 'from floris.utils.tools import valid_ops as vops\n'), ((5554, 5579), 'numpy.vectorize', 'np.vectorize', (['power_curve'], {}), '(power_curve)\n', (5566, 5579), True, 'import numpy as np\n'), ((5667, 5692), 'numpy.vectorize', 'np.vectorize', (['power_curve'], {}), '(power_curve)\n', (5679, 5692), True, 'import numpy as np\n'), ((5887, 5925), 'numpy.dot', 'np.dot', (['power_cdf_bins', 'wind_freq_bins'], {}), '(power_cdf_bins, wind_freq_bins)\n', (5893, 5925), True, 'import numpy as np\n'), ((5951, 5997), 'numpy.dot', 'np.dot', (['no_wake_power_cdf_bins', 'wind_freq_bins'], {}), '(no_wake_power_cdf_bins, wind_freq_bins)\n', (5957, 5997), True, 'import numpy as np\n'), ((6071, 6112), 
'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (["params['power_curve']"], {}), "(params['power_curve'])\n", (6089, 6112), True, 'from floris.utils.tools import valid_ops as vops\n'), ((6350, 6373), 'numpy.ones', 'np.ones', (['deficits.shape'], {}), '(deficits.shape)\n', (6357, 6373), True, 'import numpy as np\n'), ((6606, 6639), 'numpy.sum', 'np.sum', (['(wt_power * wds_fs)'], {'axis': '(0)'}), '(wt_power * wds_fs, axis=0)\n', (6612, 6639), True, 'import numpy as np\n'), ((6641, 6682), 'numpy.sum', 'np.sum', (['(no_wake_wt_power * wds_fs)'], {'axis': '(0)'}), '(no_wake_wt_power * wds_fs, axis=0)\n', (6647, 6682), True, 'import numpy as np\n'), ((12327, 12373), 'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (["params.iloc[t]['ct_curve']"], {}), "(params.iloc[t]['ct_curve'])\n", (12345, 12373), True, 'from floris.utils.tools import valid_ops as vops\n'), ((14505, 14533), 'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (['baseline'], {}), '(baseline)\n', (14523, 14533), True, 'from floris.utils.tools import valid_ops as vops\n'), ((14771, 14795), 'numpy.argsort', 'np.argsort', (['layout[:, 1]'], {}), '(layout[:, 1])\n', (14781, 14795), True, 'import numpy as np\n'), ((15820, 15847), 'numpy.exp', 'np.exp', (['(-(v / 10.59) ** 2.3)'], {}), '(-(v / 10.59) ** 2.3)\n', (15826, 15847), True, 'import numpy as np\n'), ((15956, 15983), 'numpy.exp', 'np.exp', (['(-(v / 10.59) ** 2.3)'], {}), '(-(v / 10.59) ** 2.3)\n', (15962, 15983), True, 'import numpy as np\n'), ((16763, 16789), 'floris.utils.tools.farm_config.Horns.baseline', 'fconfig.Horns.baseline', (['(25)'], {}), '(25)\n', (16785, 16789), True, 'from floris.utils.tools import farm_config as fconfig\n'), ((2680, 2711), 'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (['self.params'], {}), '(self.params)\n', (2698, 2711), True, 'from floris.utils.tools import valid_ops as vops\n'), ((2743, 2774), 'floris.utils.tools.valid_ops.params_loader', 
'vops.params_loader', (['self.params'], {}), '(self.params)\n', (2761, 2774), True, 'from floris.utils.tools import valid_ops as vops\n'), ((7423, 7448), 'numpy.sum', 'np.sum', (['wd_powers'], {'axis': '(0)'}), '(wd_powers, axis=0)\n', (7429, 7448), True, 'import numpy as np\n'), ((8045, 8066), 'numpy.sum', 'np.sum', (['self.capacity'], {}), '(self.capacity)\n', (8051, 8066), True, 'import numpy as np\n'), ((10093, 10143), 'floris.utils.tools.valid_ops.params_loader', 'vops.params_loader', (["self.param.iloc[t]['ct_curve']"], {}), "(self.param.iloc[t]['ct_curve'])\n", (10111, 10143), True, 'from floris.utils.tools import valid_ops as vops\n'), ((13049, 13089), 'numpy.max', 'np.max', (['turbulence_tab[z, :i + 1, i + 1]'], {}), '(turbulence_tab[z, :i + 1, i + 1])\n', (13055, 13089), True, 'import numpy as np\n'), ((8545, 8568), 'numpy.sum', 'np.sum', (['bpowers'], {'axis': '(0)'}), '(bpowers, axis=0)\n', (8551, 8568), True, 'import numpy as np\n'), ((8573, 8596), 'numpy.sum', 'np.sum', (['bpowers'], {'axis': '(0)'}), '(bpowers, axis=0)\n', (8579, 8596), True, 'import numpy as np\n'), ((8644, 8665), 'numpy.sum', 'np.sum', (['self.capacity'], {}), '(self.capacity)\n', (8650, 8665), True, 'import numpy as np\n'), ((10915, 10955), 'numpy.max', 'np.max', (['turbulence_tab[z, :i + 1, i + 1]'], {}), '(turbulence_tab[z, :i + 1, i + 1])\n', (10921, 10955), True, 'import numpy as np\n'), ((13170, 13210), 'numpy.max', 'np.max', (['turbulence_tab[z, :i + 1, i + 1]'], {}), '(turbulence_tab[z, :i + 1, i + 1])\n', (13176, 13210), True, 'import numpy as np\n'), ((11044, 11084), 'numpy.max', 'np.max', (['turbulence_tab[z, :i + 1, i + 1]'], {}), '(turbulence_tab[z, :i + 1, i + 1])\n', (11050, 11084), True, 'import numpy as np\n')] |
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Test that `LocalSnapshot` and `LocalSnapshotGPU` work."""
from copy import deepcopy
import hoomd
from hoomd.data.array import HOOMDGPUArray
import numpy as np
import pytest
try:
# This try block is purely to allow testing locally without mpi4py. We could
# require it for testing, and simplify the logic here. The CI containers all
# have mpi4py.
from mpi4py import MPI
except ImportError:
skip_mpi4py = True
else:
skip_mpi4py = False
skip_mpi4py = pytest.mark.skipif(skip_mpi4py,
reason='mpi4py could not be imported.')
try:
# We use the CUPY_IMPORTED variable to allow for local GPU testing without
# CuPy installed. This code could be simplified to only work with CuPy, by
# requiring its installation for testing. The CI containers already have
# CuPy installed when build for the GPU.
import cupy
CUPY_IMPORTED = True
except ImportError:
CUPY_IMPORTED = False
"""
_N and _types are distinct in that the local snapshot does not know about them.
We use the underscore to signify this. Those keys are skipped when testing the
local snapshots, though are still used to define the state.
"""
# Number of particles in the test snapshot.
Np = 5
# Expected per-particle properties: each entry records the dtype family
# ('np_type'), the initial values written into the snapshot ('value'), the
# values tests write through the local snapshot ('new_value'), and the full
# global array shape ('shape').  A 'value' of None means "set by HOOMD, not
# by us" (e.g. tags).  Keys starting with '_' (_N, _types) configure the
# snapshot itself and are skipped when testing local-snapshot arrays.
_particle_data = dict(
    _N=Np,
    position=dict(np_type=np.floating,
                  value=[[-1, -1, -1], [-1, -1, 0], [-1, 0, 0], [1, 1, 1],
                         [1, 0, 0]],
                  new_value=[[5, 5, 5]] * Np,
                  shape=(Np, 3)),
    velocity=dict(np_type=np.floating,
                  value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)),
                  new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)),
                  shape=(Np, 3)),
    acceleration=dict(np_type=np.floating,
                      value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)),
                      new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)),
                      shape=(Np, 3)),
    typeid=dict(np_type=np.integer,
                value=[0, 0, 0, 1, 1],
                new_value=[1, 1, 1, 0, 0],
                shape=(Np,)),
    mass=dict(np_type=np.floating,
              value=[5, 4, 3, 2, 1],
              new_value=[1, 2, 3, 4, 5],
              shape=(Np,)),
    charge=dict(np_type=np.floating,
                value=[1, 2, 3, 2, 1],
                new_value=[-1, -1, -3, -2, -1],
                shape=(Np,)),
    diameter=dict(np_type=np.floating,
                  value=[5, 2, 3, 2, 5],
                  new_value=[2, 1, 0.5, 1, 2],
                  shape=(Np,)),
    image=dict(np_type=np.integer,
               value=np.linspace(-10, 20, Np * 3, dtype=int).reshape(Np, 3),
               new_value=np.linspace(-20, 10, Np * 3, dtype=int).reshape(Np, 3),
               shape=(Np, 3)),
    tag=dict(np_type=np.unsignedinteger, value=None, shape=(Np,)),
    _types=['p1', 'p2'])
# Properties that exist only on the local snapshot (net force/torque/etc.);
# these are seeded through cpu_local_snapshot in the base_simulation fixture
# rather than via the global snapshot.
_particle_local_data = dict(
    net_force=dict(np_type=np.floating,
                   value=np.linspace(0.5, 4.5, Np * 3).reshape((Np, 3)),
                   new_value=np.linspace(6, 12, Np * 3).reshape((Np, 3)),
                   shape=(Np, 3)),
    net_torque=dict(np_type=np.floating,
                    value=np.linspace(-0.5, 2.5, Np * 3).reshape((Np, 3)),
                    new_value=np.linspace(12.75, 25, Np * 3).reshape((Np, 3)),
                    shape=(Np, 3)),
    net_virial=dict(np_type=np.floating,
                    value=np.linspace(-1.5, 6.5, Np * 6).reshape((Np, 6)),
                    new_value=np.linspace(9.75, 13.12, Np * 6).reshape((Np, 6)),
                    shape=(Np, 6)),
    net_energy=dict(np_type=np.floating,
                    value=np.linspace(0.5, 3.5, Np),
                    new_value=np.linspace(0, 4.2, Np),
                    shape=(Np,)),
)
# Bonds: 2-particle groups.
Nb = 2
_bond_data = dict(_N=Nb,
                  typeid=dict(np_type=np.unsignedinteger,
                              value=[0, 1],
                              new_value=[1, 0],
                              shape=(Nb,)),
                  group=dict(np_type=np.unsignedinteger,
                             value=[[0, 1], [2, 3]],
                             new_value=[[1, 0], [3, 2]],
                             shape=(Nb, 2)),
                  tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)),
                  _types=['b1', 'b2'])
# Angles: 3-particle groups.
Na = 2
_angle_data = dict(_N=Na,
                   typeid=dict(np_type=np.unsignedinteger,
                               value=[1, 0],
                               new_value=[0, 1],
                               shape=(Na,)),
                   group=dict(np_type=np.unsignedinteger,
                              value=[[0, 1, 2], [2, 3, 4]],
                              new_value=[[1, 3, 4], [0, 2, 4]],
                              shape=(Na, 3)),
                   tag=dict(np_type=np.unsignedinteger, value=None,
                            shape=(Na,)),
                   _types=['a1', 'a2'])
# Dihedrals: 4-particle groups.
Nd = 2
_dihedral_data = dict(_N=Nd,
                      typeid=dict(np_type=np.unsignedinteger,
                                  value=[1, 0],
                                  new_value=[0, 1],
                                  shape=(Nd,)),
                      group=dict(np_type=np.unsignedinteger,
                                 value=[[0, 1, 2, 3], [1, 2, 3, 4]],
                                 new_value=[[4, 3, 2, 1], [2, 4, 0, 1]],
                                 shape=(Nd, 4)),
                      tag=dict(np_type=np.unsignedinteger,
                               value=None,
                               shape=(Nd,)),
                      _types=['d1', 'd2'])
# Impropers: 4-particle groups with a single type (typeid has no new_value,
# so the setting check is skipped for it).
Ni = 2
_improper_data = dict(_N=Ni,
                      typeid=dict(np_type=np.unsignedinteger,
                                  value=[0, 0],
                                  shape=(Ni,)),
                      group=dict(np_type=np.unsignedinteger,
                                 value=[[3, 2, 1, 0], [1, 2, 3, 4]],
                                 new_value=[[1, 2, 3, 0], [4, 2, 3, 1]],
                                 shape=(Ni, 4)),
                      tag=dict(np_type=np.unsignedinteger,
                               value=None,
                               shape=(Ni,)),
                      _types=['i1'])
# Distance constraints: a float 'value' (the constraint length) instead of a
# typeid.
Nc = 3
_constraint_data = dict(
    _N=Nc,
    value=dict(np_type=np.floating,
               value=[2.5, 0.5, 2.],
               new_value=[3., 1.5, 1.],
               shape=(Nc,)),
    group=dict(np_type=np.unsignedinteger,
               value=[[0, 1], [2, 3], [1, 3]],
               new_value=[[4, 1], [3, 1], [2, 4]],
               shape=(Nc, 2)),
    tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)),
)
# Special pairs: 2-particle groups.
Npa = 2
_pair_data = dict(_N=Npa,
                  typeid=dict(np_type=np.unsignedinteger,
                              value=[0, 1],
                              new_value=[1, 0],
                              shape=(Npa,)),
                  group=dict(np_type=np.unsignedinteger,
                             value=[[0, 1], [2, 3]],
                             new_value=[[4, 1], [0, 3]],
                             shape=(Npa, 2)),
                  tag=dict(np_type=np.unsignedinteger, value=None,
                           shape=(Npa,)),
                  _types=['p1', 'p2'])
# Read-only global arrays (currently just the reverse-tag lookup) expected on
# every snapshot section; 'value' is None because HOOMD assigns rtags itself.
_global_dict = dict(rtag=dict(
    particles=dict(np_type=np.unsignedinteger, value=None, shape=(Np,)),
    bonds=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)),
    angles=dict(np_type=np.unsignedinteger, value=None, shape=(Na,)),
    dihedrals=dict(np_type=np.unsignedinteger, value=None, shape=(Nd,)),
    impropers=dict(np_type=np.unsignedinteger, value=None, shape=(Ni,)),
    constraints=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)),
    pairs=dict(np_type=np.unsignedinteger, value=None, shape=(Npa,)),
))
@pytest.fixture(scope='session')
def base_snapshot(device):
    """Session-wide snapshot built from the module-level data tables."""

    def _fill_section(snap, data, section_name):
        """Copy one data table (e.g. ``_particle_data``) into ``snap``."""
        section = getattr(snap, section_name)
        for key, entry in data.items():
            if key.startswith('_'):
                # Underscore keys (_N, _types) set plain snapshot attributes.
                setattr(section, key[1:], entry)
            elif entry['value'] is not None:
                try:
                    getattr(section, key)[:] = entry['value']
                except TypeError:
                    # Attributes that do not support slice assignment are
                    # replaced wholesale.
                    setattr(section, key, entry['value'])

    snapshot = hoomd.Snapshot(device.communicator)
    # Snapshot data is only defined on rank 0; other ranks receive it when
    # the state is created.
    if snapshot.communicator.rank == 0:
        snapshot.configuration.box = [2.1, 2.1, 2.1, 0, 0, 0]
        for table, name in ((_particle_data, 'particles'),
                            (_bond_data, 'bonds'),
                            (_angle_data, 'angles'),
                            (_dihedral_data, 'dihedrals'),
                            (_improper_data, 'impropers'),
                            (_constraint_data, 'constraints'),
                            (_pair_data, 'pairs')):
            _fill_section(snapshot, table, name)
    return snapshot
@pytest.fixture(params=[
    'particles', 'bonds', 'angles', 'dihedrals', 'impropers', 'constraints',
    'pairs'
])
def snapshot_section(request):
    """Parameterize a test over every snapshot section name."""
    return request.param
@pytest.fixture(scope="function",
params=[(section_name, prop_name, prop_dict)
for prop_name, global_prop_dict in _global_dict.items()
for section_name, prop_dict in global_prop_dict.items()
],
ids=lambda x: x[0] + '-' + x[1])
def global_property(request):
return request.param
# Flattens every section data table into (section, property, expected_dict)
# triples, skipping the underscore configuration keys (_N, _types).  Particle
# properties merge the snapshot-backed and local-only tables.
@pytest.fixture(
    scope='function',
    params=[(name, prop_name, prop_dict)
            for name, section_dict in [('particles', {
                **_particle_data,
                **_particle_local_data
            }), ('bonds', _bond_data), (
                'angles', _angle_data), (
                    'dihedrals',
                    _dihedral_data), (
                        'impropers',
                        _improper_data), (
                            'constraints',
                            _constraint_data), ('pairs', _pair_data)]
            for prop_name, prop_dict in section_dict.items()
            if not prop_name.startswith('_')],
    ids=lambda x: x[0] + '-' + x[1])
def section_name_dict(request):
    """Parameterization of expected values for local_snapshot properties.
    Examples include ``('particles', 'position', position_dict)`` where
    ``position_dict`` is the dictionary with the expected typecodes, shape, and
    value of particle positions.
    """
    # deepcopy so a test mutating the expected dict cannot leak into the
    # next parameterized test (fixture params are shared objects).
    return deepcopy(request.param)
# '' -> plain property, 'ghost_' -> prefix form, '_with_ghost' -> suffix form.
@pytest.fixture(scope='function',
                params=['', 'ghost_', '_with_ghost'],
                ids=lambda x: x.strip('_'))
def affix(request):
    """Parameterizes over the different variations of a local_snapshot property.
    These include ``property``, ``ghost_property``, and
    ``property_with_ghosts``.
    """
    return request.param
def get_property_name_from_affix(name, affix):
    """Combine a property name with a ghost affix.

    A leading-underscore affix (``'_with_ghost'``) is appended, a
    trailing-underscore affix (``'ghost_'``) is prepended, and any other
    affix (the empty string) leaves the name unchanged.
    """
    prefix, suffix = '', ''
    if affix.startswith('_'):
        suffix = affix
    elif affix.endswith('_'):
        prefix = affix
    return prefix + name + suffix
def general_array_equality(arr1, arr2):
    """Allows checking of equality with both HOOMDArrays and HOOMDGPUArrays.

    Floating-point data is compared with ``allclose`` (CuPy when either
    side is a GPU buffer, NumPy otherwise); everything else is compared
    exactly element by element.
    """
    operands = (arr1, arr2)
    has_floats = any(np.issubdtype(a.dtype, np.floating) for a in operands)
    if not has_floats:
        return all(arr1.ravel() == arr2.ravel())
    on_gpu = any(isinstance(a, HOOMDGPUArray) for a in operands)
    close = cupy.allclose if on_gpu else np.allclose
    return close(arr1, arr2)
def check_type(data, prop_dict, tags):
    """Check that the expected dtype family is found for local snapshots.

    ``tags`` is unused here; it is part of the shared property-check
    signature (see ``property_check``).
    """
    expected_kind = prop_dict['np_type']
    assert np.issubdtype(data.dtype, expected_kind)
def check_shape(data, prop_dict, tags):
    """Check shape of properties in the snapshot.

    The leading dimension is the number of local tags; an empty
    ``HOOMDGPUArray`` collapses to a flat ``(0,)`` shape instead.
    """
    expected = (len(tags),) + prop_dict['shape'][1:]
    if isinstance(data, HOOMDGPUArray) and len(tags) == 0:
        expected = (0,)
    assert data.shape == expected
def check_getting(data, prop_dict, tags):
    """Checks getting properties of the state through a local snapshot."""
    on_gpu = isinstance(data, HOOMDGPUArray)
    if on_gpu and not CUPY_IMPORTED:
        pytest.skip("Not available for HOOMDGPUArray without CuPy.")
    # Nothing to compare when the rank owns no items or no expected
    # values were recorded (e.g. tags).
    if len(tags) == 0 or prop_dict['value'] is None:
        return None
    make_array = cupy.array if on_gpu else np.array
    expected_values = make_array(prop_dict['value'])
    assert general_array_equality(data, expected_values[tags.tolist()])
def check_setting(data, prop_dict, tags):
    """Checks setting properties of the state through a local snapshot.

    Also tests error raising for read only arrays: writing to a
    read-only buffer must raise ``ValueError``; writable buffers must
    hold the newly written values afterwards.
    """
    on_gpu = isinstance(data, HOOMDGPUArray)
    if on_gpu and not CUPY_IMPORTED:
        pytest.skip("Not available for HOOMDGPUArray without CuPy.")
    # Properties without a recorded 'new_value' are never written.
    if 'new_value' not in prop_dict:
        return None
    if on_gpu:
        new_values = cupy.array(prop_dict['new_value'])[tags.tolist()]
    else:
        new_values = np.array(prop_dict['new_value'])[tags]
    if not data.read_only:
        data[:] = new_values
        assert general_array_equality(data, new_values)
    else:
        with pytest.raises(ValueError):
            data[:] = new_values
@pytest.fixture(scope='function',
                params=[check_type, check_shape, check_getting, check_setting])
def property_check(request):
    """Parameterizes different types of checks on local_snapshot properties."""
    return request.param
class TestLocalSnapshots:
    """Base class for CPU and GPU based localsnapshot tests."""
    @staticmethod
    def check_box(local_snapshot, global_box, ranks):
        """General check that ``box`` and ``local_box`` properties work."""
        assert type(local_snapshot.global_box) == hoomd.Box
        assert type(local_snapshot.local_box) == hoomd.Box
        assert local_snapshot.global_box == global_box
        # The local box and global box are equal if and only if
        # we run on a single rank.
        assert (local_snapshot.local_box == global_box) == (ranks == 1)
    def test_box(self, base_simulation, base_snapshot):
        """Run ``check_box`` on every local snapshot the device exposes."""
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr) as data:
                self.check_box(data, sim.state.box,
                               sim.device.communicator.num_ranks)
    @staticmethod
    def check_tag_shape(base_snapshot, local_snapshot, group, ranks):
        """Verify local tag-array sizes for one snapshot section."""
        mpi_comm = MPI.COMM_WORLD
        # The global N is only defined on rank 0; broadcast it to all ranks.
        if base_snapshot.communicator.rank == 0:
            N = getattr(base_snapshot, group).N
        else:
            N = None
        N = mpi_comm.bcast(N, root=0)
        # check particles tag size
        if group == 'particles':
            # Particles are distributed across ranks: the local tag counts
            # must sum to the global particle count.
            total_len = mpi_comm.allreduce(len(local_snapshot.particles.tag),
                                           op=MPI.SUM)
            assert total_len == N
        else:
            local_snapshot_section = getattr(local_snapshot, group)
            if ranks > 1:
                # Each rank holds at most the global number of groups.
                assert len(local_snapshot_section.tag) <= N
            else:
                assert len(local_snapshot_section.tag) == N
    @skip_mpi4py
    @pytest.mark.cupy_optional
    def test_tags_shape(self, base_simulation, base_snapshot, snapshot_section):
        """Checks that tags are the appropriate size from local snapshots.
        tags are used for checking other shapes so this is necessary to validate
        those tests.
        """
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr) as data:
                self.check_tag_shape(base_snapshot, data, snapshot_section,
                                     sim.device.communicator.num_ranks)
    @staticmethod
    def check_global_properties(prop, global_property_dict, N):
        """Check shape, dtype, value, and read-only-ness of a global array."""
        assert prop.shape == global_property_dict['shape']
        assert np.issubdtype(prop.dtype, global_property_dict['np_type'])
        if isinstance(prop, HOOMDGPUArray) and not CUPY_IMPORTED:
            return
        else:
            if global_property_dict['value'] is not None:
                general_array_equality(prop, global_property_dict['value'])
            # Global properties are read-only; writing must raise.
            with pytest.raises(ValueError):
                prop[:] = 1
    @skip_mpi4py
    @pytest.mark.cupy_optional
    def test_cpu_global_properties(self, base_simulation, base_snapshot,
                                   global_property):
        """Check global (rtag) arrays through the CPU local snapshot."""
        section_name, prop_name, prop_dict = global_property
        sim = base_simulation()
        snapshot = sim.state.snapshot
        mpi_comm = MPI.COMM_WORLD
        # Global section sizes are only known on rank 0; broadcast them.
        if snapshot.communicator.rank == 0:
            N = getattr(snapshot, section_name).N
        else:
            N = None
        N = mpi_comm.bcast(N, root=0)
        with sim.state.cpu_local_snapshot as data:
            self.check_global_properties(
                getattr(getattr(data, section_name), prop_name), prop_dict, N)
    @pytest.mark.cupy_optional
    def test_arrays_properties(self, base_simulation, section_name_dict, affix,
                               property_check):
        """This test makes extensive use of parameterizing in pytest.
        This test tests the type, shape, getting, and setting of array values in
        the local snapshot. We test all properties including ghost and both
        ghost and normal particles, bonds, etc.
        """
        name, property_name, property_dict = section_name_dict
        property_name = get_property_name_from_affix(property_name, affix)
        tag_name = get_property_name_from_affix('tag', affix)
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr) as data:
                # gets the particle, bond, etc data
                snapshot_section = getattr(data, name)
                hoomd_buffer = getattr(snapshot_section, property_name)
                tags = getattr(snapshot_section, tag_name)
                property_check(hoomd_buffer, property_dict, tags)
    def test_run_failure(self, base_simulation):
        """``Simulation.run`` must fail while a local snapshot is open."""
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr):
                with pytest.raises(RuntimeError):
                    sim.run(1)
    def test_setting_snapshot_failure(self, base_simulation, base_snapshot):
        """Replacing ``state.snapshot`` must fail while a local snapshot is open."""
        sim = base_simulation()
        for lcl_snapshot_attr in self.get_snapshot_attr(sim):
            with getattr(sim.state, lcl_snapshot_attr):
                with pytest.raises(RuntimeError):
                    sim.state.snapshot = base_snapshot
    @pytest.fixture
    def base_simulation(self, simulation_factory, base_snapshot):
        """Creates the simulation from the base_snapshot."""
        def factory():
            sim = simulation_factory(base_snapshot)
            # Seed the local-only properties (net force/torque/virial/energy),
            # which are not part of the global snapshot, through the
            # CPU local snapshot.
            with sim.state.cpu_local_snapshot as snap:
                particle_data = getattr(snap, 'particles')
                tags = snap.particles.tag
                for attr, inner_dict in _particle_local_data.items():
                    arr_values = np.array(inner_dict['value'])[tags]
                    getattr(particle_data, attr)[:] = arr_values
            return sim
        return factory
    def get_snapshot_attr(self, sim):
        """Yield the local-snapshot attribute names valid for ``sim``'s device."""
        if isinstance(sim.device, hoomd.device.CPU):
            yield 'cpu_local_snapshot'
        else:
            # GPU devices expose both a CPU and a GPU local snapshot.
            yield 'cpu_local_snapshot'
            yield 'gpu_local_snapshot'
| [
"numpy.allclose",
"numpy.issubdtype",
"pytest.mark.skipif",
"numpy.array",
"cupy.allclose",
"pytest.raises",
"cupy.array",
"copy.deepcopy",
"pytest.fixture",
"numpy.linspace",
"pytest.skip",
"hoomd.Snapshot"
] | [((637, 708), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_mpi4py'], {'reason': '"""mpi4py could not be imported."""'}), "(skip_mpi4py, reason='mpi4py could not be imported.')\n", (655, 708), False, 'import pytest\n'), ((7926, 7957), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (7940, 7957), False, 'import pytest\n'), ((9178, 9287), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['particles', 'bonds', 'angles', 'dihedrals', 'impropers', 'constraints',\n 'pairs']"}), "(params=['particles', 'bonds', 'angles', 'dihedrals',\n 'impropers', 'constraints', 'pairs'])\n", (9192, 9287), False, 'import pytest\n'), ((13709, 13809), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'params': '[check_type, check_shape, check_getting, check_setting]'}), "(scope='function', params=[check_type, check_shape,\n check_getting, check_setting])\n", (13723, 13809), False, 'import pytest\n'), ((8614, 8649), 'hoomd.Snapshot', 'hoomd.Snapshot', (['device.communicator'], {}), '(device.communicator)\n', (8628, 8649), False, 'import hoomd\n'), ((10752, 10775), 'copy.deepcopy', 'deepcopy', (['request.param'], {}), '(request.param)\n', (10760, 10775), False, 'from copy import deepcopy\n'), ((11870, 11917), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', "prop_dict['np_type']"], {}), "(data.dtype, prop_dict['np_type'])\n", (11883, 11917), True, 'import numpy as np\n'), ((12547, 12607), 'pytest.skip', 'pytest.skip', (['"""Not available for HOOMDGPUArray without CuPy."""'], {}), "('Not available for HOOMDGPUArray without CuPy.')\n", (12558, 12607), False, 'import pytest\n'), ((12748, 12778), 'cupy.array', 'cupy.array', (["prop_dict['value']"], {}), "(prop_dict['value'])\n", (12758, 12778), False, 'import cupy\n'), ((12815, 12843), 'numpy.array', 'np.array', (["prop_dict['value']"], {}), "(prop_dict['value'])\n", (12823, 12843), True, 'import numpy as np\n'), ((13214, 13274), 'pytest.skip', 'pytest.skip', 
(['"""Not available for HOOMDGPUArray without CuPy."""'], {}), "('Not available for HOOMDGPUArray without CuPy.')\n", (13225, 13274), False, 'import pytest\n'), ((16434, 16492), 'numpy.issubdtype', 'np.issubdtype', (['prop.dtype', "global_property_dict['np_type']"], {}), "(prop.dtype, global_property_dict['np_type'])\n", (16447, 16492), True, 'import numpy as np\n'), ((11459, 11494), 'numpy.issubdtype', 'np.issubdtype', (['a.dtype', 'np.floating'], {}), '(a.dtype, np.floating)\n', (11472, 11494), True, 'import numpy as np\n'), ((11606, 11631), 'cupy.allclose', 'cupy.allclose', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (11619, 11631), False, 'import cupy\n'), ((11665, 11688), 'numpy.allclose', 'np.allclose', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (11676, 11688), True, 'import numpy as np\n'), ((13394, 13428), 'cupy.array', 'cupy.array', (["prop_dict['new_value']"], {}), "(prop_dict['new_value'])\n", (13404, 13428), False, 'import cupy\n'), ((13475, 13507), 'numpy.array', 'np.array', (["prop_dict['new_value']"], {}), "(prop_dict['new_value'])\n", (13483, 13507), True, 'import numpy as np\n'), ((13551, 13576), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13564, 13576), False, 'import pytest\n'), ((3760, 3785), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3.5)', 'Np'], {}), '(0.5, 3.5, Np)\n', (3771, 3785), True, 'import numpy as np\n'), ((3817, 3840), 'numpy.linspace', 'np.linspace', (['(0)', '(4.2)', 'Np'], {}), '(0, 4.2, Np)\n', (3828, 3840), True, 'import numpy as np\n'), ((16743, 16768), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16756, 16768), False, 'import pytest\n'), ((18811, 18838), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (18824, 18838), False, 'import pytest\n'), ((19120, 19147), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (19133, 19147), False, 'import pytest\n'), ((1678, 1704), 'numpy.linspace', 'np.linspace', (['(-4)', 
'(4)', '(Np * 3)'], {}), '(-4, 4, Np * 3)\n', (1689, 1704), True, 'import numpy as np\n'), ((1751, 1776), 'numpy.linspace', 'np.linspace', (['(4)', '(8)', '(Np * 3)'], {}), '(4, 8, Np * 3)\n', (1762, 1776), True, 'import numpy as np\n'), ((1900, 1926), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(Np * 3)'], {}), '(-4, 4, Np * 3)\n', (1911, 1926), True, 'import numpy as np\n'), ((1977, 2002), 'numpy.linspace', 'np.linspace', (['(4)', '(8)', '(Np * 3)'], {}), '(4, 8, Np * 3)\n', (1988, 2002), True, 'import numpy as np\n'), ((2717, 2756), 'numpy.linspace', 'np.linspace', (['(-10)', '(20)', '(Np * 3)'], {'dtype': 'int'}), '(-10, 20, Np * 3, dtype=int)\n', (2728, 2756), True, 'import numpy as np\n'), ((2798, 2837), 'numpy.linspace', 'np.linspace', (['(-20)', '(10)', '(Np * 3)'], {'dtype': 'int'}), '(-20, 10, Np * 3, dtype=int)\n', (2809, 2837), True, 'import numpy as np\n'), ((3072, 3101), 'numpy.linspace', 'np.linspace', (['(0.5)', '(4.5)', '(Np * 3)'], {}), '(0.5, 4.5, Np * 3)\n', (3083, 3101), True, 'import numpy as np\n'), ((3149, 3175), 'numpy.linspace', 'np.linspace', (['(6)', '(12)', '(Np * 3)'], {}), '(6, 12, Np * 3)\n', (3160, 3175), True, 'import numpy as np\n'), ((3296, 3326), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(2.5)', '(Np * 3)'], {}), '(-0.5, 2.5, Np * 3)\n', (3307, 3326), True, 'import numpy as np\n'), ((3375, 3405), 'numpy.linspace', 'np.linspace', (['(12.75)', '(25)', '(Np * 3)'], {}), '(12.75, 25, Np * 3)\n', (3386, 3405), True, 'import numpy as np\n'), ((3527, 3557), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(6.5)', '(Np * 6)'], {}), '(-1.5, 6.5, Np * 6)\n', (3538, 3557), True, 'import numpy as np\n'), ((3606, 3638), 'numpy.linspace', 'np.linspace', (['(9.75)', '(13.12)', '(Np * 6)'], {}), '(9.75, 13.12, Np * 6)\n', (3617, 3638), True, 'import numpy as np\n'), ((19687, 19716), 'numpy.array', 'np.array', (["inner_dict['value']"], {}), "(inner_dict['value'])\n", (19695, 19716), True, 'import numpy as np\n')] |
from abc import ABC
import numpy as np
import gym
import mujoco_py
from gym.envs.registration import register
def change_fetch_model(change_model):
    """Swap the installed Fetch ``shared.xml`` model for the local
    yellow-object variant, or restore the backed-up original.

    When ``change_model`` is True the stock model is backed up once and
    then overwritten; when False any existing backup is copied back.
    """
    import os
    import shutil
    fetch_assets = os.path.join(os.path.dirname(gym.__file__),
                                'envs/robotics/assets/fetch')
    shared_xml = os.path.join(fetch_assets, 'shared.xml')
    backup_xml = os.path.join(fetch_assets, 'shared_backup.xml')
    if change_model:
        # Keep one pristine copy of the stock model before overwriting it.
        if not os.path.exists(backup_xml):
            shutil.copy2(shared_xml, backup_xml)
        # NOTE(review): resolved relative to the CWD — assumes the process
        # runs from the directory containing fetch_yellow_obj.xml.
        shutil.copy2('fetch_yellow_obj.xml', shared_xml)
    elif os.path.exists(backup_xml):
        shutil.copy2(backup_xml, shared_xml)
def make(domain_name, task_name, seed, from_pixels, height, width, cameras=range(1),
         visualize_reward=False, frame_skip=None, reward_type='dense', change_model=False):
    """Create a seeded, wrapped environment for ``domain_name``.

    ``task_name``, ``visualize_reward`` and ``frame_skip`` are accepted
    for signature compatibility but are currently unused.
    """
    wrapper_kwargs = dict(from_pixels=from_pixels, cameras=cameras,
                          height=height, width=width)
    if 'RealArm' in domain_name:
        import gym_xarm  # noqa: F401 -- importing registers the real-arm envs
        base_env = gym.make(domain_name)
        base_env.env.set_reward_mode(reward_type)
        env = RealEnvWrapper(base_env, **wrapper_kwargs)
    else:
        change_fetch_model(change_model)
        base_env = gym.make(domain_name, reward_type=reward_type)
        env = GymEnvWrapper(base_env, **wrapper_kwargs)
    env.seed(seed)
    return env
class EnvWrapper(gym.Env, ABC):
    """Base wrapper producing multi-camera pixel (or state) observations.

    When ``from_pixels`` is True, observations are the renders of the cameras
    listed in ``cameras`` concatenated along the channel axis; otherwise the
    wrapped env's own state observation is returned.
    """

    def __init__(self, env, cameras, from_pixels=True, height=100, width=100, channels_first=True):
        # Eight preset cameras circling the workspace in 45-degree azimuth
        # steps (same order as the original hand-written dicts).
        azimuths = [90, 135, 180, 225, 270, 315, 0, 45]
        self.all_cameras = [{'trackbodyid': -1, 'distance': 1.5,
                             'lookat': np.array((0.0, 0.6, 0)),
                             'elevation': -45.0, 'azimuth': azimuth}
                            for azimuth in azimuths]
        self._env = env
        self.cameras = cameras
        self.from_pixels = from_pixels
        self.height = height
        self.width = width
        self.channels_first = channels_first
        self.special_reset = None
        self.special_reset_save = None
        self.hybrid_obs = False
        self.viewer = None
        self.metadata = {
            'render.modes': ['human', 'rgb_array'],
            'video.frames_per_second': int(np.round(1.0 / self.dt))
        }
        # One 3-channel image per camera, stacked along the channel axis.
        shape = [3 * len(cameras), height, width] if channels_first else [height, width, 3 * len(cameras)]
        self._observation_space = gym.spaces.Box(
            low=0, high=255, shape=shape, dtype=np.uint8
        )
        self._state_obs = None
        self.change_camera()
        self.reset()

    def change_camera(self):
        # Hook for subclasses to adjust or extend self.all_cameras.
        return

    @property
    def observation_space(self):
        if self.from_pixels:
            return self._observation_space
        return self._env.observation_space

    @property
    def action_space(self):
        return self._env.action_space

    def seed(self, seed=None):
        return self._env.seed(seed)

    def reset_model(self):
        self._env.reset()

    def viewer_setup(self, camera_id=0):
        # Copy the preset camera parameters onto the mujoco viewer camera.
        for key, value in self.all_cameras[camera_id].items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def set_hybrid_obs(self, mode):
        self.hybrid_obs = mode

    def _get_obs(self):
        if not self.from_pixels:
            return self._get_state_obs()
        imgs = [self.render(mode='rgb_array', camera_id=c) for c in self.cameras]
        pixel_obs = np.concatenate(imgs, axis=0 if self.channels_first else 2)
        if self.hybrid_obs:
            return [pixel_obs, self._get_hybrid_state()]
        return pixel_obs

    def _get_state_obs(self):
        return self._state_obs

    def _get_hybrid_state(self):
        return self._state_obs

    @property
    def hybrid_state_shape(self):
        if self.hybrid_obs:
            return self._get_hybrid_state().shape
        return None

    def step(self, action):
        self._state_obs, reward, done, info = self._env.step(action)
        return self._get_obs(), reward, done, info

    def reset(self, save_special_steps=False):
        self._state_obs = self._env.reset()
        return self._get_obs()

    def set_state(self, qpos, qvel):
        self._env.set_state(qpos, qvel)

    @property
    def dt(self):
        # Fall back to 1 for envs that do not expose a timestep.
        return self._env.dt if hasattr(self._env, 'dt') else 1

    @property
    def _max_episode_steps(self):
        return self._env.max_path_length

    def do_simulation(self, ctrl, n_frames):
        # BUG FIX: this used to call the misspelled `do_simulatiaon`, which
        # raised AttributeError on any env exposing the standard mujoco API.
        self._env.do_simulation(ctrl, n_frames)

    def render(self, mode='human', camera_id=0, height=None, width=None):
        """Renders the env; mode 'rgb_array' returns an image from camera_id."""
        if mode == 'human':
            self._env.render()
        if height is None:
            height = self.height
        if width is None:
            width = self.width
        if mode == 'rgb_array':
            if isinstance(self, GymEnvWrapper):
                self._env.unwrapped._render_callback()
            viewer = self._get_viewer(camera_id)
            # Calling render twice to fix Mujoco change of resolution bug.
            viewer.render(width, height, camera_id=-1)
            viewer.render(width, height, camera_id=-1)
            # window size used for old mujoco-py:
            data = viewer.read_pixels(width, height, depth=False)
            # original image is upside-down, so flip it
            data = data[::-1, :, :]
            if self.channels_first:
                data = data.transpose((2, 0, 1))
            return data

    def close(self):
        if self.viewer is not None:
            self.viewer = None
        self._env.close()

    def _get_viewer(self, camera_id):
        # Lazily create one offscreen render context; re-point it at the
        # requested preset camera on every call.
        if self.viewer is None:
            from mujoco_py import GlfwContext
            GlfwContext(offscreen=True)
            self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1)
        self.viewer_setup(camera_id)
        return self.viewer

    def get_body_com(self, body_name):
        return self._env.get_body_com(body_name)

    def state_vector(self):
        # NOTE(review): returns the bound method object, not its value —
        # callers may expect self._env.state_vector(); confirm before changing.
        return self._env.state_vector
class GymEnvWrapper(EnvWrapper):
    """EnvWrapper for gym fetch-style robotics envs with scripted reset moves."""

    def change_camera(self):
        # Re-aim every preset camera at the fetch table workspace.
        for c in self.all_cameras:
            c['lookat'] = np.array((1.3, 0.75, 0.4))
            c['distance'] = 1.2
        # Zoomed out cameras
        camera_8 = {'trackbodyid': -1, 'distance': 1.8, 'lookat': np.array((1.3, 0.75, 0.4)),
                    'elevation': -45.0, 'azimuth': 135}
        camera_9 = {'trackbodyid': -1, 'distance': 1.8, 'lookat': np.array((1.3, 0.75, 0.4)),
                    'elevation': -45.0, 'azimuth': 225}
        # Gripper head camera (its lookat is refreshed each observation in
        # update_tracking_cameras).
        camera_10 = {'trackbodyid': -1, 'distance': 0.2, 'lookat': np.array((1.3, 0.75, 0.4)),
                     'elevation': -90, 'azimuth': 0}
        self.all_cameras.append(camera_8)
        self.all_cameras.append(camera_9)
        self.all_cameras.append(camera_10)

    def update_tracking_cameras(self):
        # Camera 10 tracks the gripper; its position is the first three
        # entries of the state observation.
        gripper_pos = self._state_obs['observation'][:3].copy()
        self.all_cameras[10]['lookat'] = gripper_pos

    def _get_obs(self):
        self.update_tracking_cameras()
        return super()._get_obs()

    @property
    def _max_episode_steps(self):
        return self._env._max_episode_steps

    def set_special_reset(self, mode):
        # mode: 'close' (approach object) or 'grip' (grasp it) — see reset().
        self.special_reset = mode

    def register_special_reset_move(self, action, reward):
        # Log one scripted transition when reset(save_special_steps=True).
        if self.special_reset_save is not None:
            self.special_reset_save['obs'].append(self._get_obs())
            self.special_reset_save['act'].append(action)
            self.special_reset_save['reward'].append(reward)

    def go_to_pos(self, pos):
        """Drives the gripper towards `pos` with a P-controller, max 10 steps."""
        grip_pos = self._state_obs['observation'][:3]
        action = np.zeros(4)
        for _ in range(10):
            if np.linalg.norm(grip_pos - pos) < 0.02:
                break
            action[:3] = (pos - grip_pos) * 10
            # BUG FIX: the info dict used to be unpacked into the loop
            # variable `i`, shadowing it; use throwaway names instead.
            self._state_obs, reward, _done, _info = self._env.step(action)
            self.register_special_reset_move(action, reward)
            grip_pos = self._state_obs['observation'][:3]

    def raise_gripper(self):
        """Lifts the gripper 0.1 m above its current position."""
        grip_pos = self._state_obs['observation'][:3]
        raised_pos = grip_pos.copy()
        raised_pos[2] += 0.1
        self.go_to_pos(raised_pos)

    def open_gripper(self):
        action = np.array([0, 0, 0, 1])
        for _ in range(2):
            self._state_obs, reward, _done, _info = self._env.step(action)
            self.register_special_reset_move(action, reward)

    def close_gripper(self):
        action = np.array([0, 0, 0, -1])
        for _ in range(2):
            self._state_obs, reward, _done, _info = self._env.step(action)
            self.register_special_reset_move(action, reward)

    def reset(self, save_special_steps=False):
        """Resets the env, optionally running a scripted approach/grasp.

        Args:
            save_special_steps: if True, record the scripted transitions in
                self.special_reset_save ({'obs', 'act', 'reward'} lists).
        """
        self._state_obs = self._env.reset()
        if save_special_steps:
            self.special_reset_save = {'obs': [], 'act': [], 'reward': []}
            self.special_reset_save['obs'].append(self._get_obs())
        if self.special_reset == 'close' and self._env.has_object:
            # Move the gripper to a point slightly behind the object on the
            # object-goal line, so pushing moves the object towards the goal.
            obs = self._state_obs['observation']
            goal = self._state_obs['desired_goal']
            obj_pos = obs[3:6]
            goal_distance = np.linalg.norm(obj_pos - goal)
            desired_reset_pos = obj_pos + (obj_pos - goal) / goal_distance * 0.06
            desired_reset_pos_raised = desired_reset_pos.copy()
            desired_reset_pos_raised[2] += 0.1
            self.raise_gripper()
            self.go_to_pos(desired_reset_pos_raised)
            self.go_to_pos(desired_reset_pos)
        elif self.special_reset == 'grip' and self._env.has_object and not self._env.block_gripper:
            # Approach the object from above, then grasp it.
            obs = self._state_obs['observation']
            obj_pos = obs[3:6]
            above_obj = obj_pos.copy()
            above_obj[2] += 0.1
            self.open_gripper()
            self.raise_gripper()
            self.go_to_pos(above_obj)
            self.go_to_pos(obj_pos)
            self.close_gripper()
        return self._get_obs()

    def _get_state_obs(self):
        # Flat state: observation ++ achieved_goal ++ desired_goal.
        obs = np.concatenate([self._state_obs['observation'],
                              self._state_obs['achieved_goal'],
                              self._state_obs['desired_goal']])
        return obs

    def _get_hybrid_state(self):
        # NOTE(review): the membership tests below assume set_hybrid_obs() was
        # called with a container (e.g. ['robot', 'goal']); with the default
        # hybrid_obs=False the `in` test would raise TypeError.
        grip_pos = self._env.sim.data.get_site_xpos('robot0:grip')
        dt = self._env.sim.nsubsteps * self._env.sim.model.opt.timestep
        grip_velp = self._env.sim.data.get_site_xvelp('robot0:grip') * dt
        robot_qpos, robot_qvel = gym.envs.robotics.utils.robot_get_obs(self._env.sim)
        gripper_state = robot_qpos[-2:]
        gripper_vel = robot_qvel[-2:] * dt  # change to a scalar if the gripper is made symmetric
        robot_info = np.concatenate([grip_pos, gripper_state, grip_velp, gripper_vel])
        hybrid_obs_list = []
        if 'robot' in self.hybrid_obs:
            hybrid_obs_list.append(robot_info)
        if 'goal' in self.hybrid_obs:
            hybrid_obs_list.append(self._state_obs['desired_goal'])
        return np.concatenate(hybrid_obs_list)

    @property
    def observation_space(self):
        # NOTE(review): unconditionally reports the flat state space, even
        # when from_pixels=True (unlike the parent class) — confirm intent.
        shape = self._get_state_obs().shape
        return gym.spaces.Box(-np.inf, np.inf, shape=shape, dtype='float32')
class RealEnvWrapper(GymEnvWrapper):
    """Wrapper for the real-robot env; rendering is delegated to the env itself."""

    def render(self, mode='human', camera_id=0, height=None, width=None):
        if mode == 'human':
            self._env.render()
        height = self.height if height is None else height
        width = self.width if width is None else width
        if mode != 'rgb_array':
            return None
        frame = self._env.render(mode='rgb_array', height=height, width=width)
        if self.channels_first:
            frame = frame.transpose((2, 0, 1))
        if camera_id == 8:
            # Camera 8 drops the first three channels of the stacked image.
            frame = frame[3:]
        return frame

    def _get_obs(self):
        return self.render(mode='rgb_array', height=self.height, width=self.width)

    def _get_state_obs(self):
        # The real env exposes no flat state; fall back to pixels.
        return self._get_obs()

    def reset(self, save_special_steps=False):
        self._state_obs = self._env.reset(rand_pos=True)
        return self._get_obs()
| [
"os.path.exists",
"shutil.copy2",
"os.path.join",
"gym.spaces.Box",
"os.path.dirname",
"numpy.zeros",
"numpy.array",
"mujoco_py.GlfwContext",
"mujoco_py.MjRenderContextOffscreen",
"numpy.concatenate",
"numpy.linalg.norm",
"gym.envs.robotics.utils.robot_get_obs",
"gym.make",
"numpy.round"
] | [((199, 228), 'os.path.dirname', 'os.path.dirname', (['gym.__file__'], {}), '(gym.__file__)\n', (214, 228), False, 'import os\n'), ((298, 334), 'os.path.join', 'os.path.join', (['gym_folder', 'xml_folder'], {}), '(gym_folder, xml_folder)\n', (310, 334), False, 'import os\n'), ((355, 399), 'os.path.join', 'os.path.join', (['full_folder_path', '"""shared.xml"""'], {}), "(full_folder_path, 'shared.xml')\n", (367, 399), False, 'import os\n'), ((423, 474), 'os.path.join', 'os.path.join', (['full_folder_path', '"""shared_backup.xml"""'], {}), "(full_folder_path, 'shared_backup.xml')\n", (435, 474), False, 'import os\n'), ((611, 662), 'shutil.copy2', 'shutil.copy2', (['"""fetch_yellow_obj.xml"""', 'xml_file_path'], {}), "('fetch_yellow_obj.xml', xml_file_path)\n", (623, 662), False, 'import shutil\n'), ((684, 716), 'os.path.exists', 'os.path.exists', (['backup_file_path'], {}), '(backup_file_path)\n', (698, 716), False, 'import os\n'), ((1047, 1093), 'gym.make', 'gym.make', (['domain_name'], {'reward_type': 'reward_type'}), '(domain_name, reward_type=reward_type)\n', (1055, 1093), False, 'import gym\n'), ((1245, 1266), 'gym.make', 'gym.make', (['domain_name'], {}), '(domain_name)\n', (1253, 1266), False, 'import gym\n'), ((3493, 3553), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'shape', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=shape, dtype=np.uint8)\n', (3507, 3553), False, 'import gym\n'), ((9100, 9111), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (9108, 9111), True, 'import numpy as np\n'), ((9670, 9692), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (9678, 9692), True, 'import numpy as np\n'), ((9885, 9908), 'numpy.array', 'np.array', (['[0, 0, 0, -1]'], {}), '([0, 0, 0, -1])\n', (9893, 9908), True, 'import numpy as np\n'), ((11400, 11520), 'numpy.concatenate', 'np.concatenate', (["[self._state_obs['observation'], self._state_obs['achieved_goal'], self.\n _state_obs['desired_goal']]"], 
{}), "([self._state_obs['observation'], self._state_obs[\n 'achieved_goal'], self._state_obs['desired_goal']])\n", (11414, 11520), True, 'import numpy as np\n'), ((11875, 11927), 'gym.envs.robotics.utils.robot_get_obs', 'gym.envs.robotics.utils.robot_get_obs', (['self._env.sim'], {}), '(self._env.sim)\n', (11912, 11927), False, 'import gym\n'), ((12087, 12152), 'numpy.concatenate', 'np.concatenate', (['[grip_pos, gripper_state, grip_velp, gripper_vel]'], {}), '([grip_pos, gripper_state, grip_velp, gripper_vel])\n', (12101, 12152), True, 'import numpy as np\n'), ((12389, 12420), 'numpy.concatenate', 'np.concatenate', (['hybrid_obs_list'], {}), '(hybrid_obs_list)\n', (12403, 12420), True, 'import numpy as np\n'), ((12528, 12589), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 'np.inf'], {'shape': 'shape', 'dtype': '"""float32"""'}), "(-np.inf, np.inf, shape=shape, dtype='float32')\n", (12542, 12589), False, 'import gym\n'), ((511, 543), 'os.path.exists', 'os.path.exists', (['backup_file_path'], {}), '(backup_file_path)\n', (525, 543), False, 'import os\n'), ((557, 602), 'shutil.copy2', 'shutil.copy2', (['xml_file_path', 'backup_file_path'], {}), '(xml_file_path, backup_file_path)\n', (569, 602), False, 'import shutil\n'), ((730, 775), 'shutil.copy2', 'shutil.copy2', (['backup_file_path', 'xml_file_path'], {}), '(backup_file_path, xml_file_path)\n', (742, 775), False, 'import shutil\n'), ((1651, 1674), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (1659, 1674), True, 'import numpy as np\n'), ((1797, 1820), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (1805, 1820), True, 'import numpy as np\n'), ((1944, 1967), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (1952, 1967), True, 'import numpy as np\n'), ((2091, 2114), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (2099, 2114), True, 'import numpy as np\n'), ((2238, 2261), 'numpy.array', 'np.array', (['(0.0, 0.6, 
0)'], {}), '((0.0, 0.6, 0))\n', (2246, 2261), True, 'import numpy as np\n'), ((2385, 2408), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (2393, 2408), True, 'import numpy as np\n'), ((2532, 2555), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (2540, 2555), True, 'import numpy as np\n'), ((2677, 2700), 'numpy.array', 'np.array', (['(0.0, 0.6, 0)'], {}), '((0.0, 0.6, 0))\n', (2685, 2700), True, 'import numpy as np\n'), ((7136, 7163), 'mujoco_py.GlfwContext', 'GlfwContext', ([], {'offscreen': '(True)'}), '(offscreen=True)\n', (7147, 7163), False, 'from mujoco_py import GlfwContext\n'), ((7190, 7243), 'mujoco_py.MjRenderContextOffscreen', 'mujoco_py.MjRenderContextOffscreen', (['self._env.sim', '(-1)'], {}), '(self._env.sim, -1)\n', (7224, 7243), False, 'import mujoco_py\n'), ((7589, 7615), 'numpy.array', 'np.array', (['(1.3, 0.75, 0.4)'], {}), '((1.3, 0.75, 0.4))\n', (7597, 7615), True, 'import numpy as np\n'), ((7743, 7769), 'numpy.array', 'np.array', (['(1.3, 0.75, 0.4)'], {}), '((1.3, 0.75, 0.4))\n', (7751, 7769), True, 'import numpy as np\n'), ((7893, 7919), 'numpy.array', 'np.array', (['(1.3, 0.75, 0.4)'], {}), '((1.3, 0.75, 0.4))\n', (7901, 7919), True, 'import numpy as np\n'), ((8074, 8100), 'numpy.array', 'np.array', (['(1.3, 0.75, 0.4)'], {}), '((1.3, 0.75, 0.4))\n', (8082, 8100), True, 'import numpy as np\n'), ((10545, 10575), 'numpy.linalg.norm', 'np.linalg.norm', (['(obj_pos - goal)'], {}), '(obj_pos - goal)\n', (10559, 10575), True, 'import numpy as np\n'), ((3316, 3339), 'numpy.round', 'np.round', (['(1.0 / self.dt)'], {}), '(1.0 / self.dt)\n', (3324, 3339), True, 'import numpy as np\n'), ((4681, 4709), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (4695, 4709), True, 'import numpy as np\n'), ((4756, 4784), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(2)'}), '(imgs, axis=2)\n', (4770, 4784), True, 'import numpy as np\n'), ((9155, 9185), 
'numpy.linalg.norm', 'np.linalg.norm', (['(grip_pos - pos)'], {}), '(grip_pos - pos)\n', (9169, 9185), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse
from typing import Text, Union, Optional, Dict, Any
from rasa.nlu.constants import FEATURIZER_CLASS_ALIAS
from rasa.nlu.components import Component
from rasa.utils.tensorflow.constants import MEAN_POOLING, MAX_POOLING
class Features:
    """Stores the features produced by any featurizer."""

    def __init__(
        self,
        features: Union[np.ndarray, scipy.sparse.spmatrix],
        message_attribute: Text,
        origin: Text,
    ) -> None:
        """Creates a new feature container.

        Args:
            features: dense (numpy) or sparse (scipy) feature matrix.
            message_attribute: the message attribute these features describe.
            origin: name of the featurizer that produced the features.
        """
        self.features = features
        # NOTE(review): this stores the `type` builtin — the signature has no
        # `type` parameter. Looks like a dropped constructor argument; confirm
        # against callers before changing.
        self.type = type
        self.origin = origin
        self.message_attribute = message_attribute

    def is_sparse(self) -> bool:
        """Checks if features are sparse or not.

        Returns:
            True, if features are sparse, false otherwise.
        """
        return isinstance(self.features, scipy.sparse.spmatrix)

    def is_dense(self) -> bool:
        """Checks if features are dense or not.

        Returns:
            True, if features are dense, false otherwise.
        """
        return not self.is_sparse()

    def combine_with_features(
        self, additional_features: Optional[Union[np.ndarray, scipy.sparse.spmatrix]]
    ) -> Optional[Union[np.ndarray, scipy.sparse.spmatrix]]:
        """Combine the incoming features with this instance's features.

        Args:
            additional_features: additional features to add

        Returns:
            Combined features.

        Raises:
            ValueError: if one operand is sparse and the other dense, or if
                their dimensions do not match.
        """
        if additional_features is None:
            return self.features
        if self.is_dense() and isinstance(additional_features, np.ndarray):
            return self._combine_dense_features(self.features, additional_features)
        if self.is_sparse() and isinstance(additional_features, scipy.sparse.spmatrix):
            return self._combine_sparse_features(self.features, additional_features)
        raise ValueError("Cannot combine sparse and dense features.")

    @staticmethod
    def _combine_dense_features(
        features: np.ndarray, additional_features: np.ndarray
    ) -> np.ndarray:
        # Dense features are concatenated along the last (feature) axis; the
        # number of dimensions must agree.
        if features.ndim != additional_features.ndim:
            raise ValueError(
                f"Cannot combine dense features as sequence dimensions do not "
                f"match: {features.ndim} != {additional_features.ndim}."
            )
        return np.concatenate((features, additional_features), axis=-1)

    @staticmethod
    def _combine_sparse_features(
        features: scipy.sparse.spmatrix, additional_features: scipy.sparse.spmatrix
    ) -> scipy.sparse.spmatrix:
        from scipy.sparse import hstack

        # Sparse features are stacked horizontally; row counts must agree.
        if features.shape[0] != additional_features.shape[0]:
            raise ValueError(
                f"Cannot combine sparse features as sequence dimensions do not "
                f"match: {features.shape[0]} != {additional_features.shape[0]}."
            )
        return hstack([features, additional_features])
class Featurizer(Component):
    """Base class for featurizer components; guarantees an alias name is set."""

    def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
        component_config = component_config or {}
        # Guarantee every featurizer config carries an alias name so its
        # output can be traced back to the producing component.
        component_config.setdefault(FEATURIZER_CLASS_ALIAS, self.name)
        super().__init__(component_config)
class DenseFeaturizer(Featurizer):
    @staticmethod
    def _calculate_cls_vector(
        features: np.ndarray, pooling_operation: Text
    ) -> np.ndarray:
        """Pools the non-zero feature vectors into a single summary vector."""
        # Keep only rows with at least one non-zero entry.
        non_zero = np.array([vec for vec in features if vec.any()])
        if non_zero.size == 0:
            # Everything was zero: return a single all-zero vector.
            return np.zeros([1, features.shape[-1]])
        # Dispatch on the pooling operation; unknown operations are rejected.
        pooler = {MEAN_POOLING: np.mean, MAX_POOLING: np.max}.get(pooling_operation)
        if pooler is None:
            raise ValueError(
                f"Invalid pooling operation specified. Available operations are "
                f"'{MEAN_POOLING}' or '{MAX_POOLING}', but provided value is "
                f"'{pooling_operation}'."
            )
        return pooler(non_zero, axis=0, keepdims=True)
class SparseFeaturizer(Featurizer):
    """Marker base class for featurizers that produce sparse features."""

    pass
| [
"numpy.mean",
"numpy.max",
"numpy.zeros",
"scipy.sparse.hstack",
"numpy.concatenate"
] | [((2344, 2400), 'numpy.concatenate', 'np.concatenate', (['(features, additional_features)'], {'axis': '(-1)'}), '((features, additional_features), axis=-1)\n', (2358, 2400), True, 'import numpy as np\n'), ((2895, 2934), 'scipy.sparse.hstack', 'hstack', (['[features, additional_features]'], {}), '([features, additional_features])\n', (2901, 2934), False, 'from scipy.sparse import hstack\n'), ((3698, 3731), 'numpy.zeros', 'np.zeros', (['[1, features.shape[-1]]'], {}), '([1, features.shape[-1]])\n', (3706, 3731), True, 'import numpy as np\n'), ((3798, 3847), 'numpy.mean', 'np.mean', (['non_zero_features'], {'axis': '(0)', 'keepdims': '(True)'}), '(non_zero_features, axis=0, keepdims=True)\n', (3805, 3847), True, 'import numpy as np\n'), ((3913, 3961), 'numpy.max', 'np.max', (['non_zero_features'], {'axis': '(0)', 'keepdims': '(True)'}), '(non_zero_features, axis=0, keepdims=True)\n', (3919, 3961), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
import tf
from rrt_slam.msg import PointArray
from time import time
from numpy import array
from numpy import linalg as LA
from numpy import all as All
from numpy import inf
from functions import robot, informationGain, discount
from numpy.linalg import norm
# Subscribers' callbacks------------------------------
mapData = OccupancyGrid()  # latest map, updated by mapCallBack (map_topic subscriber)
frontiers = []  # latest frontier centroids, updated by callBack (frontiers_topic subscriber)
# NOTE(review): global1/2/3 and globalmaps are only declared global in node()
# and never assigned or read in this file — confirm before removing.
global1 = OccupancyGrid()
global2 = OccupancyGrid()
global3 = OccupancyGrid()
globalmaps = []
def callBack(data):
global frontiers
frontiers = []
for point in data.points:
frontiers.append(array([point.x, point.y]))
def mapCallBack(data):
    # OccupancyGrid subscriber callback: cache the latest map globally for
    # the information-gain computations in node().
    global mapData
    mapData = data
# Node----------------------------------------------
def node():
    """Greedy frontier-assignment loop (market-based exploration).

    Repeatedly reads the latest frontier centroids, scores every
    (robot, frontier) pair as information gain times a multiplier minus the
    travel cost, and sends the best-revenue robot towards the winning
    frontier. Busy robots are only reassigned when no robot is available.
    """
    global frontiers, mapData, global1, global2, global3, globalmaps
    rospy.init_node('assigner', anonymous=False)
    # fetching all parameters
    map_topic = rospy.get_param('~map_topic', '/map')
    info_radius = rospy.get_param(
        '~info_radius', 1.0
    )  # can be smaller than the laser range; smaller -> less computation, but too small makes the info gain inaccurate
    info_multiplier = rospy.get_param('~info_multiplier', 3.0)
    hysteresis_radius = rospy.get_param(
        '~hysteresis_radius',
        3.0)  # at least as much as the laser scanner range
    hysteresis_gain = rospy.get_param(
        '~hysteresis_gain',
        2.0)  # bigger than 1 (biases a robot to continue exploring its current region)
    frontiers_topic = rospy.get_param('~frontiers_topic', '/filtered_points')
    n_robots = rospy.get_param('~n_robots', 1)
    namespace = rospy.get_param('~namespace', '')
    namespace_init_count = rospy.get_param('namespace_init_count', 1)
    delay_after_assignement = rospy.get_param('~delay_after_assignement', 0.5)
    rateHz = rospy.get_param('~rate', 100)
    rate = rospy.Rate(rateHz)
    #-------------------------------------------
    rospy.Subscriber(map_topic, OccupancyGrid, mapCallBack)
    rospy.Subscriber(frontiers_topic, PointArray, callBack)
    #-------------------------------------------
    # busy-wait until the first frontier message arrives
    while len(frontiers) < 1:
        pass
    centroids = copy(frontiers)
    # busy-wait until the first map message arrives
    while (len(mapData.data) < 1):
        pass
    robots = []
    if len(namespace) > 0:
        # Multi-robot: one handle per namespaced robot.
        for i in range(0, n_robots):
            robots.append(robot(namespace + str(i + namespace_init_count)))
    elif len(namespace) == 0:
        robots.append(robot(namespace))
    for i in range(0, n_robots):
        # Seed each robot with a goal at its own current position.
        robots[i].sendGoal(robots[i].getPosition())
    #-------------------------------------------------------------------------
    #--------------------- Main Loop -------------------------------
    #-------------------------------------------------------------------------
    while not rospy.is_shutdown():
        centroids = copy(frontiers)
        #-------------------------------------------------------------------------
        # Information gain for each frontier point.
        infoGain = []
        for ip in range(0, len(centroids)):
            infoGain.append(
                informationGain(mapData, [centroids[ip][0], centroids[ip][1]],
                                info_radius))
        #-------------------------------------------------------------------------
        # Partition robots into available (na) and busy (nb).
        na = []  #available robots
        nb = []  #busy robots
        for i in range(0, n_robots):
            if (robots[i].getState() == 1):
                nb.append(i)
            else:
                na.append(i)
        rospy.loginfo("available robots: " + str(na))
        #-------------------------------------------------------------------------
        # Discount the gain of frontiers near already-assigned points.
        for i in nb + na:
            infoGain = discount(mapData, robots[i].assigned_point, centroids,
                                infoGain, info_radius)
        #-------------------------------------------------------------------------
        revenue_record = []
        centroid_record = []
        id_record = []
        for ir in na:
            for ip in range(0, len(centroids)):
                cost = norm(robots[ir].getPosition() - centroids[ip])
                threshold = 1  # NOTE(review): unused — confirm before removing
                information_gain = infoGain[ip]
                # Nearby frontiers get a hysteresis boost so a robot keeps
                # exploring its current region.
                if (norm(robots[ir].getPosition() - centroids[ip]) <=
                        hysteresis_radius):
                    information_gain *= hysteresis_gain
                revenue = information_gain * info_multiplier - cost
                revenue_record.append(revenue)
                centroid_record.append(centroids[ip])
                id_record.append(ir)
        # If no robot is free, consider reassigning the busy ones instead.
        if len(na) < 1:
            revenue_record = []
            centroid_record = []
            id_record = []
            for ir in nb:
                for ip in range(0, len(centroids)):
                    cost = norm(robots[ir].getPosition() - centroids[ip])
                    threshold = 1
                    information_gain = infoGain[ip]
                    if (norm(robots[ir].getPosition() - centroids[ip]) <=
                            hysteresis_radius):
                        information_gain *= hysteresis_gain
                    # Frontiers near the robot's current assignment keep an
                    # undiscounted, hysteresis-boosted gain.
                    if ((norm(centroids[ip] - robots[ir].assigned_point)) <
                            hysteresis_radius):
                        information_gain = informationGain(
                            mapData, [centroids[ip][0], centroids[ip][1]],
                            info_radius) * hysteresis_gain
                    revenue = information_gain * info_multiplier - cost
                    revenue_record.append(revenue)
                    centroid_record.append(centroids[ip])
                    id_record.append(ir)
        rospy.loginfo("revenue record: " + str(revenue_record))
        rospy.loginfo("centroid record: " + str(centroid_record))
        rospy.loginfo("robot IDs record: " + str(id_record))
        #-------------------------------------------------------------------------
        # Assign the highest-revenue (robot, frontier) pair, then pause to
        # let the robot start moving before rescoring.
        if (len(id_record) > 0):
            winner_id = revenue_record.index(max(revenue_record))
            robots[id_record[winner_id]].sendGoal(centroid_record[winner_id])
            rospy.loginfo(namespace +
                          str(namespace_init_count + id_record[winner_id]) +
                          " assigned to " + str(centroid_record[winner_id]))
            rospy.sleep(delay_after_assignement)
        #-------------------------------------------------------------------------
        rate.sleep()
#-------------------------------------------------------------------------
if __name__ == '__main__':
    try:
        node()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS is interrupted (Ctrl-C / node kill).
        pass
| [
"functions.robot",
"nav_msgs.msg.OccupancyGrid",
"rospy.is_shutdown",
"functions.informationGain",
"rospy.init_node",
"rospy.get_param",
"functions.discount",
"numpy.array",
"rospy.Rate",
"numpy.linalg.norm",
"rospy.sleep",
"copy.copy",
"rospy.Subscriber"
] | [((540, 555), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (553, 555), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((581, 596), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (594, 596), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((607, 622), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (620, 622), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((633, 648), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (646, 648), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((1014, 1058), 'rospy.init_node', 'rospy.init_node', (['"""assigner"""'], {'anonymous': '(False)'}), "('assigner', anonymous=False)\n", (1029, 1058), False, 'import rospy\n'), ((1106, 1143), 'rospy.get_param', 'rospy.get_param', (['"""~map_topic"""', '"""/map"""'], {}), "('~map_topic', '/map')\n", (1121, 1143), False, 'import rospy\n'), ((1162, 1198), 'rospy.get_param', 'rospy.get_param', (['"""~info_radius"""', '(1.0)'], {}), "('~info_radius', 1.0)\n", (1177, 1198), False, 'import rospy\n'), ((1375, 1415), 'rospy.get_param', 'rospy.get_param', (['"""~info_multiplier"""', '(3.0)'], {}), "('~info_multiplier', 3.0)\n", (1390, 1415), False, 'import rospy\n'), ((1440, 1482), 'rospy.get_param', 'rospy.get_param', (['"""~hysteresis_radius"""', '(3.0)'], {}), "('~hysteresis_radius', 3.0)\n", (1455, 1482), False, 'import rospy\n'), ((1568, 1608), 'rospy.get_param', 'rospy.get_param', (['"""~hysteresis_gain"""', '(2.0)'], {}), "('~hysteresis_gain', 2.0)\n", (1583, 1608), False, 'import rospy\n'), ((1714, 1769), 'rospy.get_param', 'rospy.get_param', (['"""~frontiers_topic"""', '"""/filtered_points"""'], {}), "('~frontiers_topic', '/filtered_points')\n", (1729, 1769), False, 'import rospy\n'), ((1785, 1816), 'rospy.get_param', 'rospy.get_param', (['"""~n_robots"""', '(1)'], {}), "('~n_robots', 1)\n", (1800, 1816), False, 'import rospy\n'), ((1833, 1866), 'rospy.get_param', 'rospy.get_param', (['"""~namespace"""', 
'""""""'], {}), "('~namespace', '')\n", (1848, 1866), False, 'import rospy\n'), ((1894, 1936), 'rospy.get_param', 'rospy.get_param', (['"""namespace_init_count"""', '(1)'], {}), "('namespace_init_count', 1)\n", (1909, 1936), False, 'import rospy\n'), ((1967, 2015), 'rospy.get_param', 'rospy.get_param', (['"""~delay_after_assignement"""', '(0.5)'], {}), "('~delay_after_assignement', 0.5)\n", (1982, 2015), False, 'import rospy\n'), ((2029, 2058), 'rospy.get_param', 'rospy.get_param', (['"""~rate"""', '(100)'], {}), "('~rate', 100)\n", (2044, 2058), False, 'import rospy\n'), ((2071, 2089), 'rospy.Rate', 'rospy.Rate', (['rateHz'], {}), '(rateHz)\n', (2081, 2089), False, 'import rospy\n'), ((2143, 2198), 'rospy.Subscriber', 'rospy.Subscriber', (['map_topic', 'OccupancyGrid', 'mapCallBack'], {}), '(map_topic, OccupancyGrid, mapCallBack)\n', (2159, 2198), False, 'import rospy\n'), ((2203, 2258), 'rospy.Subscriber', 'rospy.Subscriber', (['frontiers_topic', 'PointArray', 'callBack'], {}), '(frontiers_topic, PointArray, callBack)\n', (2219, 2258), False, 'import rospy\n'), ((2478, 2493), 'copy.copy', 'copy', (['frontiers'], {}), '(frontiers)\n', (2482, 2493), False, 'from copy import copy\n'), ((3130, 3149), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3147, 3149), False, 'import rospy\n'), ((3171, 3186), 'copy.copy', 'copy', (['frontiers'], {}), '(frontiers)\n', (3175, 3186), False, 'from copy import copy\n'), ((782, 807), 'numpy.array', 'array', (['[point.x, point.y]'], {}), '([point.x, point.y])\n', (787, 807), False, 'from numpy import array\n'), ((4112, 4189), 'functions.discount', 'discount', (['mapData', 'robots[i].assigned_point', 'centroids', 'infoGain', 'info_radius'], {}), '(mapData, robots[i].assigned_point, centroids, infoGain, info_radius)\n', (4120, 4189), False, 'from functions import robot, informationGain, discount\n'), ((6711, 6747), 'rospy.sleep', 'rospy.sleep', (['delay_after_assignement'], {}), '(delay_after_assignement)\n', (6722, 
6747), False, 'import rospy\n'), ((2788, 2804), 'functions.robot', 'robot', (['namespace'], {}), '(namespace)\n', (2793, 2804), False, 'from functions import robot, informationGain, discount\n'), ((3435, 3510), 'functions.informationGain', 'informationGain', (['mapData', '[centroids[ip][0], centroids[ip][1]]', 'info_radius'], {}), '(mapData, [centroids[ip][0], centroids[ip][1]], info_radius)\n', (3450, 3510), False, 'from functions import robot, informationGain, discount\n'), ((5536, 5583), 'numpy.linalg.norm', 'norm', (['(centroids[ip] - robots[ir].assigned_point)'], {}), '(centroids[ip] - robots[ir].assigned_point)\n', (5540, 5583), False, 'from numpy.linalg import norm\n'), ((5678, 5753), 'functions.informationGain', 'informationGain', (['mapData', '[centroids[ip][0], centroids[ip][1]]', 'info_radius'], {}), '(mapData, [centroids[ip][0], centroids[ip][1]], info_radius)\n', (5693, 5753), False, 'from functions import robot, informationGain, discount\n')] |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo.tasks.car.ops.car_metrics_ops."""
from lingvo import compat as tf
from lingvo.core import test_utils
from lingvo.tasks.car import ops
import numpy as np
class ImageMetricsOpsTest(test_utils.TestCase):
def _GenerateRandomBBoxes(self, num_images, num_bboxes):
xyz = np.random.uniform(low=-1.0, high=1.0, size=(num_bboxes, 3))
dimension = np.random.uniform(low=0.1, high=1.0, size=(num_bboxes, 3))
rotation = np.random.uniform(low=-np.pi, high=np.pi, size=(num_bboxes, 1))
bboxes = np.concatenate([xyz, dimension, rotation], axis=-1)
imageid = np.random.randint(0, num_images, size=[num_bboxes])
scores = np.random.uniform(size=[num_bboxes])
return bboxes, imageid, scores
  def _GetAP(self, gt_bbox, gt_imgid, pd_bbox, pd_imgid, pd_score, algorithm):
    """Runs average_precision3d at IoU 0.5; returns [iou, pr, score_and_hit]."""
    g = tf.Graph()
    with g.as_default():
      iou, pr, score_and_hit = ops.average_precision3d(
          iou_threshold=0.5,
          groundtruth_bbox=gt_bbox,
          groundtruth_imageid=gt_imgid,
          # No ground truth boxes are ignored.
          groundtruth_ignore=tf.zeros_like(gt_imgid, dtype=tf.int32),
          prediction_bbox=pd_bbox,
          prediction_imageid=pd_imgid,
          prediction_score=pd_score,
          # No predicted boxes are ignored.
          prediction_ignore=tf.zeros_like(pd_imgid, dtype=tf.int32),
          num_recall_points=41,
          algorithm=algorithm)
    with self.session(graph=g) as sess:
      val = sess.run([iou, pr, score_and_hit])
    return val
def testAPKITTI(self):
k, n, m = 10, 100, 20
gt_bbox, gt_imgid, _ = self._GenerateRandomBBoxes(k, n)
pd_bbox, pd_imgid, pd_score = self._GenerateRandomBBoxes(k, m)
# IoU between two set of random boxes;
iou, _, score_and_hit = self._GetAP(
gt_bbox, gt_imgid, pd_bbox, pd_imgid, pd_score, algorithm='KITTI')
self.assertAllEqual(score_and_hit.shape, (m, 2))
self.assertTrue(0 <= iou and iou <= 1.0)
# Make the predictions be a duplicate of the ground truth to emulate
# perfect detection.
iou, _, score_and_hit = self._GetAP(
gt_bbox, gt_imgid, gt_bbox, gt_imgid, np.ones(n), algorithm='KITTI')
self.assertAllEqual(score_and_hit.shape, (n, 2))
self.assertAllEqual(score_and_hit[:, 1], np.ones(n))
self.assertEqual(1, iou)
# Ditto as above but make the detection scores unique so that one can test
# that the scores are correctly returned.
iou, _, score_and_hit = self._GetAP(
gt_bbox,
gt_imgid,
gt_bbox,
gt_imgid,
np.linspace(0, 1, n),
algorithm='KITTI')
self.assertAllEqual(score_and_hit.shape, (n, 2))
self.assertAllClose(score_and_hit[:, 0], np.linspace(0, 1, n))
self.assertAllEqual(score_and_hit[:, 1], np.ones(n))
self.assertEqual(1, iou)
# IoU of empty detection
iou, _, score_and_hit = self._GetAP(
gt_bbox, gt_imgid, pd_bbox, pd_imgid + n, pd_score, algorithm='KITTI')
self.assertAllEqual(score_and_hit.shape, (m, 2))
self.assertAllEqual(score_and_hit[:, 1], np.zeros(m))
self.assertEqual(0, iou)
def testAPVOC(self):
k, n, m = 10, 100, 20
gt_bbox, gt_imgid, _ = self._GenerateRandomBBoxes(k, n)
pd_bbox, pd_imgid, pd_score = self._GenerateRandomBBoxes(k, m)
# IoU between two set of random boxes;
iou, _, _ = self._GetAP(
gt_bbox, gt_imgid, pd_bbox, pd_imgid, pd_score, algorithm='VOC')
self.assertTrue(0 <= iou and iou <= 1.0)
# IoU of perfect detection
iou, _, score_and_hit = self._GetAP(
gt_bbox, gt_imgid, gt_bbox, gt_imgid, np.ones(n), algorithm='VOC')
# Just check that dummy values are returned.
self.assertAllEqual(score_and_hit.shape, (n, 2))
self.assertAllEqual(score_and_hit, -1.0 * np.ones(shape=(n, 2)))
self.assertEqual(1, iou)
# IoU of empty detection
iou, _, _ = self._GetAP(
gt_bbox, gt_imgid, pd_bbox, pd_imgid + n, pd_score, algorithm='VOC')
self.assertEqual(0, iou)
def testAllZeroValue(self):
k, n, m = 10, 100, 20
gt_bbox, gt_imgid, _ = self._GenerateRandomBBoxes(k, n)
pd_bbox, pd_imgid, pd_score = self._GenerateRandomBBoxes(k, m)
# IoU between two set of random boxes;
iou, pr, _ = self._GetAP(
gt_bbox * 0,
gt_imgid * 0,
pd_bbox * 0,
pd_imgid * 0,
pd_score * 0,
algorithm='KITTI')
self.assertEqual(0, iou)
self.assertAllEqual(pr.shape, (41, 2))
self.assertAllEqual(np.zeros(41), pr[:, 0])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| [
"numpy.ones",
"lingvo.compat.zeros_like",
"lingvo.compat.test.main",
"numpy.random.randint",
"numpy.linspace",
"numpy.zeros",
"numpy.concatenate",
"numpy.random.uniform",
"lingvo.compat.Graph"
] | [((5136, 5150), 'lingvo.compat.test.main', 'tf.test.main', ([], {}), '()\n', (5148, 5150), True, 'from lingvo import compat as tf\n'), ((1002, 1061), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': '(num_bboxes, 3)'}), '(low=-1.0, high=1.0, size=(num_bboxes, 3))\n', (1019, 1061), True, 'import numpy as np\n'), ((1078, 1136), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.1)', 'high': '(1.0)', 'size': '(num_bboxes, 3)'}), '(low=0.1, high=1.0, size=(num_bboxes, 3))\n', (1095, 1136), True, 'import numpy as np\n'), ((1152, 1215), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-np.pi)', 'high': 'np.pi', 'size': '(num_bboxes, 1)'}), '(low=-np.pi, high=np.pi, size=(num_bboxes, 1))\n', (1169, 1215), True, 'import numpy as np\n'), ((1229, 1280), 'numpy.concatenate', 'np.concatenate', (['[xyz, dimension, rotation]'], {'axis': '(-1)'}), '([xyz, dimension, rotation], axis=-1)\n', (1243, 1280), True, 'import numpy as np\n'), ((1295, 1346), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_images'], {'size': '[num_bboxes]'}), '(0, num_images, size=[num_bboxes])\n', (1312, 1346), True, 'import numpy as np\n'), ((1360, 1396), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[num_bboxes]'}), '(size=[num_bboxes])\n', (1377, 1396), True, 'import numpy as np\n'), ((1520, 1530), 'lingvo.compat.Graph', 'tf.Graph', ([], {}), '()\n', (1528, 1530), True, 'from lingvo import compat as tf\n'), ((2754, 2764), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2761, 2764), True, 'import numpy as np\n'), ((2883, 2893), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2890, 2893), True, 'import numpy as np\n'), ((3169, 3189), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (3180, 3189), True, 'import numpy as np\n'), ((3316, 3336), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (3327, 3336), True, 'import numpy as np\n'), ((3383, 3393), 'numpy.ones', 
'np.ones', (['n'], {}), '(n)\n', (3390, 3393), True, 'import numpy as np\n'), ((3672, 3683), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (3680, 3683), True, 'import numpy as np\n'), ((4199, 4209), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4206, 4209), True, 'import numpy as np\n'), ((5081, 5093), 'numpy.zeros', 'np.zeros', (['(41)'], {}), '(41)\n', (5089, 5093), True, 'import numpy as np\n'), ((4376, 4397), 'numpy.ones', 'np.ones', ([], {'shape': '(n, 2)'}), '(shape=(n, 2))\n', (4383, 4397), True, 'import numpy as np\n'), ((1746, 1785), 'lingvo.compat.zeros_like', 'tf.zeros_like', (['gt_imgid'], {'dtype': 'tf.int32'}), '(gt_imgid, dtype=tf.int32)\n', (1759, 1785), True, 'from lingvo import compat as tf\n'), ((1926, 1965), 'lingvo.compat.zeros_like', 'tf.zeros_like', (['pd_imgid'], {'dtype': 'tf.int32'}), '(pd_imgid, dtype=tf.int32)\n', (1939, 1965), True, 'from lingvo import compat as tf\n')] |
import random
import matplotlib.pyplot as plt
import numpy as np
def cmap(label: str) -> str:
    """Return the hex RGB color string for a standard pseudopotential label.

    `label` has the form "<prefix>/<family>/<z>/<type>/<version>".  Known
    family/type/version combinations map to fixed colors; any other label
    gets a deterministic pseudo-random color derived from the sum of its
    character codes, so repeated calls with the same label agree.
    """
    _, pp_family, pp_z, pp_type, pp_version = label.split("/")
    if pp_family == "sg15" and pp_version == "v1.0":
        return "#000000"
    if pp_family == "sg15" and pp_version == "v1.2":
        return "#708090"
    if pp_family == "gbrv" and pp_version == "v1":
        return "#4682B4"
    if pp_family == "psl" and pp_version == "v1.0.0" and pp_type == "us":
        return "#F50E02"
    if pp_family == "psl" and pp_version == "v1.0.0" and pp_type == "paw":
        # Fixed: was "#2D8B00F" — seven hex digits, which matplotlib rejects
        # as a color specification.
        return "#2D8B00"
    if pp_family == "dojo" and pp_version == "v04":
        return "#F9A501"
    # TODO: more mapping
    # Unknown label: seed the RNG with the character-code sum so the color is
    # stable for a given label across runs.
    ascn = sum(ord(c) for c in label)
    random.seed(ascn)
    return "#%06x" % random.randint(0, 0xFFFFFF)
def delta_measure_hist(pseudos: dict, measure_type):
    """Plot a grouped bar chart of delta-measure values per crystal structure.

    Parameters
    ----------
    pseudos : dict
        Maps pseudopotential labels ("<prefix>/<family>/<z>/<type>/<version>")
        to workflow output dicts containing a ``delta_measure`` entry.
    measure_type : str
        Either "delta" (Δ-factor) or "nv_delta" (ν-factor); selects which
        result key is plotted.  NOTE(review): any other value leaves
        ``keyname``/``ylabel`` unbound and raises later — confirm callers
        only pass these two.

    Returns
    -------
    matplotlib.figure.Figure
    """
    px = 1 / plt.rcParams["figure.dpi"]  # pixel in inches
    fig, ax = plt.subplots(1, 1, figsize=(1024 * px, 360 * px))
    # NOTE(review): "X2O" appears twice in this list (and "XO2" does not) —
    # possibly a typo inherited from the workflow; verify before changing.
    structures = ["X", "XO", "X2O", "XO3", "X2O", "X2O3", "X2O5"]
    num_structures = len(structures)
    # element
    # Best-effort: take the element symbol from the first entry for the title.
    try:
        v0 = list(pseudos.values())[0]
        element = v0["pseudo_info"]["element"]
    except Exception:
        element = None
    if measure_type == "delta":
        keyname = "delta"
        ylabel = "Δ -factor"
    elif measure_type == "nv_delta":
        keyname = "rel_errors_vec_length"
        ylabel = "ν -factor"
    for i, (label, output) in enumerate(pseudos.items()):
        N = len(structures)
        idx = np.arange(N)  # the x locations for the groups
        width = 0.1  # the width of the bars
        y_delta = []
        for structure in structures:
            # Missing structures are drawn as -1 so the bar is visibly absent.
            try:
                res = output["delta_measure"]["output_delta_analyze"][
                    f"output_{structure}"
                ]
                y_delta.append(res[keyname])
            except Exception:
                y_delta.append(-1)
        _, pp_family, pp_z, pp_type, pp_version = label.split("/")
        out_label = f"{pp_z}/{pp_type}({pp_family}-{pp_version})"
        # Offset each pseudopotential's bars by i*width within the group.
        ax.bar(
            idx + width * i,
            y_delta,
            width,
            color=cmap(label),
            edgecolor="black",
            linewidth=1,
            label=out_label,
        )
    ax.legend()
    ax.set_title(f"X={element}")
    # Reference line at 1.0, the conventional "good agreement" threshold.
    ax.axhline(y=1.0, linestyle="--", color="gray")
    ax.set_ylabel(ylabel)
    ax.set_ylim([0, 10])
    ax.set_yticks(np.arange(10))
    ax.set_xticks(list(range(num_structures)))
    ax.set_xticklabels(structures)
    return fig
def convergence(pseudos: dict, wf_name, measure_name, ylabel, threshold=None):
    """Plot wavefunction- and charge-density-cutoff convergence curves.

    Left axis: `measure_name` vs. wavefunction cutoff at fixed rho cutoff.
    Right axis: `measure_name` vs. charge-density cutoff at fixed wfc cutoff.
    Entries missing the `wf_name` results are silently skipped (best effort).

    Parameters
    ----------
    pseudos : dict
        Maps pseudopotential labels to workflow output dicts.
    wf_name : str
        Key of the convergence workflow results inside each output dict.
    measure_name : str
        Key of the quantity to plot from the wfc/rho test parameters.
    ylabel : str
        Label for the left y-axis.
    threshold : float, optional
        If truthy, draw a horizontal reference line and clamp both y-ranges
        around it.

    Returns
    -------
    matplotlib.figure.Figure
    """
    px = 1 / plt.rcParams["figure.dpi"]
    fig, (ax1, ax2) = plt.subplots(
        1, 2, gridspec_kw={"width_ratios": [2, 1]}, figsize=(1024 * px, 360 * px)
    )
    for label, output in pseudos.items():
        # Calculate the avg delta measure value
        # NOTE(review): "X2O" is duplicated here (same list as
        # delta_measure_hist); verify against the workflow definitions.
        structures = ["X", "XO", "X2O", "XO3", "X2O", "X2O3", "X2O5"]
        lst = []
        for structure in structures:
            try:
                res = output["delta_measure"]["output_delta_analyze"][
                    f"output_{structure}"
                ]
                lst.append(res["rel_errors_vec_length"])
            except Exception:
                pass
        # NOTE(review): raises ZeroDivisionError when no structure has delta
        # results — presumably every entry has at least one; confirm.
        avg_delta = sum(lst) / len(lst)
        try:
            res = output[wf_name]
            x_wfc = res["output_parameters_wfc_test"]["ecutwfc"]
            y_wfc = res["output_parameters_wfc_test"][measure_name]
            x_rho = res["output_parameters_rho_test"]["ecutrho"]
            y_rho = res["output_parameters_rho_test"][measure_name]
            wfc_cutoff = res["final_output_parameters"]["wfc_cutoff"]
            _, pp_family, pp_z, pp_type, pp_version = label.split("/")
            out_label = f"{pp_z}/{pp_type}(ν={avg_delta:.2f})({pp_family}-{pp_version})"
            ax1.plot(x_wfc, y_wfc, marker="o", color=cmap(label), label=out_label)
            ax2.plot(
                x_rho,
                y_rho,
                marker="o",
                color=cmap(label),
                label=f"cutoff wfc = {wfc_cutoff} Ry",
            )
        except Exception:
            # Best effort: entries without convergence results are skipped.
            pass
    ax1.set_ylabel(ylabel)
    # NOTE(review): "Wavefuntion" / "cudoff" typos below are user-visible
    # axis labels; left unchanged here since this is a doc-only pass.
    ax1.set_xlabel("Wavefuntion cutoff (Ry)")
    ax1.set_title(
        "Fixed rho cutoff at 200 * dual (dual=4 for NC and dual=8 for non-NC)"
    )
    # ax2.legend(loc='upper left', bbox_to_anchor=(1, 1.0))
    ax1.legend()
    ax2.set_xlabel("Charge density cudoff (Ry)")
    ax2.set_title("Convergence test at fixed wavefunction cutoff")
    ax2.legend()
    if threshold:
        ax1.axhline(y=threshold, color="r", linestyle="--")
        ax2.axhline(y=threshold, color="r", linestyle="--")
        ax1.set_ylim(-0.5 * threshold, 10 * threshold)
        ax2.set_ylim(-0.5 * threshold, 10 * threshold)
    plt.tight_layout()
    return fig
| [
"random.seed",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"random.randint",
"numpy.arange"
] | [((875, 892), 'random.seed', 'random.seed', (['ascn'], {}), '(ascn)\n', (886, 892), False, 'import random\n'), ((1071, 1120), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(1024 * px, 360 * px)'}), '(1, 1, figsize=(1024 * px, 360 * px))\n', (1083, 1120), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2958), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'gridspec_kw': "{'width_ratios': [2, 1]}", 'figsize': '(1024 * px, 360 * px)'}), "(1, 2, gridspec_kw={'width_ratios': [2, 1]}, figsize=(1024 * px,\n 360 * px))\n", (2879, 2958), True, 'import matplotlib.pyplot as plt\n'), ((5000, 5018), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5016, 5018), True, 'import matplotlib.pyplot as plt\n'), ((914, 941), 'random.randint', 'random.randint', (['(0)', '(16777215)'], {}), '(0, 16777215)\n', (928, 941), False, 'import random\n'), ((1676, 1688), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1685, 1688), True, 'import numpy as np\n'), ((2610, 2623), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2619, 2623), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 11:53:09 2022
@author: Oliver
"""
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
from PIL import Image
from PIL.ImageOps import grayscale
from .pattern_tools import patchmaker, align_pattern, microns_into_pattern
def histogram_patches(patches, bins=100, xlim=(140, 200), output=None):
    """Display or save a histogram of brightness values pooled from all patches.

    Parameters
    ----------
    patches : dict
        Values are numpy int arrays (image patches); keys are ignored.
    bins : int
        Number of histogram bins.
    xlim : tuple
        X-axis limits for the plot.
    output : str or None
        If None, show the figure interactively; otherwise save it to this
        path and clear the current figure.
    """
    # Flatten each patch once and concatenate in a single call, instead of
    # growing an array inside the loop (which copies all data per iteration).
    if patches:
        brightness = np.concatenate([patch.ravel() for patch in patches.values()])
    else:
        brightness = np.array([])  # empty input: empty histogram
    plt.hist(brightness, bins=bins)
    plt.xlim(xlim)
    if output is None:
        plt.show()
    else:
        plt.savefig(output)
        plt.clf()
def isolate_patches(picture, pattern_file, pattern_params, offsets,
                    exclude=None):
    """Cut the patch at each pattern offset out of a picture.

    Parameters
    ----------
    picture : path-like
        Image file to load (converted to grayscale).
    pattern_file : path-like
        Pattern definition passed to `align_pattern`.
    pattern_params : dict
        Needs keys 'px_per_mm', 'theta', 'pattern_offset', 'spacing', 'pitch'.
    offsets : iterable
        Distances (in microns, presumably — confirm against callers) along
        the pattern at which to extract patches.
    exclude : iterable of int, optional
        Offset indices to skip.  (Was a mutable default ``[]``; replaced with
        None to avoid the shared-mutable-default pitfall.)

    Returns
    -------
    dict
        Maps offset index -> extracted patch array.
    """
    if exclude is None:
        exclude = []
    pic = Image.open(picture)
    # Transpose then flip both axes to map image pixels onto pattern coords.
    pic = np.fliplr(np.flipud(np.array(grayscale(pic)).T))
    pattern = align_pattern(pattern_file,
                          pattern_params['px_per_mm'] / 1e4,
                          pattern_params['theta'],
                          pattern_params['pattern_offset'])
    patches = {}
    for i, offset in enumerate(offsets):
        if i in exclude:
            continue
        point, angle = microns_into_pattern(
            offset, pattern, pattern_params['px_per_mm'] * 1e-3)
        patches[i] = patchmaker(pic,
                               height=pattern_params['spacing'],
                               width=pattern_params['pitch'],
                               center_y=int(point[0]),
                               center_x=int(point[1]),
                               angle=angle)
    return patches
def parse_patch(patch, threshold=170, min_size=6, return_image=False):
    """Find bright connected features in a patch and report their area/count.

    A pixel counts as "bright" when its value is >= threshold.  Features
    that are inclusions (contours with a parent), have fewer than min_size
    boundary points, or whose bounding box touches the patch border are
    discarded.

    Returns (area, count), or (area, count, annotated_patch) when
    return_image is True.
    """
    mask = (patch >= threshold).astype("uint8")
    contours, hierarchy = cv.findContours(
        mask, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    if hierarchy is not None:
        # Remove inclusions, small features, and features on edges
        parents = hierarchy.squeeze(0)[:, -1]
        kept = []
        for j, cntr in enumerate(contours):
            if parents[j] != -1:
                continue  # nested inside another feature
            if cntr.shape[0] < min_size:
                continue  # too few boundary points
            if box_touches_edge(cv.boundingRect(cntr), patch.shape):
                continue  # clipped by the patch border
            kept.append(cntr)
        contours = kept
    # calculate properties
    count = len(contours)
    area = np.sum([cv.contourArea(cntr) for cntr in contours])  # pixels
    if return_image:
        return area, count, cv.drawContours(patch, contours, -1, 0, 2)
    return area, count
def box_touches_edge(box, imshape):
    """Return True when the bounding box touches or crosses an image edge.

    `box` is (x, y, w, h) as produced by cv.boundingRect; `imshape` is the
    (height, width) of the image the box lives in.
    """
    x, y, w, h = box
    height, width = imshape
    fully_inside = 0 < x and 0 < y and x + w < width and y + h < height
    return not fully_inside
| [
"PIL.Image.open",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"cv2.drawContours",
"matplotlib.pyplot.clf",
"cv2.boundingRect",
"cv2.contourArea",
"PIL.ImageOps.grayscale",
"numpy.array",
"cv2.findContours",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((541, 553), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (549, 553), True, 'import numpy as np\n'), ((695, 726), 'matplotlib.pyplot.hist', 'plt.hist', (['brightness'], {'bins': 'bins'}), '(brightness, bins=bins)\n', (703, 726), True, 'from matplotlib import pyplot as plt\n'), ((731, 745), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (739, 745), True, 'from matplotlib import pyplot as plt\n'), ((830, 839), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (837, 839), True, 'from matplotlib import pyplot as plt\n'), ((953, 972), 'PIL.Image.open', 'Image.open', (['picture'], {}), '(picture)\n', (963, 972), False, 'from PIL import Image\n'), ((1972, 2027), 'cv2.findContours', 'cv.findContours', (['bw', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_NONE'], {}), '(bw, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)\n', (1987, 2027), True, 'import cv2 as cv\n'), ((777, 787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (785, 787), True, 'from matplotlib import pyplot as plt\n'), ((806, 825), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (817, 825), True, 'from matplotlib import pyplot as plt\n'), ((2245, 2291), 'numpy.array', 'np.array', (['[cntr.shape[0] for cntr in contours]'], {}), '([cntr.shape[0] for cntr in contours])\n', (2253, 2291), True, 'import numpy as np\n'), ((2350, 2371), 'cv2.boundingRect', 'cv.boundingRect', (['cntr'], {}), '(cntr)\n', (2365, 2371), True, 'import cv2 as cv\n'), ((2724, 2743), 'cv2.contourArea', 'cv.contourArea', (['cnt'], {}), '(cnt)\n', (2738, 2743), True, 'import cv2 as cv\n'), ((2826, 2868), 'cv2.drawContours', 'cv.drawContours', (['patch', 'contours', '(-1)', '(0)', '(2)'], {}), '(patch, contours, -1, 0, 2)\n', (2841, 2868), True, 'import cv2 as cv\n'), ((1012, 1026), 'PIL.ImageOps.grayscale', 'grayscale', (['pic'], {}), '(pic)\n', (1021, 1026), False, 'from PIL.ImageOps import grayscale\n')] |
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import numpy as np
import gym
from planet import control
from planet import networks
from planet import tools
# Benchmark-task descriptor: a display name, a picklable environment
# constructor, the episode length cap, and the names of the low-dimensional
# state components recorded for the task.
Task = collections.namedtuple(
    'Task', 'name, env_ctor, max_length, state_components')
def cartpole_balance(config, params):
  """Task config for the dm_control cartpole 'balance' task."""
  repeat = params.get('action_repeat', 8)
  episode_len = 1000 // repeat  # e.g. 125 steps at the default repeat of 8
  components = ['reward', 'position', 'velocity']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'cartpole', 'balance')
  return Task('cartpole_balance', ctor, episode_len, components)
def cartpole_swingup(config, params):
  """Task config for the dm_control cartpole 'swingup' task."""
  repeat = params.get('action_repeat', 8)
  episode_len = 1000 // repeat
  components = ['reward', 'position', 'velocity']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'cartpole', 'swingup')
  return Task('cartpole_swingup', ctor, episode_len, components)
def finger_spin(config, params):
  """Task config for the dm_control finger 'spin' task."""
  repeat = params.get('action_repeat', 2)
  episode_len = 1000 // repeat
  components = ['reward', 'position', 'velocity', 'touch']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'finger', 'spin')
  return Task('finger_spin', ctor, episode_len, components)
def cheetah_run(config, params):
  """Task config for the dm_control cheetah 'run' task."""
  repeat = params.get('action_repeat', 4)
  episode_len = 1000 // repeat
  components = ['reward', 'position', 'velocity']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'cheetah', 'run')
  return Task('cheetah_run', ctor, episode_len, components)
def cup_catch(config, params):
  """Task config for the dm_control ball_in_cup 'catch' task."""
  repeat = params.get('action_repeat', 6)
  episode_len = 1000 // repeat
  components = ['reward', 'position', 'velocity']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'ball_in_cup', 'catch')
  return Task('cup_catch', ctor, episode_len, components)
def walker_walk(config, params):
  """Task config for the dm_control walker 'walk' task."""
  repeat = params.get('action_repeat', 2)
  episode_len = 1000 // repeat
  components = ['reward', 'height', 'orientations', 'velocity']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'walker', 'walk')
  return Task('walker_walk', ctor, episode_len, components)
def humanoid_walk(config, params):
  """Task config for the dm_control humanoid 'walk' task."""
  repeat = params.get('action_repeat', 2)
  episode_len = 1000 // repeat
  components = [
      'reward', 'com_velocity', 'extremities', 'head_height', 'joint_angles',
      'torso_vertical', 'velocity']
  ctor = functools.partial(
      _dm_control_env, repeat, episode_len, 'humanoid', 'walk')
  return Task('humanoid_walk', ctor, episode_len, components)
def _dm_control_env(action_repeat, max_length, domain, task):
  """Build a 64x64 pixel-observation dm_control env, run in a subprocess."""
  from dm_control import suite
  def env_ctor():
    # Wrap the raw suite env, then stack the standard PlaNet wrappers.
    raw = suite.load(domain, task)
    env = control.wrappers.DeepMindWrapper(raw, (64, 64))
    env = control.wrappers.ActionRepeat(env, action_repeat)
    env = control.wrappers.LimitDuration(env, max_length)
    env = control.wrappers.PixelObservations(env, (64, 64), np.uint8, 'image')
    return control.wrappers.ConvertTo32Bit(env)
  # The constructor is executed inside a separate process.
  return control.wrappers.ExternalProcess(env_ctor)
# gym classic_control
#=============================================================
def pendulum(config, params):
  """Task config for the gym classic-control Pendulum-v0 task."""
  repeat = params.get('action_repeat', 2)
  episode_len = 1000 // repeat
  components = ['reward', 'state']
  ctor = functools.partial(
      _dm_control_env_gym, repeat, episode_len, 'Pendulum-v0')
  return Task('pendulum', ctor, episode_len, components)
class DeepMindWrapper_gym(object):
  """Wraps a Gym environment into an interface for downstream process"""
  metadata = {'render.modes': ['rgb_array']}
  reward_range = (-np.inf, np.inf)
  def __init__(self, env, render_size=(64, 64), camera_id=0):
    self._env = env
    self._render_size = render_size
    self._camera_id = camera_id
    # RHS `self.observation_space` is not set yet, so it falls through
    # __getattr__ to the wrapped env's space, which we nest under 'state'.
    self.observation_space = gym.spaces.Dict({'state':self.observation_space})
  def __getattr__(self, name):
    # Delegate any attribute not defined here to the wrapped environment.
    return getattr(self._env, name)
  def step(self, action):
    """Steps the wrapped env and nests the observation under 'state'."""
    obs, reward, done, info = self._env.step(action)
    obs = {'state':obs}
    return obs, reward, done, info
  def reset(self):
    """Resets the wrapped env, returning the nested observation dict."""
    return {'state':self._env.reset()}
  def render(self, *args, **kwargs):
    """Renders a 64x64 RGB array by center-cropping a 100x100 frame."""
    if kwargs.get('mode', 'rgb_array') != 'rgb_array':
      raise ValueError("Only render mode 'rgb_array' is supported.")
    del args  # Unused
    del kwargs  # Unused
    # NOTE(review): `render_size` is not a standard gym render kwarg — this
    # relies on a locally modified pendulum.py, as the original comment says.
    return self._env.render(mode='rgb_array',render_size=(100,100))[18:82,18:82]  # pendulum.py is modified.
def _dm_control_env_gym(action_repeat, max_length, env_name):
  """Build a 64x64 pixel-observation gym env, run in a subprocess."""
  import gym
  def env_ctor():
    raw = gym.make(env_name).env  # .env unwraps gym's TimeLimit wrapper
    env = DeepMindWrapper_gym(raw, (64, 64))
    env = control.wrappers.ActionRepeat(env, action_repeat)
    env = control.wrappers.LimitDuration(env, max_length)
    env = control.wrappers.PixelObservations(env, (64, 64), np.uint8, 'image')
    return control.wrappers.ConvertTo32Bit(env)
  # The constructor is executed inside a separate process.
  return control.wrappers.ExternalProcess(env_ctor)
return env
# carla
#=============================================================
def carla(config, params):
  """Task config for the CARLA driving environment.

  Episodes are capped at 100 // action_repeat steps.  The only
  low-dimensional component besides the reward is the dummy 'state' entry
  emitted by DeepMindWrapper_carla.
  """
  # Removed a leftover debug print of "+++..." that cluttered stdout.
  action_repeat = params.get('action_repeat', 1)
  max_length = 100 // action_repeat
  state_components = [
      'reward', 'state']
  env_ctor = functools.partial(
      _dm_control_env_carla, action_repeat, max_length, 'carla')
  return Task('carla', env_ctor, max_length, state_components)
class DeepMindWrapper_carla(object):
  """Wraps a Gym environment into an interface for downstream process"""
  metadata = {'render.modes': ['rgb_array']}
  reward_range = (-np.inf, np.inf)
  def __init__(self, env, render_size=(96, 96), camera_id=0):
    self._env = env
    self._render_size = render_size
    self._camera_id = camera_id
    # CARLA exposes no low-dimensional state, so the observation space is a
    # dummy single-element box; the real signal is the rendered image.
    self.observation_space = gym.spaces.Dict({'state':gym.spaces.Box(low=-1,high=1,shape=(1,))})
  def __getattr__(self, name):
    # Delegate any attribute not defined here to the wrapped environment.
    return getattr(self._env, name)
  def step(self, action):
    """Steps CARLA; stashes the camera frame for render() and returns a dummy state."""
    self.img, reward, done, info = self._env.step(action)
    obs = {'state':np.array([0.0])}
    # NOTE(review): the wrapped env's `info` dict is discarded here —
    # confirm nothing downstream needs it.
    return obs, reward, done, {}
  def reset(self):
    """Resets CARLA; stashes the first frame and returns the dummy state."""
    self.img = self._env.reset()
    return {'state':np.array([0.0])}
  def render(self, *args, **kwargs):
    """Returns the camera frame captured by the last step()/reset()."""
    if kwargs.get('mode', 'rgb_array') != 'rgb_array':
      raise ValueError("Only render mode 'rgb_array' is supported.")
    del args  # Unused
    del kwargs  # Unused
    return self.img
def _dm_control_env_carla(action_repeat, max_length, env_name):
  """Build the CARLA env with 96x96 pixel observations, run in a subprocess."""
  assert env_name == 'carla'
  from planet.envs.carla.env import CarlaEnv
  def env_ctor():
    env = DeepMindWrapper_carla(CarlaEnv(), (96, 96))
    env = control.wrappers.ActionRepeat(env, action_repeat)
    env = control.wrappers.LimitDuration(env, max_length)
    env = control.wrappers.PixelObservations(env, (96, 96), np.uint8, 'image')
    return control.wrappers.ConvertTo32Bit(env)
  # The constructor is executed inside a separate process.
  return control.wrappers.ExternalProcess(env_ctor)
| [
"collections.namedtuple",
"planet.control.wrappers.ActionRepeat",
"planet.control.wrappers.LimitDuration",
"gym.spaces.Dict",
"dm_control.suite.load",
"gym.spaces.Box",
"numpy.array",
"planet.envs.carla.env.CarlaEnv",
"functools.partial",
"planet.control.wrappers.ConvertTo32Bit",
"planet.control... | [((884, 962), 'collections.namedtuple', 'collections.namedtuple', (['"""Task"""', '"""name, env_ctor, max_length, state_components"""'], {}), "('Task', 'name, env_ctor, max_length, state_components')\n", (906, 962), False, 'import collections\n'), ((1215, 1303), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""cartpole"""', '"""balance"""'], {}), "(_dm_control_env, action_repeat, max_length, 'cartpole',\n 'balance')\n", (1232, 1303), False, 'import functools\n'), ((1576, 1664), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""cartpole"""', '"""swingup"""'], {}), "(_dm_control_env, action_repeat, max_length, 'cartpole',\n 'swingup')\n", (1593, 1664), False, 'import functools\n'), ((1941, 2020), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""finger"""', '"""spin"""'], {}), "(_dm_control_env, action_repeat, max_length, 'finger', 'spin')\n", (1958, 2020), False, 'import functools\n'), ((2287, 2366), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""cheetah"""', '"""run"""'], {}), "(_dm_control_env, action_repeat, max_length, 'cheetah', 'run')\n", (2304, 2366), False, 'import functools\n'), ((2631, 2720), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""ball_in_cup"""', '"""catch"""'], {}), "(_dm_control_env, action_repeat, max_length, 'ball_in_cup',\n 'catch')\n", (2648, 2720), False, 'import functools\n'), ((2995, 3074), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""walker"""', '"""walk"""'], {}), "(_dm_control_env, action_repeat, max_length, 'walker', 'walk')\n", (3012, 3074), False, 'import functools\n'), ((3424, 3509), 'functools.partial', 'functools.partial', (['_dm_control_env', 'action_repeat', 'max_length', '"""humanoid"""', '"""walk"""'], {}), 
"(_dm_control_env, action_repeat, max_length, 'humanoid',\n 'walk')\n", (3441, 3509), False, 'import functools\n'), ((4043, 4085), 'planet.control.wrappers.ExternalProcess', 'control.wrappers.ExternalProcess', (['env_ctor'], {}), '(env_ctor)\n', (4075, 4085), False, 'from planet import control\n'), ((4364, 4449), 'functools.partial', 'functools.partial', (['_dm_control_env_gym', 'action_repeat', 'max_length', '"""Pendulum-v0"""'], {}), "(_dm_control_env_gym, action_repeat, max_length, 'Pendulum-v0'\n )\n", (4381, 4449), False, 'import functools\n'), ((6042, 6084), 'planet.control.wrappers.ExternalProcess', 'control.wrappers.ExternalProcess', (['env_ctor'], {}), '(env_ctor)\n', (6074, 6084), False, 'from planet import control\n'), ((6409, 6485), 'functools.partial', 'functools.partial', (['_dm_control_env_carla', 'action_repeat', 'max_length', '"""carla"""'], {}), "(_dm_control_env_carla, action_repeat, max_length, 'carla')\n", (6426, 6485), False, 'import functools\n'), ((8031, 8073), 'planet.control.wrappers.ExternalProcess', 'control.wrappers.ExternalProcess', (['env_ctor'], {}), '(env_ctor)\n', (8063, 8073), False, 'from planet import control\n'), ((3786, 3835), 'planet.control.wrappers.ActionRepeat', 'control.wrappers.ActionRepeat', (['env', 'action_repeat'], {}), '(env, action_repeat)\n', (3815, 3835), False, 'from planet import control\n'), ((3846, 3893), 'planet.control.wrappers.LimitDuration', 'control.wrappers.LimitDuration', (['env', 'max_length'], {}), '(env, max_length)\n', (3876, 3893), False, 'from planet import control\n'), ((3904, 3972), 'planet.control.wrappers.PixelObservations', 'control.wrappers.PixelObservations', (['env', '(64, 64)', 'np.uint8', '"""image"""'], {}), "(env, (64, 64), np.uint8, 'image')\n", (3938, 3972), False, 'from planet import control\n'), ((3983, 4019), 'planet.control.wrappers.ConvertTo32Bit', 'control.wrappers.ConvertTo32Bit', (['env'], {}), '(env)\n', (4014, 4019), False, 'from planet import control\n'), ((4889, 4939), 
'gym.spaces.Dict', 'gym.spaces.Dict', (["{'state': self.observation_space}"], {}), "({'state': self.observation_space})\n", (4904, 4939), False, 'import gym\n'), ((5630, 5648), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (5638, 5648), False, 'import gym\n'), ((5785, 5834), 'planet.control.wrappers.ActionRepeat', 'control.wrappers.ActionRepeat', (['env', 'action_repeat'], {}), '(env, action_repeat)\n', (5814, 5834), False, 'from planet import control\n'), ((5845, 5892), 'planet.control.wrappers.LimitDuration', 'control.wrappers.LimitDuration', (['env', 'max_length'], {}), '(env, max_length)\n', (5875, 5892), False, 'from planet import control\n'), ((5903, 5971), 'planet.control.wrappers.PixelObservations', 'control.wrappers.PixelObservations', (['env', '(64, 64)', 'np.uint8', '"""image"""'], {}), "(env, (64, 64), np.uint8, 'image')\n", (5937, 5971), False, 'from planet import control\n'), ((5982, 6018), 'planet.control.wrappers.ConvertTo32Bit', 'control.wrappers.ConvertTo32Bit', (['env'], {}), '(env)\n', (6013, 6018), False, 'from planet import control\n'), ((7706, 7716), 'planet.envs.carla.env.CarlaEnv', 'CarlaEnv', ([], {}), '()\n', (7714, 7716), False, 'from planet.envs.carla.env import CarlaEnv\n'), ((7774, 7823), 'planet.control.wrappers.ActionRepeat', 'control.wrappers.ActionRepeat', (['env', 'action_repeat'], {}), '(env, action_repeat)\n', (7803, 7823), False, 'from planet import control\n'), ((7834, 7881), 'planet.control.wrappers.LimitDuration', 'control.wrappers.LimitDuration', (['env', 'max_length'], {}), '(env, max_length)\n', (7864, 7881), False, 'from planet import control\n'), ((7892, 7960), 'planet.control.wrappers.PixelObservations', 'control.wrappers.PixelObservations', (['env', '(96, 96)', 'np.uint8', '"""image"""'], {}), "(env, (96, 96), np.uint8, 'image')\n", (7926, 7960), False, 'from planet import control\n'), ((7971, 8007), 'planet.control.wrappers.ConvertTo32Bit', 'control.wrappers.ConvertTo32Bit', (['env'], {}), '(env)\n', 
(8002, 8007), False, 'from planet import control\n'), ((3740, 3764), 'dm_control.suite.load', 'suite.load', (['domain', 'task'], {}), '(domain, task)\n', (3750, 3764), False, 'from dm_control import suite\n'), ((7167, 7182), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (7175, 7182), True, 'import numpy as np\n'), ((7290, 7305), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (7298, 7305), True, 'import numpy as np\n'), ((6952, 6994), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(1,)'}), '(low=-1, high=1, shape=(1,))\n', (6966, 6994), False, 'import gym\n')] |
from collections import Counter
import numpy as np
import tensorflow as tf
from copy import copy
from constraint.dfa import DFA
class Constraint(object):
    """A DFA-backed behavioral constraint over (obs, action, done) steps.

    Each environment step is translated into a DFA input token via
    `translation_fn`; the DFA decides whether the step violates the
    constraint.  Hard constraints raise on violation; soft constraints
    return `violation_reward` as a reward modification instead.
    """
    def __init__(self,
                 name,
                 dfa_string,
                 is_hard,
                 violation_reward=None,
                 translation_fn=lambda x: x,
                 inv_translation_fn=None):
        """Builds the constraint from a serialized DFA description.

        Hard constraints require `inv_translation_fn` (to mask violating
        actions up front); soft constraints require `violation_reward`.
        """
        self.name = name
        self.dfa = DFA.from_string(dfa_string)
        if is_hard:
            assert inv_translation_fn is not None
        else:
            assert violation_reward is not None
        self.violation_reward = violation_reward
        self.translation_fn = translation_fn
        self.is_hard = is_hard
        self.inv_translation_fn = inv_translation_fn
    def step(self, obs, action, done):
        """Advances the DFA one step; returns (is_violation, reward_mod)."""
        token = self.translation_fn(obs, action, done)
        is_viol = self.dfa.step(token)
        if is_viol and self.is_hard:
            # Hard constraints must never be violated at runtime.
            raise Exception('Hard violation')
        rew_mod = self.violation_reward if is_viol else 0.
        return is_viol, rew_mod
    def reset(self):
        """Returns the DFA to its initial state (call at episode start)."""
        self.dfa.reset()
    @property
    def current_state(self):
        # Current DFA state id.
        return self.dfa.current_state
    @property
    def num_states(self):
        # Total number of DFA states.
        return len(self.dfa.states)
    def is_violating(self, obs, action, done):
        """Checks whether the step would violate, without advancing the DFA."""
        return self.dfa.step(self.translation_fn(obs, action, done),
                             hypothetical=True)
    def violating_mask(self, num_actions):
        """Returns a length-`num_actions` mask of currently violating actions.

        `inv_translation_fn` maps each violating DFA input back to the
        action indices that would produce it.  NOTE(review): an action
        reachable from several violating inputs accumulates a value > 1.
        """
        mask = np.zeros(num_actions)
        for v in self.dfa.violating_inputs:
            for i in self.inv_translation_fn(v):
                mask += np.eye(num_actions)[i]
        return mask
class LinearConstraint(Constraint):
    """Soft constraint whose penalty shapes reward by DFA progress.

    The reward modification is a shaping term of the form
    gamma * phi(s') - phi(s), where phi is the DFA state's
    steps-to-accept distance normalised by the maximum such distance,
    scaled by `violation_reward`.
    """
    def __init__(self, name, dfa_string, violation_reward, translation_fn,
                 gamma):
        super().__init__(name, dfa_string, False, violation_reward,
                         translation_fn)
        self.gamma = gamma
        # DFA state observed on the previous step, for the shaping delta.
        self.prev_state = self.current_state
    def step(self, obs, action, done):
        """Advances the DFA; returns (is_violation, shaped_cost_mod)."""
        is_viol, _ = super().step(obs, action, done)
        # Normalised distance-to-accept for the new and previous DFA states.
        scale = self.dfa._max_steps_to_accept
        potential_now = self.dfa._steps_to_accept[self.dfa.current_state] / scale
        potential_prev = self.dfa._steps_to_accept[self.prev_state] / scale
        cost_mod = (self.gamma * potential_now -
                    potential_prev) * self.violation_reward
        self.prev_state = self.current_state
        return is_viol, cost_mod
class SoftDenseConstraint(Constraint):
    """Soft constraint with dense shaping based on empirically-estimated
    expected hitting times of each DFA state.

    The potential of a state is (1/2)**(E[hitting time]/target_time); the
    hitting-time estimates are updated from observed episodes with an
    exponential moving average of rate ``alpha``.
    """
    def __init__(self,
                 name,
                 dfa_string,
                 violation_reward,
                 translation_fn,
                 gamma,
                 alpha=0.025,
                 target_time=40):
        super(SoftDenseConstraint,
              self).__init__(name, dfa_string, False, violation_reward,
                             translation_fn)
        self.alpha = alpha
        # NOTE(review): the gamma argument is ignored and overwritten with a
        # fixed 1.05 here -- confirm this is intended.
        self.gamma = 1.05
        self.target_time = target_time
        # counters for tracking value of each DFA state
        self.prev_state = self.current_state
        self.current_step = 0
        self.state_buffer = list()
        # Initial guess: every non-accepting state takes target_time steps.
        self.expected_hitting_time = np.ones(self.num_states) * target_time
        for accept_state in self.dfa.accepting_states:
            self.expected_hitting_time[accept_state] = 0.
        self.empirical_hitting_times = []
    def step(self, obs, action, done):
        """Advance the DFA; return (is_violation, shaped reward modifier)."""
        is_viol, _ = super().step(obs, action, done)
        # record state
        self.state_buffer.append(self.current_state)
        # update reward
        # Potential of current/previous states from expected hitting times.
        current_cost_val = (1 / 2)**(
            self.expected_hitting_time[self.current_state] / self.target_time)
        prev_cost_val = (1 / 2)**(self.expected_hitting_time[self.prev_state] /
                                  self.target_time)
        if self.prev_state in self.dfa.accepting_states: prev_cost_val = 0.
        rew_mod = (self.gamma * current_cost_val -
                   prev_cost_val) * self.violation_reward
        # update hitting times
        if is_viol:
            # On violation, turn the visited-state buffer into a per-state
            # empirical hitting time: steps remaining after the (mean) visit.
            self.state_buffer = np.array(self.state_buffer)
            ep_expected_hitting_time = np.zeros(self.num_states)
            for i in range(self.num_states):
                # NaN for states never visited this segment (empty argwhere).
                ep_expected_hitting_time[i] = len(self.state_buffer) - np.mean(
                    np.argwhere(self.state_buffer == i)) - 1
            self.empirical_hitting_times.append(ep_expected_hitting_time)
            self.state_buffer = list()
        self.prev_state = self.current_state
        if done:
            if len(self.empirical_hitting_times) == 0:
                self.state_buffer = list()
                self.empirical_hitting_times = list()
                return is_viol, rew_mod
            if len(self.empirical_hitting_times) == 1:
                self.empirical_hitting_times = np.array(
                    self.empirical_hitting_times)
            else:
                self.empirical_hitting_times = np.stack(
                    self.empirical_hitting_times)
            # Per-state mean over this episode's segments, ignoring NaNs.
            ep_hitting_times = [
                np.mean([
                    v for v in self.empirical_hitting_times[:, i]
                    if not np.isnan(v)
                ]) for i in range(self.num_states)
            ]
            for i in reversed(range(len(self.expected_hitting_time))):
                if not np.isnan(ep_hitting_times[i]):
                    # Exponential moving average update at rate alpha.
                    self.expected_hitting_time[i] = (
                        self.alpha * ep_hitting_times[i]) + (
                            (1 - self.alpha) * self.expected_hitting_time[i])
                    # NOTE(review): i is always < len(...), so this condition
                    # is always True; possibly len(...)-1 was intended.
                    # Enforces monotonicity against later states.
                    if i != len(self.expected_hitting_time):
                        self.expected_hitting_time[i] = max(
                            self.expected_hitting_time[i],
                            max(self.expected_hitting_time[i:]))
            self.state_buffer = list()
            self.empirical_hitting_times = list()
        return is_viol, rew_mod
    def reset(self):
        """Reset the DFA and the previous-state tracker."""
        self.dfa.reset()
        self.prev_state = self.current_state
def float_counter_mul(f, c):
    """Scale every value of the counter-like mapping ``c`` by float ``f``,
    in place, and return the same mapping object."""
    snapshot = dict(c)
    for key in snapshot:
        c[key] = f * snapshot[key]
    return c
| [
"constraint.dfa.DFA.from_string",
"numpy.eye",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.isnan",
"numpy.argwhere"
] | [((431, 458), 'constraint.dfa.DFA.from_string', 'DFA.from_string', (['dfa_string'], {}), '(dfa_string)\n', (446, 458), False, 'from constraint.dfa import DFA\n'), ((1507, 1528), 'numpy.zeros', 'np.zeros', (['num_actions'], {}), '(num_actions)\n', (1515, 1528), True, 'import numpy as np\n'), ((3368, 3392), 'numpy.ones', 'np.ones', (['self.num_states'], {}), '(self.num_states)\n', (3375, 3392), True, 'import numpy as np\n'), ((4275, 4302), 'numpy.array', 'np.array', (['self.state_buffer'], {}), '(self.state_buffer)\n', (4283, 4302), True, 'import numpy as np\n'), ((4342, 4367), 'numpy.zeros', 'np.zeros', (['self.num_states'], {}), '(self.num_states)\n', (4350, 4367), True, 'import numpy as np\n'), ((5025, 5063), 'numpy.array', 'np.array', (['self.empirical_hitting_times'], {}), '(self.empirical_hitting_times)\n', (5033, 5063), True, 'import numpy as np\n'), ((5150, 5188), 'numpy.stack', 'np.stack', (['self.empirical_hitting_times'], {}), '(self.empirical_hitting_times)\n', (5158, 5188), True, 'import numpy as np\n'), ((1646, 1665), 'numpy.eye', 'np.eye', (['num_actions'], {}), '(num_actions)\n', (1652, 1665), True, 'import numpy as np\n'), ((5534, 5563), 'numpy.isnan', 'np.isnan', (['ep_hitting_times[i]'], {}), '(ep_hitting_times[i])\n', (5542, 5563), True, 'import numpy as np\n'), ((4513, 4548), 'numpy.argwhere', 'np.argwhere', (['(self.state_buffer == i)'], {}), '(self.state_buffer == i)\n', (4524, 4548), True, 'import numpy as np\n'), ((5363, 5374), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (5371, 5374), True, 'import numpy as np\n')] |
import os
import zipfile
import csv
import pandas as pd
import requests
import json
from itertools import islice
import sklearn.preprocessing
from lightfm.data import Dataset
import pandas
import numpy as np
from lightfm import LightFM
# restaurant_metadata = pd.read_json('rating_final.json', lines=True)
from scipy import sparse
from lightfm.evaluation import auc_score
# ************************************************************************************
def create_interaction_matrix(df, user_col, item_col, rating_col, norm=False, threshold=None):
    '''
    Build a user x item interaction matrix from transactional interactions.

    Required Input -
        - df = Pandas DataFrame containing user-item interactions
        - user_col = column name containing user's identifier
        - item_col = column name containing item's identifier
        - rating_col = column name containing user feedback on interaction with a given item
        - norm (optional) = True to binarize ratings against `threshold`
        - threshold (required if norm = True) = value above which the rating is favorable
    Expected output -
        - Pandas DataFrame indexed by user, one column per item, missing
          interactions filled with 0
    '''
    # Sum ratings per (user, item), then pivot items into columns.
    summed = df.groupby([user_col, item_col])[rating_col].sum()
    interactions = summed.unstack().reset_index().fillna(0).set_index(user_col)
    if norm:
        # Binarize: 1 for favorable ratings, 0 otherwise.
        interactions = interactions.applymap(lambda value: 1 if value > threshold else 0)
    return interactions
# ****************************************************************************************
def create_item_dict(df, id_col, name_col):
    """
    Create an item dictionary mapping item_id -> item name.

    Required Input -
        - df = Pandas dataframe with Item information
        - id_col = Column name containing unique identifier for an item
        - name_col = Column name containing name of the item
    Expected Output -
        item_dict = Dictionary with item_id as key and item_name as value
        (for duplicate ids the last row wins, as before)

    Fix: the original looped ``df.loc[i, ...]`` over ``range(df.shape[0])``,
    which silently assumes a default RangeIndex and raises KeyError (or picks
    wrong rows) for any other index. Pairing the two columns directly works
    for every index.
    """
    return dict(zip(df[id_col], df[name_col]))
# *******************************************************************************
def create_user_dict(interactions):
    '''
    Create a user dictionary based on their index position in the
    interaction dataset.

    Required Input -
        interactions - dataset created by create_interaction_matrix
    Expected Output -
        user_dict - Dictionary with user_id as key and row position as value

    Idiom fix: replaces the manual counter loop with enumerate over the
    index; behavior is unchanged.
    '''
    return {user: position for position, user in enumerate(interactions.index)}
# **************************************************************************************
def sample_recommendation_user(model, interactions, user_id, user_dict,
                               item_dict, threshold=0, nrec_items=10, show=True):
    '''
    Function to produce user recommendations
    Required Input -
        - model = Trained matrix factorization model
        - interactions = dataset used for training the model
        - user_id = user ID for which we need to generate recommendation
        - user_dict = Dictionary type input containing interaction_index as key and user_id as value
        - item_dict = Dictionary type input containing item_id as key and item_name as value
        - threshold = value above which the rating is favorable in new interaction matrix
        - nrec_items = Number of output recommendation needed
    Expected Output -
        - Prints list of items the given user has already bought
        - Prints list of N recommended items which user hopefully will be interested in
    '''
    n_users, n_items = interactions.shape
    # Row position of this user in the interaction matrix.
    user_x = user_dict[user_id]
    # Predicted score for every item, labeled by item id, sorted best-first.
    scores = pd.Series(model.predict(user_x, np.arange(n_items)))
    scores.index = interactions.columns
    scores = list(pd.Series(scores.sort_values(ascending=False).index))
    # Item ids the user already rated above the threshold (sorted descending).
    known_items = list(pd.Series(interactions.loc[user_id, :] \
                                 [interactions.loc[user_id, :] > threshold].index) \
                       .sort_values(ascending=False))
    # Drop already-known items, then keep the top-N item ids.
    scores = [x for x in scores if x not in known_items]
    return_score_list = scores[0:nrec_items]
    # Map item ids to display names for printing.
    known_items = list(pd.Series(known_items).apply(lambda x: item_dict[x]))
    scores = list(pd.Series(return_score_list).apply(lambda x: item_dict[x]))
    if show == True:
        print("Known Likes:")
        counter = 1
        for i in known_items:
            print(str(counter) + '- ' + i)
            counter += 1
        print("\n Recommended Items:")
        counter = 1
        for i in scores:
            print(str(counter) + '- ' + i)
            counter += 1
    return return_score_list
# Script: train a LightFM hybrid recommender on restaurant rating data and
# print top-5 recommendations for user U1011.
# NOTE(review): ff and df file handles are never closed (only f is);
# consider 'with open(...)'.
f = open('rating_final_U1011.json', )
ff = open('userprofile.json', )
df = open(r'geoplaces2.json')
# Ratings, user profiles, and restaurant metadata as lists of JSON records.
data_User = json.load(ff)
data_item = json.load(df)
data = json.load(f)
# Register user/item ids and the budget/price feature vocabularies.
dataset = Dataset()
dataset.fit((x['userID'] for x in data),
            (x['placeID'] for x in data), (x['budget'] for x in data_User),(x['price'] for x in data_item))
# Sparse (user, item) interaction matrix from the rating records.
(interactions, weights) = dataset.build_interactions(((x['userID'], x['placeID']) for x in data))
print(repr(interactions))
# Per-user features: each user tagged with their budget category.
user_interactions = dataset.build_user_features((x['userID'], [x['budget']]) for x in data_User)
print(repr(user_interactions))
# Per-item features: each place tagged with its price category.
item_interactions = dataset.build_item_features((x['placeID'], [x['price']]) for x in data_item)
print(repr(item_interactions))
# Training hyperparameters for the WARP-loss hybrid model.
NUM_THREADS = 2
NUM_COMPONENTS = 30
NUM_EPOCHS = 3
ITEM_ALPHA = 1e-6
model = LightFM(loss='warp',
                item_alpha=ITEM_ALPHA,
                no_components=NUM_COMPONENTS)
model = model.fit(interactions,user_features=user_interactions,item_features=item_interactions,epochs=NUM_EPOCHS,num_threads=NUM_THREADS)
# Re-load the raw tables with pandas to build the dense interaction matrix
# and the id->name / id->row lookup dicts used for recommendation display.
# NOTE(review): 'df' is rebound here from a file handle to a DataFrame.
df = pd.read_json(r'geoplaces2.json')
dfm = pd.read_json(r'rating_final_U1011.json')
interactions1 = create_interaction_matrix(dfm, "userID", "placeID", "rating", norm=False, threshold=None)
item_dict = create_item_dict(df, "placeID", "name")
user_dict = create_user_dict(interactions1)
# Show top-5 recommendations for user U1011.
sample_recommendation_user(model, interactions1, "U1011", user_dict,
                           item_dict, threshold=0, nrec_items=5, show=True)
f.close()
| [
"pandas.Series",
"lightfm.data.Dataset",
"lightfm.LightFM",
"json.load",
"pandas.read_json",
"numpy.arange"
] | [((5044, 5057), 'json.load', 'json.load', (['ff'], {}), '(ff)\n', (5053, 5057), False, 'import json\n'), ((5070, 5083), 'json.load', 'json.load', (['df'], {}), '(df)\n', (5079, 5083), False, 'import json\n'), ((5091, 5103), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5100, 5103), False, 'import json\n'), ((5114, 5123), 'lightfm.data.Dataset', 'Dataset', ([], {}), '()\n', (5121, 5123), False, 'from lightfm.data import Dataset\n'), ((5732, 5805), 'lightfm.LightFM', 'LightFM', ([], {'loss': '"""warp"""', 'item_alpha': 'ITEM_ALPHA', 'no_components': 'NUM_COMPONENTS'}), "(loss='warp', item_alpha=ITEM_ALPHA, no_components=NUM_COMPONENTS)\n", (5739, 5805), False, 'from lightfm import LightFM\n'), ((5983, 6014), 'pandas.read_json', 'pd.read_json', (['"""geoplaces2.json"""'], {}), "('geoplaces2.json')\n", (5995, 6014), True, 'import pandas as pd\n'), ((6022, 6061), 'pandas.read_json', 'pd.read_json', (['"""rating_final_U1011.json"""'], {}), "('rating_final_U1011.json')\n", (6034, 6061), True, 'import pandas as pd\n'), ((3980, 3998), 'numpy.arange', 'np.arange', (['n_items'], {}), '(n_items)\n', (3989, 3998), True, 'import numpy as np\n'), ((4137, 4228), 'pandas.Series', 'pd.Series', (['interactions.loc[user_id, :][interactions.loc[user_id, :] > threshold].index'], {}), '(interactions.loc[user_id, :][interactions.loc[user_id, :] >\n threshold].index)\n', (4146, 4228), True, 'import pandas as pd\n'), ((4447, 4469), 'pandas.Series', 'pd.Series', (['known_items'], {}), '(known_items)\n', (4456, 4469), True, 'import pandas as pd\n'), ((4519, 4547), 'pandas.Series', 'pd.Series', (['return_score_list'], {}), '(return_score_list)\n', (4528, 4547), True, 'import pandas as pd\n')] |
# coding: utf-8
""" Some photometry tools for stellar spectroscopists """
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from scipy import interpolate
from astropy.io import ascii
from .robust_polyfit import polyfit
import logging
import os, sys, time
logger = logging.getLogger(__name__)
__all__ = []
from .read_data import datapath
from .read_data import load_parsec_isochrones, load_dartmouth_isochrones
def eval_BC(Teff,logg,FeH,filt="g",allBCs=None):
    """
    Interpolate a bolometric correction at (Teff, logg, FeH) for filter
    ``filt`` from the Casagrande+Vandenberg table.
    Default is alpha/Fe = +0.4
    """
    if allBCs is None: allBCs = read_bc_table()
    BCs = allBCs[filt]
    # One query row per (Teff, logg, FeH) triple; inputs may be scalar or array.
    points = np.atleast_2d([np.ravel(Teff),np.ravel(logg),np.ravel(FeH)]).T
    # Clamp metallicities below the grid edge up to [Fe/H] = -2.5.
    points[points[:,2] < -2.5,2] = -2.5
    out = interpolate.griddata(BCs[:,0:3], BCs[:,3], points, method='linear')
    return out
def read_bc_table(fname=datapath+"/bolometric_corrections/bc_p04_ugriz.data"):
    """
    Load a Casagrande+Vandenberg 2014 BC table

    Returns a dict: filter name -> (Nrows, 4) array with columns
    (Teff, logg, [Fe/H], BC), suitable for eval_BC's griddata call.
    """
    with open(fname,'r') as fp:
        lines = fp.readlines()
    # Header line 1 gives the grid dimensions.
    s = lines[1].split()
    NTeff, Nlogg, NMH, Nfilt = int(s[0]), int(s[2]), int(s[5]), int(s[7])
    allBCs = {}
    # Teff grid spans lines 2-4; logg grid is line 5; line 6 lists, for each
    # logg, how many Teff points are tabulated.
    Teffs = list(map(float, "".join(lines[2:5]).replace("\n"," ").split()))
    loggs = list(map(float, lines[5].split()))
    Nlist = list(map(int, lines[6].split()))
    iline = 7
    allBCs = {}
    for ifilt in range(Nfilt):
        # Rows of (Teff, logg, FeH, BC) for this filter.
        BCtable = np.zeros((np.sum(Nlist)*NMH,4))
        itable = 0
        for iMH in range(NMH):
            # Per-metallicity block header: [Fe/H], [alpha/Fe], filter name.
            s = lines[iline].split()
            FeH = float(s[2]); aFe = float(s[5]); filter = s[9]
            iline += 1
            for ilogg,logg in enumerate(loggs):
                BCrow = []
                # BC values for one logg may wrap over several file lines.
                while len(BCrow) < Nlist[ilogg]:
                    line = lines[iline]
                    iline += 1
                    BCrow += list(map(float, line.split()))
                for iTeff,Teff in enumerate(Teffs[0:Nlist[ilogg]]):
                    BCtable[itable,0] = Teff
                    BCtable[itable,1] = logg
                    BCtable[itable,2] = FeH
                    BCtable[itable,3] = BCrow[iTeff]
                    itable += 1
        allBCs[filter] = BCtable
    return allBCs
##################################################################
# From Drlica-Wagner et al. 2018 (https://arxiv.org/abs/1708.01531)
# g_{des} = g_{sdss} - 0.104 \times (g-r)_{sdss} + 0.01
# r_{des} = r_{sdss} - 0.102 \times (g-r)_{sdss} + 0.02
# i_{des} = i_{sdss} - 0.256 \times (i-z)_{sdss} + 0.02
# z_{des} = z_{sdss} - 0.086 \times (i-z)_{sdss} + 0.01
##################################################################
def gr_sdss2des(gsdss,rsdss):
    """Convert SDSS g,r magnitudes to the DES system
    (Drlica-Wagner et al. 2018, arXiv:1708.01531)."""
    color = gsdss - rsdss
    gdes = gsdss - 0.104 * color + 0.01
    rdes = rsdss - 0.102 * color + 0.02
    return gdes, rdes
def iz_sdss2des(isdss,zsdss):
    """Convert SDSS i,z magnitudes to the DES system
    (Drlica-Wagner et al. 2018, arXiv:1708.01531)."""
    color = isdss - zsdss
    ides = isdss - 0.256 * color + 0.02
    zdes = zsdss - 0.086 * color + 0.01
    return ides, zdes
def gr_des2sdss(gdes,rdes):
    """Invert the SDSS->DES g,r transformation of Drlica-Wagner et al. 2018.

    Solves (g-r)_des = 0.998*(g-r)_sdss - 0.01 for the SDSS color, then
    undoes the per-band offsets.
    """
    color_sdss = (gdes - rdes + 0.01) / 0.998
    gsdss = gdes + 0.104 * color_sdss - 0.01
    rsdss = rdes + 0.102 * color_sdss - 0.02
    return gsdss, rsdss
def iz_des2sdss(ides,zdes):
    """Invert the SDSS->DES i,z transformation of Drlica-Wagner et al. 2018.

    Solves (i-z)_des = 0.830*(i-z)_sdss + 0.01 for the SDSS color, then
    undoes the per-band offsets.
    """
    color_sdss = (ides - zdes - 0.01) / 0.830
    isdss = ides + 0.256 * color_sdss - 0.02
    zsdss = zdes + 0.086 * color_sdss - 0.01
    return isdss, zsdss
def griz_des2sdss(gdes,rdes,ides,zdes):
    """Convert all four DES griz magnitudes to the SDSS system
    (g,r and i,z pairs are converted independently)."""
    return (*gr_des2sdss(gdes, rdes), *iz_des2sdss(ides, zdes))
### Setup Jordi06
# http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
def get_jordi06_coeffs(type):
    """Jordi+2006 (g-r) -> B-V transformation coefficients.

    type: 0 = combined Pop I/Pop II, 1 = Pop I, 2 = Pop II.
    Returns (a_Bmg, b_Bmg, a_Vmg, b_Vmg, e_a_Bmg, e_b_Bmg, e_a_Vmg, e_b_Vmg).
    Raises ValueError for any other type.
    """
    coeff_table = {
        #   a_Bmg   b_Bmg   a_Vmg   b_Vmg  e_a_Bmg e_b_Bmg e_a_Vmg e_b_Vmg
        0: (0.313,  0.219, -0.565, -0.016,  0.003,  0.002,  0.001,  0.001),
        1: (0.312,  0.219, -0.573, -0.016,  0.003,  0.002,  0.002,  0.002),
        2: (0.349,  0.245, -0.569,  0.021,  0.009,  0.006,  0.007,  0.004),
    }
    if type not in coeff_table:
        raise ValueError("Type must be 0, 1, 2 (got {})".format(type))
    return coeff_table[type]
def jordi06_gmi_to_VmI(gmi,geterr=True):
    """Jordi+2006 transformation from SDSS (g-i) to (V-I).

    Valid only for g-i < 2.1 (asserted; NaNs pass through). When geterr is
    True, returns (lo, value, hi) 1-sigma bounds from the coefficient errors;
    otherwise just the central value.
    """
    flat = np.ravel(gmi)
    assert np.all(np.logical_or(flat < 2.1, np.isnan(flat)))
    central = 0.674 * gmi + 0.406
    if not geterr:
        return central
    lo = (0.674 - 0.005) * gmi + (0.406 - 0.004)
    hi = (0.674 + 0.005) * gmi + (0.406 + 0.004)
    return lo, central, hi
def _gmr_to_BmV(gmr,geterr=True,type=0):
    """Jordi+2006 scalar transformation from SDSS (g-r) to Johnson (B-V).

    type selects the calibration sample (0 combined, 1 Pop I, 2 Pop II).
    Returns B-V, or (min, value, max) 1-sigma bounds when geterr is True.
    """
    a_Bmg, b_Bmg, a_Vmg, b_Vmg, e_a_Bmg, e_b_Bmg, e_a_Vmg, e_b_Vmg = get_jordi06_coeffs(type)
    # Calculate middle
    Bmg = a_Bmg*gmr + b_Bmg
    Vmg = a_Vmg*gmr + b_Vmg
    BmV = Bmg - Vmg
    if not geterr: return BmV
    # Calculate 1 sigma error estimate
    # The sign of g-r decides which perturbation of the slope coefficient
    # maximizes/minimizes each color, so the branches differ only in the
    # sign applied to the e_a terms.
    if gmr >= 0:
        Bmg_max = (a_Bmg+e_a_Bmg)*gmr+(b_Bmg+e_b_Bmg)
        Bmg_min = (a_Bmg-e_a_Bmg)*gmr+(b_Bmg-e_b_Bmg)
        Vmg_max = (a_Vmg+e_a_Vmg)*gmr+(b_Vmg+e_b_Vmg)
        Vmg_min = (a_Vmg-e_a_Vmg)*gmr+(b_Vmg-e_b_Vmg)
    else:
        Bmg_max = (a_Bmg-e_a_Bmg)*gmr+(b_Bmg+e_b_Bmg)
        Bmg_min = (a_Bmg+e_a_Bmg)*gmr+(b_Bmg-e_b_Bmg)
        Vmg_max = (a_Vmg-e_a_Vmg)*gmr+(b_Vmg+e_b_Vmg)
        Vmg_min = (a_Vmg+e_a_Vmg)*gmr+(b_Vmg-e_b_Vmg)
    # Widest B-V spread from the extreme B-g and V-g values.
    BmV_max = Bmg_max-Vmg_min
    BmV_min = Bmg_min-Vmg_max
    return BmV_min,BmV,BmV_max
# Elementwise-vectorized version for array inputs.
jordi06_gmr_to_BmV = np.vectorize(_gmr_to_BmV)
###################################################################
# From Casagrande et al. 2010, applicable to dwarfs and subgiants #
###################################################################
def C10_Teff_BmV(BmV, FeH):
    """Casagrande+2010 (B-V) -> Teff color-temperature relation; 73 K scatter.

    theta = 5040/Teff is a quadratic in color with metallicity cross terms.
    """
    a = (.5665, .4809, -.0060, -.0613, -.0042, -.0055)
    theta = a[0] + a[1]*BmV + a[2]*BmV*BmV + a[3]*BmV*FeH + a[4]*FeH + a[5]*FeH*FeH
    return 5040./theta
def C10_Teff_VmI(VmI, FeH):
    """Casagrande+2010 (V-I) -> Teff color-temperature relation; 59 K scatter.

    theta = 5040/Teff is a quadratic in color with metallicity cross terms.
    """
    a = (.4033, .8171, -.1987, -.0409, .0319, .0012)
    theta = a[0] + a[1]*VmI + a[2]*VmI*VmI + a[3]*VmI*FeH + a[4]*FeH + a[5]*FeH*FeH
    return 5040./theta
##################################
# From Alonso et al. 1999: F0-K5 #
##################################
def A99_BC_V(Teff, FeH):
    """
    Alonso+1999 bolometric correction in V as a function of Teff and [Fe/H].
    Typical scatter is 0.025 for cool stars, 0.009 for warm stars (dividing at T=4500K)
    Limits of applicability are 3.5 < logT < 3.96, though different for different [Fe/H] ranges
    """
    # X is the log10(Teff) offset from 3.52 used by the A99 fits.
    X = np.ravel(np.log10(Teff) - 3.52); FeH = np.ravel(FeH)
    # Equations 17 and 18
    BC17 = -5.531e-2/X - 0.6177 + 4.420*X - 2.669*X**2. + 0.6943*X*FeH - 0.1071*FeH - 8.612e-3*FeH**2.
    BC18 = -9.930e-2/X + 2.887e-2 + 2.275*X - 4.425*X**2. + 0.3505*X*FeH - 5.558e-2*FeH - 5.375e-3*FeH**2
    # Eq 17 for cooler stars; replace with Eq 18 where log10(Teff) >= 3.65.
    BC = BC17.copy()
    ii = np.log10(Teff) >= 3.65
    BC[ii] = BC18[ii]
    return BC
def B79_VmI_C2J(VmI):
    """Convert V-I from Cousins to Johnson magnitudes (Bessell 1979).

    Piecewise linear: a separate slope for blue stars (V-I < 0) and a
    shifted slope for very red stars (converted V-I > 2.0).
    """
    cousins = np.ravel(VmI)
    johnson = cousins / 0.778
    blue = cousins < 0
    johnson[blue] = cousins[blue] / 0.713
    red = johnson > 2.0
    johnson[red] = (cousins[red] + 0.13) / 0.835
    return johnson
def A99_Teff_VmI(VmI):
    """
    Alonso+1999 (V-I) -> Teff relation; 125 K scatter, no [Fe/H] dependence.

    The fit uses Johnson's V-I, so the (assumed Johnson-Cousins) input is
    first converted via B79_VmI_C2J.
    """
    vmi_johnson = B79_VmI_C2J(VmI)
    theta = 0.5379 + 0.3981 * vmi_johnson + 4.432e-2 * vmi_johnson**2 - 2.693e-2 * vmi_johnson**3
    return 5040./theta
def _A99_function(X, FeH, a0, a1, a2, a3, a4, a5):
    # Generic A99 fit: theta = a0 + a1*X + a2*X^2 + a3*X*FeH + a4*FeH + a5*FeH^2
    return a0 + a1*X + a2*X**2. + a3*X*FeH + a4*FeH + a5*FeH**2.
def _A99_Teff_BmV_3(BmV, FeH):
    """ 167K scatter, B-V < 0.7 """
    # Coefficients of A99 equation 3 (blue stars); returns theta = 5040/Teff.
    a0, a1, a2, a3, a4, a5 = 0.5716, 0.5404, -6.126e-2, -4.862e-2, -1.777e-2, -7.969e-3
    return _A99_function(BmV, FeH, a0, a1, a2, a3, a4, a5)
def _A99_Teff_BmV_4(BmV, FeH):
    """ 96K scatter, B-V > 0.8 """
    # Coefficients of A99 equation 4 (red stars); returns theta = 5040/Teff.
    a0, a1, a2, a3, a4, a5 = 0.6177, 0.4354, -4.025e-3, 5.204e-2, -0.1127, -1.385e-2
    return _A99_function(BmV, FeH, a0, a1, a2, a3, a4, a5)
def A99_Teff_BmV(BmV, FeH):
    """
    Johnson's B and V
    Using equations 3 and 4 of A99, scatter is 167K
    Linearly interpolating in theta = 5040/Teff for 0.7 < B-V < 0.8
    """
    BmV = np.ravel(BmV); FeH = np.ravel(FeH)
    # theta from both fits, everywhere.
    t3 = _A99_Teff_BmV_3(BmV, FeH)
    t4 = _A99_Teff_BmV_4(BmV, FeH)
    # Bluest stars, Eq 3
    t = t3.copy()
    # Reddest stars, Eq 4
    t[BmV > 0.8] = t4[BmV > 0.8]
    # In between: 0.7 < B-V < 0.8, linear interpolate
    # theta between Eq 3 evaluated at 0.7 and Eq 4 evaluated at 0.8.
    ii = np.logical_and(BmV > 0.7, BmV <= 0.8)
    x1, x2 = 0.7, 0.8
    y1 = _A99_Teff_BmV_3(x1, FeH)
    y2 = _A99_Teff_BmV_4(x2, FeH)
    m = (y2 - y1)/(x2 - x1)
    y = m * (BmV - x1) + y1
    t[ii] = y[ii]
    return 5040./t
def phot_logg(Teff,mag0,BCmag,distmod,Mstar=0.75):
    """
    Photometric surface gravity from the fundamental relation, with solar
    reference values as in Venn et al. 2017 (logg=4.44, Teff=5780, Mbol=4.75).

    Teff    : effective temperature [K]
    mag0    : dereddened apparent magnitude
    BCmag   : bolometric correction for that magnitude
    distmod : distance modulus
    Mstar   : assumed stellar mass in solar masses (default 0.75)
    """
    mass_term = np.log10(Mstar)
    teff_term = 4*np.log10(Teff/5780)
    lum_term = 0.4 * (mag0 - distmod + BCmag - 4.75)
    return 4.44 + mass_term + teff_term + lum_term
def iterate_find_logg(Teff,mag0,FeH,dmod,filt,maxiter=10,tol=.005):
    """ Assumes [alpha/Fe] = +0.4, sdss mags for filt

    Fixed-point iteration: logg depends on the bolometric correction, and
    the BC interpolation depends on logg. Iterates until logg changes by
    less than ``tol`` or ``maxiter`` is reached (then warns).
    """
    # Initialize BC and logg
    BC = 0.0
    logg = phot_logg(Teff,mag0,BC,dmod)
    for iter in range(maxiter):
        BC = eval_BC(Teff, logg, FeH, filt=filt)
        new_logg = phot_logg(Teff,mag0,BC,dmod)
        if np.all(np.abs(new_logg - logg) < tol):
            break
        logg = new_logg
    else:
        # for/else: only runs when the loop never hit 'break' (no convergence).
        print("WARNING: Reached max iters")
    return logg
def phot_logg_error(Tfracerr, dmoderr, masserr=0.05, magerr=0.0, BCerr=0.03):
    """
    Estimate 1 sigma error in logg (quadrature sum of the contributions).

    Tfracerr: temperature error divided by temperature
    dmoderr: distance modulus error in mag
    masserr (0.05 dex): from assuming a mass, 0.05 is 0.7-0.8 Msun
    magerr: assumed negligible by default
    BCerr: estimated about 0.03 mag from running CV14 several times
    """
    contributions = (
        masserr,         # mass assumption, already in dex
        4 * Tfracerr,    # Taylor expansion of the 4*log10(Teff) term
        0.4 * magerr,    # magnitude terms enter logg with factor 0.4
        0.4 * dmoderr,
        0.4 * BCerr,
    )
    return np.sqrt(sum(c * c for c in contributions))
###################
## Y2 isochrones ##
###################
def get_logT_to_logg(FeH=-3.0):
    """Build an interpolator logT -> logg along a 12 Gyr Y2 isochrone for one
    of the tabulated metallicities (-2.0, -2.5, -3.0)."""
    assert FeH in [-2.0, -2.5, -3.0]
    if FeH == -2.0:
        iso = ascii.read(datapath+'/stellar_param_data/afe040feh200set1_12gyr.txt')
    elif FeH == -2.5:
        iso = ascii.read(datapath+'/stellar_param_data/afe040feh250set1_12gyr.txt')
    elif FeH == -3.0:
        iso = ascii.read(datapath+'/stellar_param_data/afe040feh300set1_12gyr.txt')
    # Cut at the logg of the maximum-logT point so that logT -> logg is
    # single-valued on the retained portion of the isochrone.
    ii_max_logT = np.argmax(iso['logT'])
    max_logT = iso[ii_max_logT]['logT']
    max_logg = iso[ii_max_logT]['logg']
    #print max_logT, max_logg
    ii = iso['logg'] < max_logg
    logT = iso[ii]['logT']
    logg = iso[ii]['logg']
    logT_to_logg = interpolate.interp1d(logT,logg)
    return logT_to_logg
# Pre-built logT->logg interpolators for [Fe/H] = -2.0, -2.5, -3.0 (in order).
_my_interps = [get_logT_to_logg(FeH) for FeH in [-2.0,-2.5,-3.0]]
def _logTFeH_to_logg(logT,FeH):
    """logg from logT, linearly blending between the tabulated [Fe/H] grid
    points; clamps to the -2.0 / -3.0 interpolators outside the grid."""
    if FeH > -2.0: return _my_interps[0](logT)
    elif FeH <= -3.0: return _my_interps[2](logT)
    elif FeH <= -2.0 and FeH > -2.5:
        # Blend -2.0 and -2.5 interpolators; x = 1 at FeH = -2.0.
        x = (FeH+2.5)*2.0
        assert x <= 1 and x >= 0
        logg1 = _my_interps[0](logT)
        logg2 = _my_interps[1](logT)
        return logg1 * x + logg2 * (1-x)
    elif FeH <= -2.5 and FeH > -3.5:
        # Blend -2.5 and -3.0 interpolators; x = 1 at FeH = -2.5.
        x = (FeH+3.0)*2.0
        assert x <= 1 and x >= 0
        logg1 = _my_interps[1](logT)
        logg2 = _my_interps[2](logT)
        return logg1 * x + logg2 * (1-x)
    else:
        raise ValueError("FeH = {}".format(FeH))
# Elementwise-vectorized version for array inputs.
logTFeH_to_logg = np.vectorize(_logTFeH_to_logg)
###############################
## Microturbulence Relations ##
###############################
def get_logg_to_vt_B05():
    """Fit a quadratic vt(logg) to the Barklem+2005 data table and return the
    fit as a callable."""
    b = ascii.read(datapath+'/stellar_param_data/barklem.txt')
    ## This fails in the newer version of scipy
    #iisort = np.argsort(b['logg'])
    #fit = interpolate.UnivariateSpline(b['logg'][iisort],b['Vt'][iisort],k=2)
    # Robust degree-2 polynomial fit (sigma, the fit scatter, is discarded).
    coeff, sigma = polyfit(b['logg'],b['Vt'],2)
    fit = lambda x: np.polyval(coeff, x)
    return fit
def logg_to_vt_B05(logg):
    """Microturbulence (km/s) from logg via the Barklem+2005 quadratic fit.

    Fix: the fit is now built once on first use and cached on the function
    object; previously every call re-read the Barklem data file and re-ran
    the polynomial fit.
    """
    fit = getattr(logg_to_vt_B05, "_cached_fit", None)
    if fit is None:
        fit = get_logg_to_vt_B05()
        logg_to_vt_B05._cached_fit = fit
    return fit(logg)
def logg_to_vt_K09(logg):
    """ Kirby et al. 2009 ApJ 705, 328 (uncertainty is ~ 0.05 + 0.03*logg) """
    slope = -0.23
    return 2.13 + slope * logg
def logg_to_vt_M08(logg):
    """ Marino et al. 2008 A&A 490, 625 (from Gratton et al. 1996) """
    slope = -0.322
    return 2.22 + slope * logg
#################
## Dereddening ##
#################
def deredden(EBV,filt):
    """ Subtract this value from the observed magnitude to get the dereddened mags """
    # SF11 table maps filter name -> A(band)/E(B-V) extinction coefficient.
    conversion_data = ascii.read(datapath+"/stellar_param_data/sf11.txt")
    assert filt in conversion_data["filter"], (filt, conversion_data["filter"])
    # Extinction in this band = E(B-V) * coefficient for the matching filter.
    return EBV * float(conversion_data["AB_EBV"][np.where(conversion_data["filter"]==filt)[0]])
"""
Notes about filter conversions and definitions.
Johnson-Cousins system: UBV in Johnson, RI in Cousins. I think this is the same as the Landolt system.
Jordi+2006: converts from SDSS (as observed at APO, with primes???) to UBV(RI)c.
Alonso+1999: converts JOHNSON'S ONLY colors to Teff. So RI need to go to (RI)c if you use V-I.
Casagrande+2010: converts Johnson-Cousins to Teff
So the most consistent thing for DES mags is to go from griz_DES -> griz_SDSS -> UBV(RI)c -> Casagrande+2010
Note Casagrande+2010 is not calibrated to very red giants (<4500K).
For E(B-V)=0.02, I found the order in which you deredden makes <1 mmag difference in the final color.
"""
def determine_stellar_params(gmag,rmag,imag,zmag,
                             MH,dmod,
                             EBV=0,EBVerr=0.0,dmoderr=0.1,
                             gerr=0.02,rerr=0.02,ierr=0.02,zerr=0.02,
                             verbose=True,fp=sys.stdout,
                             Teff_color="VmI", Teff_calib="C10",
                             logg_mag="r", full_output=False):
    """
    Derive Teff, logg and vt (with errors) from DES photometry.

    [g,r,i,z]mag: DES magnitudes
    MH: input metallicity
    dmod: distance modulus
    [g,r,i,z]err: magnitude error
      default 0.02 mag in each band, the absolute calibration uncertainty (ADW+2017 arxiv:1708.01531)
      (The internal calibration uncertainty is <4mmag)
    Effective temperature error includes:
      [g/r/i]err, EBVerr, Jordi06 err
    Teff_color/Teff_calib select the color and calibration; logg_mag selects
    the band used for the photometric gravity. With full_output=True the
    color, its error, and the SDSS magnitudes are returned as well.
    """
    assert Teff_color in ["BmV","VmI"], Teff_color
    assert Teff_calib in ["C10","A99"], Teff_calib
    assert logg_mag in ["g","r","i"], logg_mag
    # Convert DES -> SDSS first; all calibrations below are SDSS/Johnson based.
    out = griz_des2sdss(gmag,rmag,imag,zmag)
    g,r,i,z = out
    if verbose:
        fp.write("g-r={:.2f}->{:.2f}\n".format(gmag-rmag,g-r))
        fp.write("g-i={:.2f}->{:.2f}\n".format(gmag-imag,g-i))
    logg_mag_dict = {"g":g,"r":r,"i":i}
    logg_magerr_dict = {"g":gerr,"r":rerr,"i":ierr}
    ## Determine Effective Temperature and Error
    ## Output: Teff, Teff_err, color, color_err
    if Teff_color=="BmV":
        BmV1, BmV, BmV2 = jordi06_gmr_to_BmV(g-r, geterr=True)
        # Transformation error, then add band and reddening errors in quadrature.
        BmVerr = max(abs(BmV2-BmV), abs(BmV-BmV1))
        BmVerr = np.sqrt(BmVerr**2. + gerr**2 + rerr**2 + EBVerr**2)
        # NOTE(review): reddening is ADDED to the color here -- confirm the
        # intended sign convention relative to the deredden() helper.
        BmV = BmV + EBV
        if Teff_calib=="C10":
            Teff = C10_Teff_BmV(BmV, MH)
            Teff1 = C10_Teff_BmV(BmV-BmVerr, MH)
            Teff2 = C10_Teff_BmV(BmV+BmVerr, MH)
            Teff_syserr = 73.
        elif Teff_calib=="A99":
            Teff = A99_Teff_BmV(BmV, MH)
            Teff1 = A99_Teff_BmV(BmV-BmVerr, MH)
            Teff2 = A99_Teff_BmV(BmV+BmVerr, MH)
            Teff_syserr = 167.
        color_err = BmVerr
        color = BmV
    elif Teff_color=="VmI":
        # Color excess E(V-I) from the per-band SF11 extinction coefficients.
        EVI = deredden(EBV, "LandoltV") - deredden(EBV, "LandoltI")
        VmI1, VmI, VmI2 = jordi06_gmi_to_VmI(g-i, geterr=True)
        VmIerr = max(abs(VmI2 - VmI), abs(VmI - VmI1))
        VmIerr = np.sqrt(VmIerr**2 + gerr**2 + ierr**2 + EBVerr**2)
        # NOTE(review): see sign-convention note in the BmV branch above.
        VmI = VmI + EVI
        if Teff_calib=="C10":
            Teff = C10_Teff_VmI(VmI, MH)
            Teff1 = C10_Teff_VmI(VmI-VmIerr, MH)
            Teff2 = C10_Teff_VmI(VmI+VmIerr, MH)
            Teff_syserr = 59.
        elif Teff_calib=="A99":
            Teff = A99_Teff_VmI(VmI)
            Teff1 = A99_Teff_VmI(VmI-VmIerr)
            Teff2 = A99_Teff_VmI(VmI+VmIerr)
            Teff_syserr = 125.
        color_err = VmIerr
        color = VmI
    if verbose: fp.write("{}={:.2f}±{:.2f}\n".format(Teff_color, color, color_err))
    # Statistical Teff error from propagating the color error through the fit.
    Teff_err = max(abs(Teff-Teff1), abs(Teff-Teff2))
    if verbose: fp.write("Teff={:.0f} ± {:.0f} (stat) ± {:.0f} (sys)\n".format(Teff,Teff_err,Teff_syserr))
    Teff_err = np.sqrt(Teff_err**2 + Teff_syserr**2)
    # Photometric logg by iterating the BC <-> logg fixed point.
    logg = iterate_find_logg(Teff, logg_mag_dict[logg_mag], MH, dmod, logg_mag)
    try:
        logg = logg[0]
    except:
        pass
    logg_err = phot_logg_error(Teff_err/Teff, dmoderr, magerr=logg_magerr_dict[logg_mag])
    if verbose: fp.write("logg ({})={:.2f} ± {:.2f} (stat)\n".format(logg_mag, logg, logg_err))
    # Microturbulence from the Barklem+2005 relation, propagating logg error.
    vt_syserr = 0.13 # from scatter around B05 relation
    vt = logg_to_vt_B05(logg)
    vt1 = logg_to_vt_B05(logg-logg_err)
    vt2 = logg_to_vt_B05(logg+logg_err)
    vt_err = max(abs(vt-vt1),abs(vt-vt2))
    if verbose: fp.write("vt={:.2f} ± {:.2f} (stat) ± {:.2f} (sys)\n".format(vt, vt_err, vt_syserr))
    vt_err = np.sqrt(vt_syserr**2 + vt_err**2)
    if full_output:
        return Teff, Teff_err, logg, logg_err, vt, vt_err, color, color_err, g, r, i, z
    return Teff, Teff_err, logg, logg_err, vt, vt_err
def parsec_des_stellar_params(dmod=0):
    """ Uses label=2 and 3 (subgiant/RGB) to create gmag, rmag->Teff,logg

    Returns six dicts of 1-D interpolators, keyed like the isochrone grids:
    gmag->Teff, gmag->logg, rmag->Teff, rmag->logg, (g-r)->Teff, (g-r)->logg.
    dmod is added to the isochrone magnitudes (apparent-magnitude lookups).
    """
    isos = load_parsec_isochrones("DECAM")
    g_Teff_funcs = {}
    g_logg_funcs = {}
    r_Teff_funcs = {}
    r_logg_funcs = {}
    gmr_Teff_funcs = {}
    gmr_logg_funcs = {}
    # Out-of-range magnitudes/colors map to NaN instead of raising.
    interp_kwargs = {"bounds_error":False,"fill_value":np.nan}
    for key in isos.keys():
        tab = isos[key]
        # Keep only evolutionary labels 2 and 3 (subgiant/RGB per docstring).
        tab = tab[(tab["label"]==2) | (tab["label"]==3)]
        gmag, rmag = tab["gmag"], tab["rmag"]
        logT, logg = tab["logTe"], tab["logg"]
        Teff = 10**logT
        g_Teff_funcs[key] = interpolate.interp1d(gmag+dmod,Teff,**interp_kwargs)
        g_logg_funcs[key] = interpolate.interp1d(gmag+dmod,logg,**interp_kwargs)
        r_Teff_funcs[key] = interpolate.interp1d(rmag+dmod,Teff,**interp_kwargs)
        r_logg_funcs[key] = interpolate.interp1d(rmag+dmod,logg,**interp_kwargs)
        gmr_Teff_funcs[key] = interpolate.interp1d(gmag-rmag,Teff,**interp_kwargs)
        gmr_logg_funcs[key] = interpolate.interp1d(gmag-rmag,logg,**interp_kwargs)
    return g_Teff_funcs, g_logg_funcs, r_Teff_funcs, r_logg_funcs, gmr_Teff_funcs, gmr_logg_funcs
def dartmouth_des_stellar_params(dmod=0,ages=[10.0,11.0,12.0,13.0,14.0],logZs=[-2.5,-2.0,-1.5],alpha="ap4"):
    """ Uses EEP > 111 (subgiant/RGB) to create gmag, rmag->Teff,logg

    dmod  : distance modulus added to the isochrone magnitudes
    logZs : metallicities to load. Fix: this parameter was previously
            accepted but silently ignored (the loop hard-coded
            [-2.5, -2.0, -1.5]); the default preserves the old behavior.
    ages  : accepted for interface compatibility; NOTE(review) it is not
            used to filter the loaded isochrone grids.
    alpha : alpha-enhancement key passed to load_dartmouth_isochrones.

    Returns six dicts of 1-D interpolators keyed like the isochrone grids:
    gmag->Teff, gmag->logg, rmag->Teff, rmag->logg, (g-r)->Teff, (g-r)->logg.
    (Docstring also fixed: the old one mentioned label=2/3, which is the
    PARSEC convention; this function cuts on EEP > 111.)
    """
    isos = {}
    for MH in logZs:  # was: hard-coded [-2.5,-2.0,-1.5], ignoring logZs
        _isos = load_dartmouth_isochrones(MH,alpha,"DECAM")
        for key in _isos.keys():
            tab = _isos[key]
            # EEP > 111 restricts to the subgiant branch and beyond, where
            # magnitude -> (Teff, logg) is single-valued.
            tab = tab[tab["EEP"] > 111]
            isos[key] = tab
    g_Teff_funcs = {}
    g_logg_funcs = {}
    r_Teff_funcs = {}
    r_logg_funcs = {}
    gmr_Teff_funcs = {}
    gmr_logg_funcs = {}
    # Out-of-range magnitudes/colors map to NaN instead of raising.
    interp_kwargs = {"bounds_error":False,"fill_value":np.nan}
    for key in isos.keys():
        tab = isos[key]
        gmag, rmag = tab["gmag"], tab["rmag"]
        logT, logg = tab["logTe"], tab["logg"]
        Teff = 10**logT
        g_Teff_funcs[key] = interpolate.interp1d(gmag+dmod,Teff,**interp_kwargs)
        g_logg_funcs[key] = interpolate.interp1d(gmag+dmod,logg,**interp_kwargs)
        r_Teff_funcs[key] = interpolate.interp1d(rmag+dmod,Teff,**interp_kwargs)
        r_logg_funcs[key] = interpolate.interp1d(rmag+dmod,logg,**interp_kwargs)
        gmr_Teff_funcs[key] = interpolate.interp1d(gmag-rmag,Teff,**interp_kwargs)
        gmr_logg_funcs[key] = interpolate.interp1d(gmag-rmag,logg,**interp_kwargs)
    return g_Teff_funcs, g_logg_funcs, r_Teff_funcs, r_logg_funcs, gmr_Teff_funcs, gmr_logg_funcs
def photometric_stellarparam_derivatives(Teff, logg,
                                         dTdcolor,dvtdlogg=None,
                                         color=None, dTdcolor_func=None):
    """
    Computes dTeff/dlogg, dvt/dlogg assuming purely photometric determinations
    This can be used to get the stellar parameter covariances/correlations.
    Input:
      Teff: effective temperature in Kelvin
      logg: surface gravity
      dTdcolor: derivative of effective temperature with respect to color (e.g., g-r)
        Currently you have to compute outside and specify it
      dvtdlogg: derivative of microturbulence with respect to logg
        By default, computes dvt/dlogg using B05 relation. Pass "M08" or
        "K09" for those linear relations, or a number to use it directly.
    Returns:
      dloggdTeff, dvtdlogg
    You can convert these to covariances with these formulas:
      Cov(T,g) = dg/dT * sigma_T^2
      Cov(v,g) = dv/dg * sigma_g^2
      Cov(T,v) = dv/dg * dg/dT * sigma_T^2
    Or correlations:
      Corr(T,g) = dg/dT * sigma_T/sigma_g
      Corr(v,g) = dv/dg * sigma_g/sigma_v
      Corr(T,v) = Corr(T,g) * Corr(v,g)
    """
    # d(logg)/d(Teff): 4/(ln10 * T) from the 4*log10(Teff) term of the
    # photometric logg relation, plus 0.4/(dT/dcolor) from the color term.
    dloggdT = 4/(np.log(10) * Teff) + 0.4/dTdcolor
    if dvtdlogg is None or dvtdlogg=="B05":
        # Analytic derivative of the Barklem+2005 quadratic vt(logg) relation
        dvtdlogg = 0.173 * logg - 0.6897
    elif dvtdlogg == "M08":
        # Marino et al. 2008: vt = 2.22 - 0.322*logg.
        # Bugfix: was assigned to a misspelled 'dvdtdlogg', so the function
        # returned the string "M08" instead of the slope.
        dvtdlogg = -0.322
    elif dvtdlogg == "K09":
        # Kirby et al. 2009: vt = 2.13 - 0.23*logg. Same typo fixed.
        dvtdlogg = -0.23
    return dloggdT, dvtdlogg
| [
"logging.getLogger",
"numpy.abs",
"numpy.log10",
"numpy.sqrt",
"numpy.logical_and",
"numpy.where",
"scipy.interpolate.griddata",
"numpy.log",
"numpy.argmax",
"scipy.interpolate.interp1d",
"numpy.sum",
"numpy.polyval",
"numpy.ravel",
"numpy.vectorize",
"astropy.io.ascii.read"
] | [((343, 370), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (360, 370), False, 'import logging\n'), ((5600, 5625), 'numpy.vectorize', 'np.vectorize', (['_gmr_to_BmV'], {}), '(_gmr_to_BmV)\n', (5612, 5625), True, 'import numpy as np\n'), ((11620, 11650), 'numpy.vectorize', 'np.vectorize', (['_logTFeH_to_logg'], {}), '(_logTFeH_to_logg)\n', (11632, 11650), True, 'import numpy as np\n'), ((800, 869), 'scipy.interpolate.griddata', 'interpolate.griddata', (['BCs[:, 0:3]', 'BCs[:, 3]', 'points'], {'method': '"""linear"""'}), "(BCs[:, 0:3], BCs[:, 3], points, method='linear')\n", (820, 869), False, 'from scipy import interpolate\n'), ((6680, 6693), 'numpy.ravel', 'np.ravel', (['FeH'], {}), '(FeH)\n', (6688, 6693), True, 'import numpy as np\n'), ((7139, 7152), 'numpy.ravel', 'np.ravel', (['VmI'], {}), '(VmI)\n', (7147, 7152), True, 'import numpy as np\n'), ((8350, 8363), 'numpy.ravel', 'np.ravel', (['BmV'], {}), '(BmV)\n', (8358, 8363), True, 'import numpy as np\n'), ((8371, 8384), 'numpy.ravel', 'np.ravel', (['FeH'], {}), '(FeH)\n', (8379, 8384), True, 'import numpy as np\n'), ((8620, 8657), 'numpy.logical_and', 'np.logical_and', (['(BmV > 0.7)', '(BmV <= 0.8)'], {}), '(BmV > 0.7, BmV <= 0.8)\n', (8634, 8657), True, 'import numpy as np\n'), ((10098, 10177), 'numpy.sqrt', 'np.sqrt', (['(masserr ** 2 + Terr_mag ** 2 + magerr ** 2 + dmoderr ** 2 + BCerr ** 2)'], {}), '(masserr ** 2 + Terr_mag ** 2 + magerr ** 2 + dmoderr ** 2 + BCerr ** 2)\n', (10105, 10177), True, 'import numpy as np\n'), ((10632, 10654), 'numpy.argmax', 'np.argmax', (["iso['logT']"], {}), "(iso['logT'])\n", (10641, 10654), True, 'import numpy as np\n'), ((10870, 10902), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['logT', 'logg'], {}), '(logT, logg)\n', (10890, 10902), False, 'from scipy import interpolate\n'), ((11781, 11837), 'astropy.io.ascii.read', 'ascii.read', (["(datapath + '/stellar_param_data/barklem.txt')"], {}), "(datapath + 
'/stellar_param_data/barklem.txt')\n", (11791, 11837), False, 'from astropy.io import ascii\n'), ((12633, 12686), 'astropy.io.ascii.read', 'ascii.read', (["(datapath + '/stellar_param_data/sf11.txt')"], {}), "(datapath + '/stellar_param_data/sf11.txt')\n", (12643, 12686), False, 'from astropy.io import ascii\n'), ((16564, 16605), 'numpy.sqrt', 'np.sqrt', (['(Teff_err ** 2 + Teff_syserr ** 2)'], {}), '(Teff_err ** 2 + Teff_syserr ** 2)\n', (16571, 16605), True, 'import numpy as np\n'), ((17257, 17294), 'numpy.sqrt', 'np.sqrt', (['(vt_syserr ** 2 + vt_err ** 2)'], {}), '(vt_syserr ** 2 + vt_err ** 2)\n', (17264, 17294), True, 'import numpy as np\n'), ((6959, 6973), 'numpy.log10', 'np.log10', (['Teff'], {}), '(Teff)\n', (6967, 6973), True, 'import numpy as np\n'), ((10332, 10403), 'astropy.io.ascii.read', 'ascii.read', (["(datapath + '/stellar_param_data/afe040feh200set1_12gyr.txt')"], {}), "(datapath + '/stellar_param_data/afe040feh200set1_12gyr.txt')\n", (10342, 10403), False, 'from astropy.io import ascii\n'), ((12067, 12087), 'numpy.polyval', 'np.polyval', (['coeff', 'x'], {}), '(coeff, x)\n', (12077, 12087), True, 'import numpy as np\n'), ((15037, 15097), 'numpy.sqrt', 'np.sqrt', (['(BmVerr ** 2.0 + gerr ** 2 + rerr ** 2 + EBVerr ** 2)'], {}), '(BmVerr ** 2.0 + gerr ** 2 + rerr ** 2 + EBVerr ** 2)\n', (15044, 15097), True, 'import numpy as np\n'), ((18072, 18128), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag + dmod)', 'Teff'], {}), '(gmag + dmod, Teff, **interp_kwargs)\n', (18092, 18128), False, 'from scipy import interpolate\n'), ((18153, 18209), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag + dmod)', 'logg'], {}), '(gmag + dmod, logg, **interp_kwargs)\n', (18173, 18209), False, 'from scipy import interpolate\n'), ((18234, 18290), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(rmag + dmod)', 'Teff'], {}), '(rmag + dmod, Teff, **interp_kwargs)\n', (18254, 18290), False, 'from scipy import interpolate\n'), ((18315, 
18371), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(rmag + dmod)', 'logg'], {}), '(rmag + dmod, logg, **interp_kwargs)\n', (18335, 18371), False, 'from scipy import interpolate\n'), ((18398, 18454), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag - rmag)', 'Teff'], {}), '(gmag - rmag, Teff, **interp_kwargs)\n', (18418, 18454), False, 'from scipy import interpolate\n'), ((18481, 18537), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag - rmag)', 'logg'], {}), '(gmag - rmag, logg, **interp_kwargs)\n', (18501, 18537), False, 'from scipy import interpolate\n'), ((19451, 19507), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag + dmod)', 'Teff'], {}), '(gmag + dmod, Teff, **interp_kwargs)\n', (19471, 19507), False, 'from scipy import interpolate\n'), ((19532, 19588), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag + dmod)', 'logg'], {}), '(gmag + dmod, logg, **interp_kwargs)\n', (19552, 19588), False, 'from scipy import interpolate\n'), ((19613, 19669), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(rmag + dmod)', 'Teff'], {}), '(rmag + dmod, Teff, **interp_kwargs)\n', (19633, 19669), False, 'from scipy import interpolate\n'), ((19694, 19750), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(rmag + dmod)', 'logg'], {}), '(rmag + dmod, logg, **interp_kwargs)\n', (19714, 19750), False, 'from scipy import interpolate\n'), ((19777, 19833), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag - rmag)', 'Teff'], {}), '(gmag - rmag, Teff, **interp_kwargs)\n', (19797, 19833), False, 'from scipy import interpolate\n'), ((19860, 19916), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['(gmag - rmag)', 'logg'], {}), '(gmag - rmag, logg, **interp_kwargs)\n', (19880, 19916), False, 'from scipy import interpolate\n'), ((6650, 6664), 'numpy.log10', 'np.log10', (['Teff'], {}), '(Teff)\n', (6658, 6664), True, 'import numpy as np\n'), ((10438, 10509), 'astropy.io.ascii.read', 
'ascii.read', (["(datapath + '/stellar_param_data/afe040feh250set1_12gyr.txt')"], {}), "(datapath + '/stellar_param_data/afe040feh250set1_12gyr.txt')\n", (10448, 10509), False, 'from astropy.io import ascii\n'), ((15794, 15852), 'numpy.sqrt', 'np.sqrt', (['(VmIerr ** 2 + gerr ** 2 + ierr ** 2 + EBVerr ** 2)'], {}), '(VmIerr ** 2 + gerr ** 2 + ierr ** 2 + EBVerr ** 2)\n', (15801, 15852), True, 'import numpy as np\n'), ((702, 716), 'numpy.ravel', 'np.ravel', (['Teff'], {}), '(Teff)\n', (710, 716), True, 'import numpy as np\n'), ((717, 731), 'numpy.ravel', 'np.ravel', (['logg'], {}), '(logg)\n', (725, 731), True, 'import numpy as np\n'), ((732, 745), 'numpy.ravel', 'np.ravel', (['FeH'], {}), '(FeH)\n', (740, 745), True, 'import numpy as np\n'), ((4467, 4480), 'numpy.ravel', 'np.ravel', (['gmi'], {}), '(gmi)\n', (4475, 4480), True, 'import numpy as np\n'), ((4497, 4510), 'numpy.ravel', 'np.ravel', (['gmi'], {}), '(gmi)\n', (4505, 4510), True, 'import numpy as np\n'), ((8972, 8987), 'numpy.log10', 'np.log10', (['Mstar'], {}), '(Mstar)\n', (8980, 8987), True, 'import numpy as np\n'), ((8992, 9013), 'numpy.log10', 'np.log10', (['(Teff / 5780)'], {}), '(Teff / 5780)\n', (9000, 9013), True, 'import numpy as np\n'), ((9407, 9430), 'numpy.abs', 'np.abs', (['(new_logg - logg)'], {}), '(new_logg - logg)\n', (9413, 9430), True, 'import numpy as np\n'), ((10544, 10615), 'astropy.io.ascii.read', 'ascii.read', (["(datapath + '/stellar_param_data/afe040feh300set1_12gyr.txt')"], {}), "(datapath + '/stellar_param_data/afe040feh300set1_12gyr.txt')\n", (10554, 10615), False, 'from astropy.io import ascii\n'), ((21212, 21222), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (21218, 21222), True, 'import numpy as np\n'), ((1461, 1474), 'numpy.sum', 'np.sum', (['Nlist'], {}), '(Nlist)\n', (1467, 1474), True, 'import numpy as np\n'), ((12814, 12857), 'numpy.where', 'np.where', (["(conversion_data['filter'] == filt)"], {}), "(conversion_data['filter'] == filt)\n", (12822, 12857), True, 
'import numpy as np\n')] |
import copy
import itertools
import seaborn as sns
import glob
import os
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import imutils
import numpy as np
import time
from pre_processing import Pre_Processing
import cv2
class Roads():
def __init__(self):
self.road_parm = True
self.pre_process = Pre_Processing()
self.height = None
self.width = None
self.show_image = None
self.output_folder = None
self.process_number = None
self.roads = dict()
def extractContours(self, morph_img, road_image, car_length, car_width):
contours, hierarchy = cv2.findContours(morph_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
small_contours = []
large_contours = []
image = road_image.copy()
# print(len(contours), hierarchy)
for i, contour in enumerate(contours):
# cnt = max(contours, key=cv2.contourArea)
# if cv2.contourArea(contour) :
# epsilon = 0.01*cv2.arcLength(contour, False)
# approx = cv2.approxPolyDP(contour, epsilon, False)
if cv2.arcLength(contour, False) < car_length * 2.5:
small_contours.append(contour)
else:
contour = contour.reshape(contour.shape[0], 2)
large_contours.append(contour)
cv2.drawContours(image, [contour], 0, (255, 0, 0), -1)
# # cv2.polylines(image, [approx], False, (0, 255, 0), 2)
# print("contour # {}, Shape = {}, Area = {}, Arc_Length = {} ".format(i, contour.shape, cv2.contourArea(contour), cv2.arcLength(contour, closed=False)))
if self.show_image:
self.pre_process.showImage("Contours Visualization", image, time=500)
cv2.imwrite(self.output_folder + "{}_Contour_Viz_image.jpg".format(self.process_number), image)
self.process_number += 1
print(" large contours = {}, small contours = {} ".format(len(large_contours), len(small_contours)))
return small_contours, large_contours
def extractLengthOfRoads(self, lane_midpoints):
""" Calculating the arc length of road midpoints of the lane"""
lenght_of_lanes = list()
for road in lane_midpoints:
road = np.array(road)
# npts = len(road)
x = road[:, 0]
y = road[:, 1]
arc = 0
for k in range(0, len(road)-1): # or road.shape[0]
arc = arc + np.sqrt((x[k+1] - x[k])**2 + (y[k+1] - y[k])**2)
lenght_of_lanes.append(arc)
return lenght_of_lanes
# Midpoints of the Lane Calculation
    def midpointOfTheLane(self, image, sample_size, lane_contour):
        """Compute the centerline of a simple two-boundary lane.

        Samples both boundary contours every `sample_size` points, averages
        the paired points into midpoints, de-duplicates them via
        removeRedundantMidpointsOfLane(), and writes a visualization image
        to the output folder.

        Parameters:
            image: BGR image the midpoints are drawn onto (mutated).
            sample_size: sampling stride along the boundary contours.
            lane_contour: pair of (N, 2) boundary point arrays; only the
                first min(len) points of each are used.

        Returns:
            (width_of_lane, length_of_lane, adjusted_midpoints_of_lane):
            a one-element list with the maximum boundary separation in
            pixels, a one-element list with the centerline arc length, and
            the ordered centerline midpoints.
        """
        print("\n")
        print("----- Extracting the Midpoints of the Lane -----")
        print("\n")
        midpoint_of_lane = []
        values = []
        euc_dist_bet_lanes = list()
        width_of_lane = list()
        # Sample only up to the shorter of the two boundary contours so the
        # pairwise indexing below stays in range.
        max_road_point = (min(lane_contour[0].shape[0], lane_contour[1].shape[0]))
        print("max_road_point for lane", max_road_point)
        for i in range(0, max_road_point, sample_size):
            ### Midpoint between two points = (X2+X1)^2 / (Y2+Y1)^2)
            # print("midpoints :", midpoint)
            # NOTE(review): this pairs boundary points by index, which
            # assumes the two contours run roughly in parallel — confirm.
            midpoint = [(lane_contour[1][i][0] + lane_contour[0][i][0]) / 2,
                        (lane_contour[1][i][1] + lane_contour[0][i][1]) / 2]
            midpoint_of_lane.append(midpoint)
            ### Lane width in pixels
            dist = cv2.norm(lane_contour[0][i] - lane_contour[1][i], cv2.NORM_L2)
            euc_dist_bet_lanes.append(dist)
            # cv2.circle(image, tuple(midpoint), 3, (0, 255, 0), -1)
            values.append(i)
        midpoint_of_lane.sort()
        midpoints_sorted = list()
        # Keep every second midpoint (plus the very last one if it would
        # otherwise be dropped) to thin the centerline before merging.
        for i, point in enumerate(midpoint_of_lane):
            if(i % 2 == 0):
                midpoints_sorted.append(point)
            elif (i == len(midpoint_of_lane) - 1):
                value = midpoint_of_lane[-1]
                # midpoints_sorted.append((value[0], value[1] - 0)
                midpoints_sorted.append((value[0], value[1]))
        adjusted_midpoints_of_lane = self.removeRedundantMidpointsOfLane(midpoints_sorted, sample_size)
        # adjusted_midpoints_of_lane = self.removeRedundantMidpointsOfLane(self.removeRedundantMidpointsOfLane(midpoints_sorted))
        ### Lane width
        width_of_lane.append(max(euc_dist_bet_lanes))
        ### Lane length
        length_of_lane = self.extractLengthOfRoads([adjusted_midpoints_of_lane])
        # Draw the final midpoints and persist the visualization.
        for point in adjusted_midpoints_of_lane:
            cv2.circle(image, tuple([int(point[0]), int(point[1])]), 3, (0, 255, 0), -1)
        cv2.imwrite(self.output_folder + "{}_midpoints_of_lane.jpg".format(self.process_number), image)
        self.process_number += 1
        return width_of_lane, length_of_lane, adjusted_midpoints_of_lane
def midpointOfFourWayAndTSection(self, canvas, large_contours):
print("\n")
print("----- Extracting the Midpoints of the Lane -----")
print("\n")
final_midpoints_lanes = list()
euc_dist_bet_lanes = list()
width_of_lanes = list()
min_euc_dist_bet_lane = list()
ordered_canvas = canvas.copy()
for lane_1, lane_2 in itertools.combinations(large_contours, 2):
ed_bet_two_lanes = list()
final_lane_dist = list()
midpoint_of_lane = list()
# print("lane_1", lane_1.shape)
# print("lane_2", lane_2.shape)
for point_lane_1 in lane_1[0::15, :]:
temp_list = list()
for point_lane_2 in lane_2[0::15, :]:
# dist = cv2.norm(pts - dst, cv2.NORM_L2)
euclidean_distance = math.sqrt(
math.pow((point_lane_2[0] - point_lane_1[0]), 2) +
math.pow((point_lane_2[1] - point_lane_1[1]), 2))
temp_list.append([euclidean_distance, point_lane_1.tolist(), point_lane_2.tolist()])
ed_bet_two_lanes.append(min(temp_list))
min_ed_point = min(ed_bet_two_lanes)
min_euc_dist_bet_lane.append(min_ed_point)
# print("min_euc_dist_bet_lane = ", min_euc_dist_bet_lane)
# max_ed_point = max(ed_bet_two_lanes)
# road_width_range = max_ed_point[0] - min_ed_point[0]
# print("\n \n")
# print("min_ed_point", min_ed_point)
# print("max_ed_point", max_ed_point)
for pt in ed_bet_two_lanes:
if (pt[0] == min_ed_point[0]) or (pt[0] <= min_ed_point[0] + 6):
final_lane_dist.append(pt)
if (len(final_lane_dist) < 17): # 9
final_lane_dist = list()
for pt in ed_bet_two_lanes:
if (pt[0] == min_ed_point[0]) or (pt[0] <= min_ed_point[0] + 20):
final_lane_dist.append(pt)
# final_lane_dist = sorted(final_lane_dist)
euc_dist_bet_lanes.append(final_lane_dist)
avg_road_width = 0
lane_count = 0
for i, lane in enumerate(euc_dist_bet_lanes):
avg_road_width = avg_road_width + min(lane)[0] # avg_road_width = avg_road_width + max(lane)[0]
lane_count += 1
avg_road_width = avg_road_width / lane_count
for i, final_lane_dist in enumerate(euc_dist_bet_lanes):
ed_bet_two_lanes = list()
midpoint_of_lane = list()
# print("final_lane_dist", final_lane_dist)
## if (min_euc_dist_bet_lane[i][0] * 1.4 >= min(final_lane_dist)[0])
## if (min(min_euc_dist_bet_lane)[0] * 1.4 >= min(final_lane_dist)[0]):
if (avg_road_width * 1.24 >= min(final_lane_dist)[0]):
for ed, pt1, pt2 in final_lane_dist[:len(final_lane_dist) // 2]:
cv2.circle(canvas, tuple([int(pt1[0]), int(pt1[1])]), 4, (0, 0, 255), -1)
cv2.circle(canvas, tuple([int(pt2[0]), int(pt2[1])]), 4, (255, 0, 0), -1)
# print("min(min_euc_dist_bet_lane)[0] * 1.04", min_euc_dist_bet_lane[i][0] * 1.04)
break
# cv2.imshow("Points on the circle", canvas)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
for ed, pt1, pt2 in final_lane_dist[:len(final_lane_dist) // 2]:
midpoint = [(pt1[0] + pt2[0]) // 2,
(pt1[1] + pt2[1]) // 2]
cv2.circle(canvas, tuple([round(midpoint[0]), round(midpoint[1])]), 5, (0, 255, 0), -1)
midpoint_of_lane.append(midpoint)
## removing the points that are near to each other extracted in the midpoints of a lane
# print("len of the midpoint_of_lane", len(midpoint_of_lane))
adjusted_midpoints_of_lane = self.removeRedundantMidpointsOfLane(midpoint_of_lane, 15)
# adjusted_midpoints_of_lane = self.removeRedundantMidpointsOfLane( self.removeRedundantMidpointsOfLane(midpoint_of_lane))
for point in adjusted_midpoints_of_lane:
cv2.circle(canvas, tuple([round(point[0]), round(point[1])]), 2, (0, 0, 255), -1)
final_midpoints_lanes.append(adjusted_midpoints_of_lane)
# print("hello")
width_of_lanes.append(min_euc_dist_bet_lane[i][0] * 1.04)
# final_midpoints_lanes.append(midpoint_of_lane)
# print("midpoint_of_lane = ", len(midpoint_of_lane))
# for point in midpoint_of_lane:
# cv2.circle(canvas, tuple([int(point[0]), int(point[1])]), 5, (0, 255, ), -1)
else:
continue
# cv2.imshow("canvas_1", canvas)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
cv2.imwrite(self.output_folder + "{}_midpoints_of_lane.jpg".format(self.process_number), canvas)
self.process_number += 1
number_of_lanes = len(large_contours)
if len(final_midpoints_lanes) > number_of_lanes:
lanes_count = list()
print(" final lane midpoint are larger in length ")
while(1):
for lane in final_midpoints_lanes:
lanes_count.append(len(lane))
index = lanes_count.index(min(lanes_count))
final_midpoints_lanes.pop(index)
if len(final_midpoints_lanes) == number_of_lanes:
break
return euc_dist_bet_lanes, width_of_lanes, self.orderedMidpointsOfTheLanes(final_midpoints_lanes, ordered_canvas)
else:
return euc_dist_bet_lanes, width_of_lanes, self.orderedMidpointsOfTheLanes(final_midpoints_lanes, ordered_canvas)
# return euc_dist_bet_lanes, width_of_lanes, self.orderedMidpointsOfTheLanes(final_midpoints_lanes, ordered_canvas)
    def removeRedundantMidpointsOfLane(self, midpoint_of_lane, sample_size):
        """Merge duplicate and near-neighbour midpoints of a lane.

        Repeatedly (a) drops exact duplicates, then (b) replaces each
        point whose nearest neighbour lies closer than (sample_size - 2)
        pixels with the average of the two, until a full pass performs no
        merge. Returns the cleaned midpoint list.

        NOTE(review): inside the loop, `point` is the enumerate variable;
        the `else` branches re-append the current (or, at the end, the
        last) loop value — the logic depends on that reuse, so the
        statement order here is load-bearing.
        """
        ## Removing the points that are near to each other neareast neighbours in the vicnity of 10 euclidean points
        while(1):
            adjusted_midpoints_of_lane = list()
            final_road_points = list()
            adjust = False
            ## Removing Duplicate Lane Midpoints
            for pt in midpoint_of_lane:
                if pt not in final_road_points:
                    final_road_points.append(pt)
            # print("Redundant midpoints of lane are removed")
            # Records a 1 per merge performed this pass; the outer loop
            # stops once a pass merges nothing.
            check_redundancy = list()
            for i, point in enumerate(final_road_points):
                # print("neighbouring midepoint are removed as well")
                if (i != len(final_road_points) - 1):
                    # if (not adjust):
                    ref_point = np.array(final_road_points[i])
                    road_points = np.array(final_road_points)
                    # index = np.where(road_points, ref_point)
                    # road_points = np.delete(road_points, index)
                    # Distance from the current point to every point
                    # (including itself, which sorts first with distance 0).
                    edist = np.sqrt(( (road_points[:, 0] - ref_point[0]) ** 2 + (road_points[:, 1] - ref_point[1]) ** 2))
                    edist = edist.reshape(edist.shape[0], -1)
                    road_points = road_points.reshape(road_points.shape[0], -1)
                    edist_road_points = np.hstack([edist, road_points])
                    # print("edist_last_points", edist_last_points)
                    # furthest_point = edist_road_points[edist_last_points[:,0].argsort()][-1]
                    # Index 1 after sorting skips the point itself (index 0).
                    nearest_point = edist_road_points[edist_road_points[:,0].argsort()][1]
                    nearest_point_dist = nearest_point.tolist()[0]
                    nearest_point_value = nearest_point.tolist()[1:]
                    # # dist = cv2.norm(np.array(point - road_points), cv2.NORM_L2)
                    # print("nearest_point = ", nearest_point)
                    # print("nearest_point = ", nearest_point_value)
                    current_point = ref_point.tolist()
                    if nearest_point_dist <= (sample_size - 2) : # 10 , 15
                        # Merge: keep the average of the two close points.
                        point = [(current_point[0] + nearest_point_value[0]) / 2,
                                 (current_point[1] + nearest_point_value[1]) / 2]
                        adjusted_midpoints_of_lane.append(point)
                        # print("midpoint_of_lane[i] = {} and midpoint_of_lane[i+1] = {} adjusted to be point = {}".format(final_road_points[i], final_road_points[i+1], mid_value))
                        check_redundancy.append(1)
                        # adjust = True
                    else:
                        adjusted_midpoints_of_lane.append(point)
                        check_redundancy.append(0)
                        # print("midpoint_of_lane[i] = {}".format(final_road_points[i]))
                    # else:
                    #     None
                    # # adjust = False
                else:
                    ### The lane has reached its end point adding the last point if its greater at distance from second last point
                    dist = cv2.norm(np.array(final_road_points[i]) - np.array(final_road_points[i-1]), cv2.NORM_L2)
                    if (dist > (sample_size - 2)):
                        adjusted_midpoints_of_lane.append(point)
                    # break
                    # print("The lane has reached its endpoint ")
            midpoint_of_lane = adjusted_midpoints_of_lane
            # print("check redundancy = ", check_redundancy)
            if sum(check_redundancy) < 1:
                break
        return midpoint_of_lane
    def orderedMidpointsOfTheLanes(self, final_midpoints_lanes, ordered_canvas):
        """Order each lane's midpoints from the image border inwards.

        For every lane, picks the midpoint closest to any image edge as the
        start point, then sorts the remaining midpoints by their Euclidean
        distance to it. Draws the ordered midpoints on `ordered_canvas` and
        writes the visualization to the output folder.

        Parameters:
            final_midpoints_lanes: list of lanes, each a list of (x, y)
                midpoints.
            ordered_canvas: BGR image for visualization (mutated).

        Returns:
            List of lanes with midpoints sorted border-to-center.
        """
        print("\n")
        print("----- Ordering the Midpoints of the 3 or 4 Legged Lane -----")
        print("\n")
        final_ordered_midpoint = list()
        for lane_midpoints in final_midpoints_lanes:
            ## creating list for accumulation of distance of lowest distance a point has towards its side for aligning and sorting them
            extreme_dist_lane_points = list()
            lane_midpoints = np.array(lane_midpoints)
            for point in lane_midpoints:
                ### creating reference points to for distance calculation between point( on the midpoint of a lane)
                ### its extreme boundary points.
                ### e.g (10, 15) x_low = (0, 15), x_high = (w, 15), y_low = (10, 0), y_high = (10, h)
                x_low = [0, point[1]]
                x_high = [self.width, point[1]]
                y_low = [point[0], 0]
                y_high = [point[0], self.height]
                ### for vectorized computation and avoiding for loops overhead
                extreme_sides = np.array([x_low, x_high, y_low, y_high])
                edist = np.sqrt(((extreme_sides[:, 0] - point[0]) ** 2 + (extreme_sides[:, 1] - point[1]) ** 2))
                ### extracting the minimum distance a point have towards its extreme four sides
                min_dist = min(edist)
                extreme_dist_lane_points.append([min_dist, point.tolist()])
            # print("lane_midpoints", lane_midpoints)
            # print("extreme_dist_lane_points = ", extreme_dist_lane_points)
            ### finding the point among the all midpoints of a lane which has lowest possible euclidean distance
            extreme_point = min(extreme_dist_lane_points)[1]
            # print("extreme_point ", extreme_point)
            ### calculating euclidean distance of a lowest point to the every other point in the midpoint of the lane
            ### reshaping the array to for horizontal stacking or merging with the lane midpoints
            ### now as we have an array e.g. like array([euclidean_distace of midpoint to the lowest point to the side, [midpoint]])
            ### using this format sort the array is acending order of the euclidean distance to the side.
            edist_lane = np.sqrt(((lane_midpoints[:, 0] - extreme_point[0]) ** 2 + (lane_midpoints[:, 1] - extreme_point[1]) ** 2))
            edist_lane = edist_lane.reshape(edist_lane.shape[0], -1)
            edist_lane_midpoints = np.hstack([edist_lane, lane_midpoints])
            edist_lane_midpoints = edist_lane_midpoints[edist_lane_midpoints[:, 0].argsort()]
            ### extracting the sorted point and appending them to the final_list of midpoints
            ### sorted midpoints of the lane can be vizualed on the picture.
            final_lane_midpoints = edist_lane_midpoints[:, 1:].tolist()
            final_ordered_midpoint.append(final_lane_midpoints)
            for midpoint in final_lane_midpoints:
                cv2.circle(ordered_canvas, tuple([round(midpoint[0]), round(midpoint[1])]), 5, (0, 255, 0), -1)
            # cv2.imshow("image", ordered_canvas)
            # cv2.waitKey(20)
            # cv2.destroyAllWindows()
        cv2.imwrite(self.output_folder + "{}_midpoints_in_order.jpg".format(self.process_number), ordered_canvas)
        self.process_number += 1
        return final_ordered_midpoint
def centroidBetweenLanes(self, orderd_lane_points):
last_points = list()
midpoint = None
# print("orderd_lane_points", orderd_lane_points)
for lane in orderd_lane_points:
last_points.append(lane[-1])
# print("last points", lane[-1])
# print("last_points list", last_points)
if len(orderd_lane_points) == 3:
midpoint = [(round(last_points[0][0]) + round(last_points[1][0]) + round(last_points[2][0])) // len(orderd_lane_points),
(round(last_points[0][1]) + round(last_points[1][1]) + round(last_points[2][1])) // len(orderd_lane_points)]
elif len(orderd_lane_points) == 4:
midpoint = [(round(last_points[0][0]) + round(last_points[1][0]) + round(last_points[2][0]) +
round(last_points[3][0])) // len(orderd_lane_points),
(round(last_points[0][1]) + round(last_points[1][1]) + round(last_points[2][1]) +
round(last_points[3][1])) // len(orderd_lane_points)]
else:
midpoint = None
return midpoint
    def getExtrapolatedPointMidpoints(self, canvas, traverse_parameter, ordered_mid_lane):
        """Extend each lane's midpoints toward the junction centroid.

        Axis-aligned lanes (all x, or all y, coordinates equal over a few
        early midpoints) are extended by stepping straight toward the
        centroid in `traverse_parameter`-pixel steps; other lanes are
        extended along the heading of their last two midpoints.

        WARNING: mutates the inner lists of `ordered_mid_lane` in place
        (the outer list is copied, the lanes themselves are not) and draws
        on `canvas`.

        Parameters:
            canvas: BGR image for visualization (mutated).
            traverse_parameter: step size in pixels for the extrapolation.
            ordered_mid_lane: lanes of ordered (x, y) midpoints, as
                returned by orderedMidpointsOfTheLanes().

        Returns:
            The list of lanes with extrapolated midpoints appended.
        """
        print("\n \n ")
        print("----- Extrapolating the lane -----")
        print("\n \n ")
        ord_mid_lane = ordered_mid_lane.copy()
        centroid = self.centroidBetweenLanes(ord_mid_lane)
        cv2.circle(canvas, tuple(centroid), 5, (255, 0, 0), -1)
        extrapolated_ordered_midpoint = []
        for lane_midpoints in ordered_mid_lane:
            # x_sim / y_sim: True when the x (resp. y) coordinate of the
            # third midpoint reappears in each of the next few midpoints —
            # i.e. the lane is (approximately) axis-aligned.
            # NOTE(review): `value in point` is a membership test on the
            # point pair, not an equality test on one coordinate — confirm
            # this is intended.
            x_sim = [lane_midpoints[2][0] in i for i in lane_midpoints[2:6]]
            x_sim = np.array(x_sim).all()
            y_sim = [lane_midpoints[2][1] in i for i in lane_midpoints[2:6]]
            y_sim = np.array(y_sim).all()
            # print("original midpoints", lane_midpoints)
            if x_sim:
                # Vertical lane: step along y toward the centroid.
                x = lane_midpoints[-1][0]
                y = lane_midpoints[-1][1]
                change_y = centroid[1] - lane_midpoints[-1][1]
                dy = math.sqrt(math.pow(change_y, 2))
                lane_midpoints.pop()
                # print("centroid = ", centroid)
                if change_y < 0:
                    for i in range(0, int(dy * 1.3), traverse_parameter):
                        point = [x, y - i]
                        # print("point", point)
                        lane_midpoints.append(point)
                elif change_y == 0:
                    print("No change in y")
                else:
                    for i in range(0, int(dy * 1.5), traverse_parameter):
                        point = [x, y + i]
                        lane_midpoints.append(point)
            elif y_sim:
                # Horizontal lane: step along x toward the centroid.
                x = lane_midpoints[-1][0]
                y = lane_midpoints[-1][1]
                change_x = centroid[0] - lane_midpoints[-1][0]
                dx = math.sqrt(math.pow(change_x, 2))
                lane_midpoints.pop()
                # print("centroid = {} and dx = {} change_x = {} ".format(centroid, dx, change_x))
                if change_x < 0: # 1
                    for i in range(0, int(dx * 1.5), traverse_parameter):
                        point = [x - i, y]
                        lane_midpoints.append(point)
                        # print("point", point)
                elif change_x == 0: # 2
                    print("No change in x")
                else:
                    for i in range(0, int(dx), traverse_parameter):
                        point = [x + i, y]
                        lane_midpoints.append(point)
            else:
                """
                T-section or Merge-into or Fork-into or 4-way type of road variant
                """
                ### The Points on the T-section or 4-Way lane are not horizontal or vertical to the axis to be interpolated
                ### The points are constantly changing their x,y position so, e.g. diagnal points, curve points on T-section and 4-way.
                ###
                number_of_roads = len(ordered_mid_lane)
                current_last_point = np.array(lane_midpoints[-1])
                # print("The Road does not have constant values on the same axis either on the x or y")
                # print("\n \n ")
                # Distance from this lane's endpoint to every lane endpoint;
                # the nearest other endpoint sets the extrapolation length.
                reference_last_points = list()
                for lane in ordered_mid_lane:
                    reference_last_points.append(lane[-1])
                reference_last_points = np.array(reference_last_points)
                edist_last_points = np.sqrt(((reference_last_points[:, 0] - current_last_point[0]) ** 2 + (reference_last_points[:, 1] - current_last_point[1]) ** 2))
                edist_last_points = edist_last_points.reshape(edist_last_points.shape[0], -1)
                reference_last_points = reference_last_points.reshape(reference_last_points.shape[0], -1)
                edist_last_points = np.hstack([edist_last_points, reference_last_points])
                # print("edist_last_points", edist_last_points)
                furthest_point = edist_last_points[edist_last_points[:, 0].argsort()][-1]
                neareast_point = edist_last_points[edist_last_points[:, 0].argsort()][1]
                # print("furthest_point", furthest_point.tolist())
                # print("current_last_point", current_last_point.tolist())
                # Heading of the last segment of the lane.
                point_1 = lane_midpoints[-2]
                point_2 = current_last_point.tolist()
                x = [round(point_1[0]), round(point_2[0])]
                y = [round(point_1[1]), round(point_2[1])]
                dy = y[1] - y[0]
                dx = x[1] - x[0]
                rads = math.atan2(dy, dx)
                angle = math.degrees(rads)
                print("Angle of the lane = ", angle)
                # Project one point 1.1x the nearest-endpoint distance
                # along the lane heading.
                length = neareast_point.tolist()[0]
                extrap_point_x = int(round(point_2[0] + length * 1.1 * math.cos(angle * np.pi / 180.0)))
                extrap_point_y = int(round(point_2[1] + length * 1.1 * math.sin(angle * np.pi / 180.0)))
                cv2.line(canvas, tuple([int(point_2[0]), int(point_2[1])]), tuple([extrap_point_x, extrap_point_y]), (255, 0, 0), 2)
                lane_midpoints.extend([(extrap_point_x, extrap_point_y)])
            # print("new extrapolated midpoints", lane_midpoints)
            extrapolated_ordered_midpoint.append(lane_midpoints)
        return extrapolated_ordered_midpoint
def calculateAspectRatio(self, car_length, car_length_sim):
return car_length_sim / car_length # flaot values required
    def distortionMappingVizualization(self, road_image, aspect_ratio, lane_contours, number_of_roads):
        """Scale lane geometry by the pixel-to-simulation aspect ratio and
        draw the scaled points on `road_image`.

        Parameters:
            road_image: BGR image the scaled points are drawn onto (mutated).
            aspect_ratio: multiplicative scale factor (see
                calculateAspectRatio).
            lane_contours: for number_of_roads == 2, a single lane's point
                list; otherwise a list of per-lane point arrays.
            number_of_roads: selects between the two input layouts above.

        Returns:
            For number_of_roads == 2, the scaled points as a plain list;
            otherwise a list of scaled per-lane arrays (NOTE: the element
            type differs between the two branches — callers must handle
            both).
        """
        if number_of_roads == 2:
            lane_midpoints = np.array(lane_contours)
            # print("lane_midpoints = ", lane_midpoints)
            # Uniform scaling of every coordinate by the aspect ratio.
            distortionMapping = lambda x, r: x * r
            result = distortionMapping(lane_midpoints, aspect_ratio)
            # result = np.array(np.asarray(lane_midpoints) * aspect_ratio, np.uint8)
            # result = np.array(np.array(lane_midpoints) * aspect_ratio)
            # print("results of distortion and mapping", result)
            # new_lane_contours.append(result)
            for pt in result:
                ## Visualizing the distorted road
                ptx = round(pt[0])
                pty = round(pt[1])
                cv2.circle(road_image, (ptx, pty), 2, (255,0,0), -1)
            cv2.imwrite(self.output_folder + "{}_distortion_mapping.jpg".format(self.process_number), road_image)
            self.process_number += 1
            return result.tolist()
        else:
            new_lane_contours = []
            for lane in lane_contours:
                # Same uniform scaling, applied per lane.
                distortionMapping = lambda x, r: x * r
                result = distortionMapping(lane, aspect_ratio)
                # print("results of distortion and mapping",result)
                new_lane_contours.append(result)
                for pt in result:
                    ## Visualizing the distorted road
                    ptx = round(pt[0])
                    pty = round(pt[1])
                    cv2.circle(road_image, (ptx, pty), 2, (255,0,0), -1)
            cv2.imwrite(self.output_folder + "{}_distortion_mapping.jpg".format(self.process_number), road_image)
            self.process_number += 1
            return new_lane_contours
def adjustRoadToSimulation(self, dist_height):
""" Adjusting the Road for the simulation settings """
adjusted_lane_midpoints = list()
for lane in self.roads["small_lane_midpoints"]:
lane = np.array(lane)
adjusted_lane_midpoints.append(np.hstack((lane[:, 0].reshape(lane.shape[0], 1), dist_height - lane[:, 1].reshape(lane.shape[0], 1))).tolist())
self.roads["simulation_lane_midpoints"] = adjusted_lane_midpoints
def settingRoadToBeamNG(self):
""" Setting the roads in BeamNG format """
lane_nodes = list()
for i, lane in enumerate(self.roads["simulation_lane_midpoints"]):
nodes = list()
road_lane_width = self.roads["scaled_lane_width"][i]
for j, node in enumerate(lane):
point = node.copy()
# (car_length * a_ratio / 4 * 0.9) * 2 can be use istead
point.extend([0, round(road_lane_width, 3)])
nodes.append(tuple(point))
lane_nodes.append(nodes)
return lane_nodes
def extractRoadInformation(self, image_path, time_efficiency, show_image, output_folder, car_length, car_width, car_length_sim):
## Read the image and create a blank mask
image = self.pre_process.readImage(image_path=image_path)
print("\n------------Road-------------\n")
print("Image Dimensions", image.shape[:2])
self.height, self.width = image.shape[:2]
self.show_image = show_image
self.output_folder = os.path.join(output_folder, "road/")
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
self.process_number = 0
""" Resize the image """
# image = self.pre_process.resize(image=image)
# print("Image Dimensions after resizing", image.shape[:2])
""" Get Mask for the Image dimension """
mask = self.pre_process.getMask(image=image)
"""Transform to gray colorspace and threshold the image"""
gray = self.pre_process.changeColorSpace(image=image, color_code=cv2.COLOR_BGR2GRAY)
blur = self.pre_process.blurImage(image=gray, kernel_size=(3, 3), sigmaX= 0)
thresh = self.pre_process.threshold(gray, 50, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
''' Use of Horizontal and Vertical Morphological Kernal for increasing the pixel intesities along X and Y-axis'''
# dilate_kernel = np.ones((10, 10), np.uint8) # note this is a horizontal kernel
dilate_image = self.pre_process.dilate(thresh, kernel_window=(10, 10))
erode_image = self.pre_process.erode(dilate_image, kernel_window=(10, 10))
""" Morphological Operations """
morph_img = erode_image.copy()
## rect_kernel = cv2.getStructuringElement( cv2.MORPH_RECT,(10,10))
## ellipse_kernel = cv2.getStructuringElement( cv2.MORPH_ELLIPSE,(15,15))
## morph_img = self.pre_process.applyMorphologicalOperation(thresh, cv2.MORPH_CLOSE, ellipse_kernel)
## kernel = np.ones((20,20),np.uint8)
# opening = self.pre_process.applyMorphologicalOperation(thresh, cv2.MORPH_OPEN, kernel_window=(20,20))
""" Show and Plot images """
# self.pre_process.showImage('image original', image)
# self.pre_process.showImage(title="thresholded image", image=thresh)
# self.pre_process.showImage("dilate", dilate_image)
# self.pre_process.showImage("erode", erode_image)
# self.pre_process.showImage("Morphological Close Operation ", erode_image)
# if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# self.pre_process.plotFigure(thresh)
# self.pre_process.plotFigure(morph_img, cmap="brg", title="Morphological Close Operation ")
""" Saving the figure"""
cv2.imwrite(self.output_folder + "{}_gray_image.jpg".format(self.process_number), gray)
self.process_number += 1
cv2.imwrite(self.output_folder + "{}_blur_image.jpg".format(self.process_number), blur)
self.process_number += 1
cv2.imwrite(self.output_folder + "{}_threshold_image.jpg".format(self.process_number), thresh)
self.process_number += 1
cv2.imwrite(self.output_folder + "{}_dilate_image.jpg".format(self.process_number), dilate_image)
self.process_number += 1
cv2.imwrite(self.output_folder + "{}_erode_image.jpg".format(self.process_number), erode_image)
self.process_number += 1
road_image = image.copy()
small_contours, large_contours = self.extractContours(morph_img, road_image, car_length, car_width)
canvas = cv2.merge((morph_img, morph_img, morph_img))
canvas = canvas.copy()
canvas = cv2.bitwise_not(canvas)
"""
if length of car in simulation that is 4m is divided by the length of the car in the sketch then the road cannot be curved as majority of the points will
be omitted by the system in order to approximate the area as shown in the sketch to to the simulation.
e.g car_length_simulation / car_length_sketch :
4 / 82 = 0.048
0.048 mutiplied by all pixel values to get a scale down version of the map that conforms to the simulation in sketch
will affect the curve road by making it straight and whereas the straight road will have no affect at all.
"""
t0 = time.time()
if (len(large_contours) == 2):
## its a one road or lane with seperation
print("\nStraight or Curve road")
number_of_lanes = len(large_contours)
width_of_lanes, length_of_lanes, large_lane_midpoints = self.midpointOfTheLane(road_image,
sample_size=round(car_length) // 4,
lane_contour=large_contours)
# self.pre_process.showImage("drawing contours", road_image)
a_ratio = self.calculateAspectRatio(car_length=car_length, car_length_sim=car_length_sim)
small_lane_midpoints = self.distortionMappingVizualization(aspect_ratio=a_ratio,
road_image=road_image,
lane_contours=large_lane_midpoints,
number_of_roads=len(large_contours)) # a_ratio / 2
small_lane_midpoints = [small_lane_midpoints]
scaled_lane_width = [width_of_lanes[0] * a_ratio]
scaled_lane_length = [length_of_lanes[0] * a_ratio]
self.roads["sketch_lane_width"] = width_of_lanes
self.roads["sketch_lane_length"] = length_of_lanes
self.roads["large_lane_midpoints"] = large_lane_midpoints
self.roads["small_lane_midpoints"] = small_lane_midpoints
self.roads["scaled_lane_width"] = scaled_lane_width
self.roads["scaled_lane_length"] = scaled_lane_length
# self.pre_process.showImage("distortion and mapping", road_image)
elif(len(large_contours) == 3 or len(large_contours) == 4):
## Its T-Section road or two roads or lane with seperation
print("\nT-Section road or Four way road")
number_of_lanes = len(large_contours)
ed_dist_bet_lanes, width_of_lanes, ordered_midpoints_lanes = self.midpointOfFourWayAndTSection(canvas, large_contours)
# self.pre_process.showImage("drawing contours", road_image)
### If its a double straight road and not a four-way or merge-into
try:
extrapolated_ordered_midpoint = self.getExtrapolatedPointMidpoints(canvas=road_image,
traverse_parameter=round(car_length) // 3,
ordered_mid_lane=ordered_midpoints_lanes)
except:
extrapolated_ordered_midpoint = ordered_midpoints_lanes
# length_of_lanes = extractLengthOfRoads(extrapolated_ordered_midpoint)
# else:
# print("Nothing went wrong")
extrap_ord_mid_lanes = list()
small_lane_midpoints = list()
length_of_lanes = self.extractLengthOfRoads(extrapolated_ordered_midpoint)
for lane_midpoints in extrapolated_ordered_midpoint:
extrap_ord_mid_lanes.append(np.array(lane_midpoints, dtype="float32"))
a_ratio = self.calculateAspectRatio(car_length=car_length, car_length_sim=car_length_sim)
small_lane_contours = self.distortionMappingVizualization(aspect_ratio=a_ratio,
road_image=road_image,
lane_contours=large_contours,
number_of_roads=len(large_contours))
small_lane_mids = self.distortionMappingVizualization(aspect_ratio=a_ratio,
road_image=road_image,
lane_contours=extrap_ord_mid_lanes,
number_of_roads=len(large_contours))
for lane_midpoints in small_lane_mids:
small_lane = lane_midpoints.astype(np.float)
small_lane_midpoints.append(small_lane.tolist())
for ordered_mids in extrapolated_ordered_midpoint:
count = 0
for pt in ordered_mids:
# print( tuple([pt[0] + 5, pt[1] + 5]))
cv2.circle(road_image, tuple( [int(pt[0]), int(pt[1])]), 3, (0, 0, 255), -1)
cv2.putText(road_image, str(count), tuple( [int(pt[0]) + 15, int(pt[1]) + 10]), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1)
# self.pre_process.showImage("canvas 3", road_image)
# cv2.waitKey(10)
count += 1
cv2.imwrite(self.output_folder + "{}_extrapolated_ordered_midpoint.jpg".format(self.process_number), road_image)
self.process_number += 1
# cv2.destroyAllWindows()
# break
scaled_lane_width = [width * a_ratio for width in width_of_lanes]
scaled_lane_length = [length * a_ratio for length in length_of_lanes]
self.roads["large_lane_midpoints"] = extrapolated_ordered_midpoint
self.roads["sketch_lane_width"] = width_of_lanes
self.roads["sketch_lane_length"] = length_of_lanes
self.roads["small_lane_midpoints"] = small_lane_midpoints
self.roads["scaled_lane_width"] = scaled_lane_width
self.roads["scaled_lane_length"] = scaled_lane_length
self.roads["sequence_of_lanes"] = list(zip(small_lane_midpoints, scaled_lane_length, scaled_lane_width))
# self.pre_process.showImage("distortion and mapping", road_image)
t1 = time.time()
time_efficiency["road_ext"] = t1-t0
# print("total time = ", t1-t0)
if self.show_image:
self.pre_process.showImage("Final Road with Distortion and Mapping", road_image, time=1000)
cv2.imwrite(self.output_folder + "{}_final_result.jpg".format(self.process_number), road_image)
self.process_number += 1
# self.pre_process.plotFigure(road_image, cmap="brg", title="Final Road with Distortion and Mapping")
# self.pre_process.saveFigure('road_distorted.jpg', dpi=300)
distorted_height = road_image.shape[0] * (car_length_sim / car_length)
self.adjustRoadToSimulation(distorted_height)
final_lane_nodes = self.settingRoadToBeamNG()
return self.roads, final_lane_nodes
| [
"cv2.norm",
"numpy.sqrt",
"numpy.hstack",
"math.cos",
"numpy.array",
"pre_processing.Pre_Processing",
"os.path.exists",
"cv2.arcLength",
"cv2.merge",
"cv2.drawContours",
"math.degrees",
"cv2.circle",
"math.atan2",
"time.time",
"os.makedirs",
"math.pow",
"os.path.join",
"itertools.c... | [((355, 371), 'pre_processing.Pre_Processing', 'Pre_Processing', ([], {}), '()\n', (369, 371), False, 'from pre_processing import Pre_Processing\n'), ((5401, 5442), 'itertools.combinations', 'itertools.combinations', (['large_contours', '(2)'], {}), '(large_contours, 2)\n', (5423, 5442), False, 'import itertools\n'), ((28404, 28440), 'os.path.join', 'os.path.join', (['output_folder', '"""road/"""'], {}), "(output_folder, 'road/')\n", (28416, 28440), False, 'import os\n'), ((31629, 31673), 'cv2.merge', 'cv2.merge', (['(morph_img, morph_img, morph_img)'], {}), '((morph_img, morph_img, morph_img))\n', (31638, 31673), False, 'import cv2\n'), ((31722, 31745), 'cv2.bitwise_not', 'cv2.bitwise_not', (['canvas'], {}), '(canvas)\n', (31737, 31745), False, 'import cv2\n'), ((32402, 32413), 'time.time', 'time.time', ([], {}), '()\n', (32411, 32413), False, 'import time\n'), ((38385, 38396), 'time.time', 'time.time', ([], {}), '()\n', (38394, 38396), False, 'import time\n'), ((1425, 1479), 'cv2.drawContours', 'cv2.drawContours', (['image', '[contour]', '(0)', '(255, 0, 0)', '(-1)'], {}), '(image, [contour], 0, (255, 0, 0), -1)\n', (1441, 1479), False, 'import cv2\n'), ((2369, 2383), 'numpy.array', 'np.array', (['road'], {}), '(road)\n', (2377, 2383), True, 'import numpy as np\n'), ((3606, 3668), 'cv2.norm', 'cv2.norm', (['(lane_contour[0][i] - lane_contour[1][i])', 'cv2.NORM_L2'], {}), '(lane_contour[0][i] - lane_contour[1][i], cv2.NORM_L2)\n', (3614, 3668), False, 'import cv2\n'), ((15449, 15473), 'numpy.array', 'np.array', (['lane_midpoints'], {}), '(lane_midpoints)\n', (15457, 15473), True, 'import numpy as np\n'), ((17272, 17380), 'numpy.sqrt', 'np.sqrt', (['((lane_midpoints[:, 0] - extreme_point[0]) ** 2 + (lane_midpoints[:, 1] -\n extreme_point[1]) ** 2)'], {}), '((lane_midpoints[:, 0] - extreme_point[0]) ** 2 + (lane_midpoints[:,\n 1] - extreme_point[1]) ** 2)\n', (17279, 17380), True, 'import numpy as np\n'), ((17484, 17523), 'numpy.hstack', 
'np.hstack', (['[edist_lane, lane_midpoints]'], {}), '([edist_lane, lane_midpoints])\n', (17493, 17523), True, 'import numpy as np\n'), ((25145, 25168), 'numpy.array', 'np.array', (['lane_contours'], {}), '(lane_contours)\n', (25153, 25168), True, 'import numpy as np\n'), ((27066, 27080), 'numpy.array', 'np.array', (['lane'], {}), '(lane)\n', (27074, 27080), True, 'import numpy as np\n'), ((28456, 28490), 'os.path.exists', 'os.path.exists', (['self.output_folder'], {}), '(self.output_folder)\n', (28470, 28490), False, 'import os\n'), ((28504, 28535), 'os.makedirs', 'os.makedirs', (['self.output_folder'], {}), '(self.output_folder)\n', (28515, 28535), False, 'import os\n'), ((1187, 1216), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(False)'], {}), '(contour, False)\n', (1200, 1216), False, 'import cv2\n'), ((16067, 16107), 'numpy.array', 'np.array', (['[x_low, x_high, y_low, y_high]'], {}), '([x_low, x_high, y_low, y_high])\n', (16075, 16107), True, 'import numpy as np\n'), ((16132, 16222), 'numpy.sqrt', 'np.sqrt', (['((extreme_sides[:, 0] - point[0]) ** 2 + (extreme_sides[:, 1] - point[1]) ** 2)'], {}), '((extreme_sides[:, 0] - point[0]) ** 2 + (extreme_sides[:, 1] -\n point[1]) ** 2)\n', (16139, 16222), True, 'import numpy as np\n'), ((25782, 25836), 'cv2.circle', 'cv2.circle', (['road_image', '(ptx, pty)', '(2)', '(255, 0, 0)', '(-1)'], {}), '(road_image, (ptx, pty), 2, (255, 0, 0), -1)\n', (25792, 25836), False, 'import cv2\n'), ((2582, 2638), 'numpy.sqrt', 'np.sqrt', (['((x[k + 1] - x[k]) ** 2 + (y[k + 1] - y[k]) ** 2)'], {}), '((x[k + 1] - x[k]) ** 2 + (y[k + 1] - y[k]) ** 2)\n', (2589, 2638), True, 'import numpy as np\n'), ((11985, 12015), 'numpy.array', 'np.array', (['final_road_points[i]'], {}), '(final_road_points[i])\n', (11993, 12015), True, 'import numpy as np\n'), ((12050, 12077), 'numpy.array', 'np.array', (['final_road_points'], {}), '(final_road_points)\n', (12058, 12077), True, 'import numpy as np\n'), ((12256, 12350), 'numpy.sqrt', 'np.sqrt', 
(['((road_points[:, 0] - ref_point[0]) ** 2 + (road_points[:, 1] - ref_point[1\n ]) ** 2)'], {}), '((road_points[:, 0] - ref_point[0]) ** 2 + (road_points[:, 1] -\n ref_point[1]) ** 2)\n', (12263, 12350), True, 'import numpy as np\n'), ((12532, 12563), 'numpy.hstack', 'np.hstack', (['[edist, road_points]'], {}), '([edist, road_points])\n', (12541, 12563), True, 'import numpy as np\n'), ((20098, 20113), 'numpy.array', 'np.array', (['x_sim'], {}), '(x_sim)\n', (20106, 20113), True, 'import numpy as np\n'), ((20218, 20233), 'numpy.array', 'np.array', (['y_sim'], {}), '(y_sim)\n', (20226, 20233), True, 'import numpy as np\n'), ((20500, 20521), 'math.pow', 'math.pow', (['change_y', '(2)'], {}), '(change_y, 2)\n', (20508, 20521), False, 'import math\n'), ((22539, 22567), 'numpy.array', 'np.array', (['lane_midpoints[-1]'], {}), '(lane_midpoints[-1])\n', (22547, 22567), True, 'import numpy as np\n'), ((22898, 22929), 'numpy.array', 'np.array', (['reference_last_points'], {}), '(reference_last_points)\n', (22906, 22929), True, 'import numpy as np\n'), ((22966, 23099), 'numpy.sqrt', 'np.sqrt', (['((reference_last_points[:, 0] - current_last_point[0]) ** 2 + (\n reference_last_points[:, 1] - current_last_point[1]) ** 2)'], {}), '((reference_last_points[:, 0] - current_last_point[0]) ** 2 + (\n reference_last_points[:, 1] - current_last_point[1]) ** 2)\n', (22973, 23099), True, 'import numpy as np\n'), ((23333, 23386), 'numpy.hstack', 'np.hstack', (['[edist_last_points, reference_last_points]'], {}), '([edist_last_points, reference_last_points])\n', (23342, 23386), True, 'import numpy as np\n'), ((24082, 24100), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (24092, 24100), False, 'import math\n'), ((24125, 24143), 'math.degrees', 'math.degrees', (['rads'], {}), '(rads)\n', (24137, 24143), False, 'import math\n'), ((26560, 26614), 'cv2.circle', 'cv2.circle', (['road_image', '(ptx, pty)', '(2)', '(255, 0, 0)', '(-1)'], {}), '(road_image, (ptx, pty), 2, (255, 0, 
0), -1)\n', (26570, 26614), False, 'import cv2\n'), ((21335, 21356), 'math.pow', 'math.pow', (['change_x', '(2)'], {}), '(change_x, 2)\n', (21343, 21356), False, 'import math\n'), ((35658, 35699), 'numpy.array', 'np.array', (['lane_midpoints'], {'dtype': '"""float32"""'}), "(lane_midpoints, dtype='float32')\n", (35666, 35699), True, 'import numpy as np\n'), ((5941, 5987), 'math.pow', 'math.pow', (['(point_lane_2[0] - point_lane_1[0])', '(2)'], {}), '(point_lane_2[0] - point_lane_1[0], 2)\n', (5949, 5987), False, 'import math\n'), ((6032, 6078), 'math.pow', 'math.pow', (['(point_lane_2[1] - point_lane_1[1])', '(2)'], {}), '(point_lane_2[1] - point_lane_1[1], 2)\n', (6040, 6078), False, 'import math\n'), ((14351, 14381), 'numpy.array', 'np.array', (['final_road_points[i]'], {}), '(final_road_points[i])\n', (14359, 14381), True, 'import numpy as np\n'), ((14384, 14418), 'numpy.array', 'np.array', (['final_road_points[i - 1]'], {}), '(final_road_points[i - 1])\n', (14392, 14418), True, 'import numpy as np\n'), ((24320, 24351), 'math.cos', 'math.cos', (['(angle * np.pi / 180.0)'], {}), '(angle * np.pi / 180.0)\n', (24328, 24351), False, 'import math\n'), ((24425, 24456), 'math.sin', 'math.sin', (['(angle * np.pi / 180.0)'], {}), '(angle * np.pi / 180.0)\n', (24433, 24456), False, 'import math\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN, Dropout
from tensorflow.keras.models import Sequential
import os
import json
import pickle
import scipy.io as sio
import matplotlib.pyplot as plt
from keras.utils import np_utils
from sklearn.preprocessing import MinMaxScaler
import time
import noise
from sklearn.metrics import mean_squared_error, mean_absolute_error
import pandas as pd
import math
# Basic run-time settings
checkpoint_flag = int(input("是否进行断点续训?是:1;否:0")) # ask whether to resume training from the previously saved best checkpoint (1 = yes, 0 = no)
snr=0 # target signal-to-noise ratio after adding white Gaussian noise to the target domain (not used in the visible code — TODO confirm)
# Sample parameters and prediction horizon
train_len=100 # length of one training/validation input window (points)
pre_len=50 # number of future points to predict (also the length of the held-out ground truth)
train_num=3000 # number of training samples
test_num=300 # number of validation samples
batch_size=256
epochs=50
data = sio.loadmat(r'data\0HP\normal_0_97.mat')
de_data = data['X097_DE_time']  # drive-end accelerometer channel of the CWRU bearing record
# Raw splits: the first train_len + train_num points feed training; the next
# train_len + test_num points feed validation (windows overlap by train_len).
training_set = de_data[:train_len + train_num]
test_set = de_data[train_len + train_num:2 * train_len + train_num + test_num]
# Scale everything to (0, 1) using statistics learned on the training split only.
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
test_set = sc.transform(test_set)
# Sliding-window samples: each input is train_len consecutive points and the
# label is the single point right after the window.
x_train = [training_set_scaled[i - train_len:i, 0] for i in range(train_len, len(training_set_scaled))]
y_train = [training_set_scaled[i, 0] for i in range(train_len, len(training_set_scaled))]
# Shuffle inputs and labels with the same seed so each (x, y) pair stays aligned.
np.random.seed(7)
np.random.shuffle(x_train)
np.random.seed(7)
np.random.shuffle(y_train)
tf.random.set_seed(7)
x_train, y_train = np.array(x_train), np.array(y_train)
# RNN input layout: [samples, timesteps, features]; one scalar feature per step.
x_train = np.reshape(x_train, (x_train.shape[0], train_len, 1))
# Validation samples, built the same way (no shuffling needed).
x_test = [test_set[i - train_len:i, 0] for i in range(train_len, len(test_set))]
y_test = [test_set[i, 0] for i in range(train_len, len(test_set))]
x_test, y_test = np.array(x_test), np.array(y_test)
x_test = np.reshape(x_test, (x_test.shape[0], train_len, 1))
model_name = "predict-1-RNN"
# Instantiate a Sequential model
model = Sequential()
# First RNN layer: returns the full sequence so it can be stacked
model.add(SimpleRNN(80, activation='tanh', kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal',
              bias_initializer='zeros', return_sequences=True))
model.add(Dropout(0.2))
# Second RNN layer: returns only the last output for regression
model.add(SimpleRNN(100, return_sequences=False))
model.add(Dropout(0.2))
# # # Third RNN layer (disabled)
# model.add(SimpleRNN(120, return_sequences=False))
# model.add(Dropout(0.2))
model.add(Dense(units=1))
# model.add(Dropout(0.2))
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='mean_squared_error') # mean squared error as the loss function
# Only the loss is monitored (no accuracy metric) since this is a regression task
logdir = os.path.join('.\logs\predict-RNN-1_logs')
summary = tf.keras.callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)
now_time = time.time() # record training start time
if checkpoint_flag :
    print('开始断点续训')
    checkpoint_save_path = "./checkpoint/predict-1-RNN.ckpt"
    # Resume from the checkpoint if its index file exists
    if os.path.exists(checkpoint_save_path + '.index'):
        print('-------------load the model-----------------')
        model.load_weights(checkpoint_save_path)
    # Save only the best weights (lowest validation loss) during training
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                     save_weights_only=True,
                                                     save_best_only=True)
    # Start model training with checkpointing
    history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,
                        verbose=1, validation_data=(x_test, y_test), shuffle=True,
                        callbacks=[cp_callback,summary])
else :# train from scratch, no checkpoint resume
    print('未进行断点续训')
    history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,
                        verbose=1, validation_data=(x_test, y_test), shuffle=True,
                        callbacks=[summary])
total_time = time.time() - now_time # total wall-clock training time in seconds
# Report timing
print("训练总耗时/s:", total_time) # prints total training time (seconds)
# Plot training vs. validation loss curves
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
################## predict ######################
# Rolling one-step-ahead prediction: feed the model its own outputs for pre_len steps.
predicted_data=[] # collected one-step predictions (scaled domain)
source_3=x_test[0:1] # first test sample as a 3-D batch (1, train_len, 1)
source_1=x_test[0].ravel() # the same sample flattened to 1-D for window sliding
for i in range(pre_len):
    pre = model.predict(source_3) # predict the next point from the current window
    pre=pre.ravel()
    predicted_data = np.append(predicted_data, pre[0]) # append the new prediction to the result array
    source_1 = np.append(source_1[1:train_len+1], pre[0]) # slide the window: drop the oldest point, append the prediction
    source_3 = np.reshape(source_1, (1, train_len, 1)) # lift back to 3-D for the next model call
predicted_data = np.reshape(predicted_data, (pre_len, 1))
# Undo (0,1) min-max scaling on the source window
source_data = sc.inverse_transform(test_set[0:train_len])
# Undo scaling on the predictions
predicted_data = sc.inverse_transform(predicted_data)
# Undo scaling on the ground-truth continuation
real_data = sc.inverse_transform(test_set[train_len:train_len+pre_len])
# Plot predicted vs. real continuation of the signal
plt.plot(real_data, color='red', label='real_data')
plt.plot(predicted_data, color='blue', label='Predicted_data')
plt.title('data Prediction')
plt.xlabel('Time')
plt.ylabel('accelarate')
plt.legend()
plt.show()
# Persist source window, predictions, and ground truth for later comparison
np.savetxt(r'save_txt\source_data.txt',source_data)
np.savetxt(r'save_txt\predict_data.txt',predicted_data)
np.savetxt(r'save_txt\real_data.txt',real_data)
##########evaluate##############
# MSE: mean of squared prediction errors
mse = mean_squared_error(predicted_data, real_data)
# RMSE: square root of the MSE
rmse = math.sqrt(mean_squared_error(predicted_data, real_data))
# MAE: mean of absolute prediction errors
mae = mean_absolute_error(predicted_data, real_data)
print('均方误差: %.6f' % mse)
print('均方根误差: %.6f' % rmse)
print('平均绝对误差: %.6f' % mae)
| [
"matplotlib.pyplot.ylabel",
"scipy.io.loadmat",
"numpy.array",
"tensorflow.keras.layers.Dense",
"os.path.exists",
"numpy.reshape",
"tensorflow.keras.layers.SimpleRNN",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"sklearn.metrics.mean_absolute_error",
"tensorflow.... | [((770, 811), 'scipy.io.loadmat', 'sio.loadmat', (['"""data\\\\0HP\\\\normal_0_97.mat"""'], {}), "('data\\\\0HP\\\\normal_0_97.mat')\n", (781, 811), True, 'import scipy.io as sio\n'), ((1021, 1055), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1033, 1055), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1588, 1605), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (1602, 1605), True, 'import numpy as np\n'), ((1607, 1633), 'numpy.random.shuffle', 'np.random.shuffle', (['x_train'], {}), '(x_train)\n', (1624, 1633), True, 'import numpy as np\n'), ((1635, 1652), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (1649, 1652), True, 'import numpy as np\n'), ((1654, 1680), 'numpy.random.shuffle', 'np.random.shuffle', (['y_train'], {}), '(y_train)\n', (1671, 1680), True, 'import numpy as np\n'), ((1682, 1703), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(7)'], {}), '(7)\n', (1700, 1703), True, 'import tensorflow as tf\n'), ((1976, 2029), 'numpy.reshape', 'np.reshape', (['x_train', '(x_train.shape[0], train_len, 1)'], {}), '(x_train, (x_train.shape[0], train_len, 1))\n', (1986, 2029), True, 'import numpy as np\n'), ((2301, 2352), 'numpy.reshape', 'np.reshape', (['x_test', '(x_test.shape[0], train_len, 1)'], {}), '(x_test, (x_test.shape[0], train_len, 1))\n', (2311, 2352), True, 'import numpy as np\n'), ((2415, 2427), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2425, 2427), False, 'from tensorflow.keras.models import Sequential\n'), ((3070, 3113), 'os.path.join', 'os.path.join', (['""".\\\\logs\\\\predict-RNN-1_logs"""'], {}), "('.\\\\logs\\\\predict-RNN-1_logs')\n", (3082, 3113), False, 'import os\n'), ((3123, 3187), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'logdir', 'histogram_freq': '(1)'}), '(log_dir=logdir, histogram_freq=1)\n', (3153, 3187), True, 
'import tensorflow as tf\n'), ((3204, 3215), 'time.time', 'time.time', ([], {}), '()\n', (3213, 3215), False, 'import time\n'), ((4417, 4454), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': '"""Training Loss"""'}), "(loss, label='Training Loss')\n", (4425, 4454), True, 'import matplotlib.pyplot as plt\n'), ((4456, 4499), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], {'label': '"""Validation Loss"""'}), "(val_loss, label='Validation Loss')\n", (4464, 4499), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4542), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (4510, 4542), True, 'import matplotlib.pyplot as plt\n'), ((4544, 4556), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4554, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4558, 4568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4566, 4568), True, 'import matplotlib.pyplot as plt\n'), ((5085, 5125), 'numpy.reshape', 'np.reshape', (['predicted_data', '(pre_len, 1)'], {}), '(predicted_data, (pre_len, 1))\n', (5095, 5125), True, 'import numpy as np\n'), ((5421, 5472), 'matplotlib.pyplot.plot', 'plt.plot', (['real_data'], {'color': '"""red"""', 'label': '"""real_data"""'}), "(real_data, color='red', label='real_data')\n", (5429, 5472), True, 'import matplotlib.pyplot as plt\n'), ((5474, 5536), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted_data'], {'color': '"""blue"""', 'label': '"""Predicted_data"""'}), "(predicted_data, color='blue', label='Predicted_data')\n", (5482, 5536), True, 'import matplotlib.pyplot as plt\n'), ((5538, 5566), 'matplotlib.pyplot.title', 'plt.title', (['"""data Prediction"""'], {}), "('data Prediction')\n", (5547, 5566), True, 'import matplotlib.pyplot as plt\n'), ((5568, 5586), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (5578, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5612), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""accelarate"""'], {}), "('accelarate')\n", (5598, 5612), True, 'import matplotlib.pyplot as plt\n'), ((5614, 5626), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5624, 5626), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5636, 5638), True, 'import matplotlib.pyplot as plt\n'), ((5656, 5708), 'numpy.savetxt', 'np.savetxt', (['"""save_txt\\\\source_data.txt"""', 'source_data'], {}), "('save_txt\\\\source_data.txt', source_data)\n", (5666, 5708), True, 'import numpy as np\n'), ((5709, 5765), 'numpy.savetxt', 'np.savetxt', (['"""save_txt\\\\predict_data.txt"""', 'predicted_data'], {}), "('save_txt\\\\predict_data.txt', predicted_data)\n", (5719, 5765), True, 'import numpy as np\n'), ((5766, 5814), 'numpy.savetxt', 'np.savetxt', (['"""save_txt\\\\real_data.txt"""', 'real_data'], {}), "('save_txt\\\\real_data.txt', real_data)\n", (5776, 5814), True, 'import numpy as np\n'), ((5918, 5963), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predicted_data', 'real_data'], {}), '(predicted_data, real_data)\n', (5936, 5963), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((6145, 6191), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['predicted_data', 'real_data'], {}), '(predicted_data, real_data)\n', (6164, 6191), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((1748, 1765), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1756, 1765), True, 'import numpy as np\n'), ((1767, 1784), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1775, 1784), True, 'import numpy as np\n'), ((2256, 2272), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (2264, 2272), True, 'import numpy as np\n'), ((2274, 2290), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2282, 2290), True, 'import numpy as np\n'), ((2448, 2610), 
'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', (['(80)'], {'activation': '"""tanh"""', 'kernel_initializer': '"""glorot_uniform"""', 'recurrent_initializer': '"""orthogonal"""', 'bias_initializer': '"""zeros"""', 'return_sequences': '(True)'}), "(80, activation='tanh', kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal', bias_initializer='zeros',\n return_sequences=True)\n", (2457, 2610), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN, Dropout\n'), ((2636, 2648), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2643, 2648), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN, Dropout\n'), ((2670, 2708), 'tensorflow.keras.layers.SimpleRNN', 'SimpleRNN', (['(100)'], {'return_sequences': '(False)'}), '(100, return_sequences=False)\n', (2679, 2708), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN, Dropout\n'), ((2721, 2733), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2728, 2733), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN, Dropout\n'), ((2839, 2853), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (2844, 2853), False, 'from tensorflow.keras.layers import Dense, Activation, Flatten, SimpleRNN, Dropout\n'), ((3340, 3387), 'os.path.exists', 'os.path.exists', (["(checkpoint_save_path + '.index')"], {}), "(checkpoint_save_path + '.index')\n", (3354, 3387), False, 'import os\n'), ((3521, 3635), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_save_path', 'save_weights_only': '(True)', 'save_best_only': '(True)'}), '(filepath=checkpoint_save_path,\n save_weights_only=True, save_best_only=True)\n', (3555, 3635), True, 'import tensorflow as tf\n'), ((4255, 4266), 'time.time', 'time.time', ([], {}), '()\n', (4264, 4266), False, 'import time\n'), ((4862, 4895), 
'numpy.append', 'np.append', (['predicted_data', 'pre[0]'], {}), '(predicted_data, pre[0])\n', (4871, 4895), True, 'import numpy as np\n'), ((4931, 4975), 'numpy.append', 'np.append', (['source_1[1:train_len + 1]', 'pre[0]'], {}), '(source_1[1:train_len + 1], pre[0])\n', (4940, 4975), True, 'import numpy as np\n'), ((4999, 5038), 'numpy.reshape', 'np.reshape', (['source_1', '(1, train_len, 1)'], {}), '(source_1, (1, train_len, 1))\n', (5009, 5038), True, 'import numpy as np\n'), ((6032, 6077), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predicted_data', 'real_data'], {}), '(predicted_data, real_data)\n', (6050, 6077), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((2909, 2940), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (2933, 2940), True, 'import tensorflow as tf\n')] |
import matplotlib.pyplot as plt
import numpy as np
class RefDataType:
    """Alignment-summary statistics plus plotting attributes for one reference.

    length/steps/coverage/cv hold per-bin (range, count, value) tuples;
    marker, color and label control how the reference is drawn.
    """

    # Attribute names that get_prop is allowed to resolve.
    _KNOWN_PROPS = ("length", "steps", "coverage", "cv", "marker", "color", "label")

    def __init__(self, length, steps, coverage, cv, marker, color, label):
        self.length = length
        self.steps = steps
        self.coverage = coverage
        self.cv = cv
        self.marker = marker
        self.color = color
        self.label = label

    def get_prop(self, prop_str):
        """Return the attribute named prop_str, or "oh crap!" for unknown names."""
        if prop_str in self._KNOWN_PROPS:
            return getattr(self, prop_str)
        return "oh crap!"
def filter_prob_vals(vals, cnts):
    """Carry the previous value forward where a bin has no observations.

    A position (other than index 0) whose value is 0 and whose count is 0 is
    treated as "no data": it is overwritten with the preceding value so the
    plot line stays continuous.  Note that `vals` is modified in place.

    Returns (vals, list of indices that were replaced).
    """
    replaced = []
    for idx in range(len(vals)):
        if idx > 0 and vals[idx] == 0 and cnts[idx] == 0:
            vals[idx] = vals[idx - 1]
            replaced.append(idx)
    return vals, replaced
def ref_prop_plot(ref, prop, prop_ind, ranges):
    """Scatter+line plot of the per-bin fraction for one reference set.

    Draws into the module-level ``axarr[0][prop_ind]`` axes.  Bins that
    had no instances (as reported by filter_prob_vals) are drawn as
    large hollow markers; the last bin of the 'cv' column (prop_ind 3)
    carries the legend label.
    """
    # steps/coverage use log-spaced bins, so take geometric midpoints.
    if prop == 'steps':
        x_vals = get_exp_x_mids(ranges[prop_ind],2)
    elif prop == 'coverage':
        x_vals = get_exp_x_mids(ranges[prop_ind],10)
    else:
        x_vals = get_x_mids(ranges[prop_ind])
    # num_meas is currently unused; it fed the commented-out size scaling.
    num_meas = sum([b[1] for b in ref.get_prop(prop)])
    # s=[a*1000./num_meas for a in [b[1] for b in ref.get_prop(prop)]]
    y_vals = [b[2] for b in ref.get_prop(prop)]
    # Replace empty bins with the previous value; remember which ones.
    y_vals, probs = filter_prob_vals(y_vals, [b[1] for b in ref.get_prop(prop)])
    for i in range(len(y_vals)):
        if i == len(y_vals)-1 and prop_ind==3:
            # Only this point gets a label, so the legend shows one
            # entry per reference set.
            axarr[0][prop_ind].scatter(x_vals[i], y_vals[i],
                marker=ref.marker, label = ref.get_prop('label'),
                facecolors=ref.color, edgecolors=ref.color, s=100)
        elif i not in probs:
            axarr[0][prop_ind].scatter(x_vals[i], y_vals[i],
                marker=ref.marker,
                facecolors=ref.color, edgecolors=ref.color, s=100)
        else:
            # Patched (no-data) bin: big hollow marker.
            axarr[0][prop_ind].scatter(x_vals[i], y_vals[i],
                marker=ref.marker,
                facecolors='none', edgecolors=ref.color, s=1000)
    axarr[0][prop_ind].plot(x_vals, y_vals, c=ref.color)
def ref_counts_plot(ref, prop, prop_ind, ranges):
    """Plot the normalised per-bin instance counts for one reference set.

    Draws into the module-level ``axarr[1][prop_ind]`` axes (second row
    of the figure).
    """
    mids = get_x_mids(ranges[prop_ind])
    counts = np.array([entry[1] for entry in ref.get_prop(prop)])
    fractions = np.true_divide(counts, sum(counts))
    axarr[1][prop_ind].scatter(mids, fractions,
        c=ref.color, marker=ref.marker, s=100)
    axarr[1][prop_ind].plot(mids, fractions, c=ref.color)
def get_x_mids(rng):
    """Return the midpoints of consecutive bin edges in ``rng``."""
    edges = np.asarray(rng)
    return 0.5 * (edges[:-1] + edges[1:])
def get_exp_x_mids(rng, base):
    """Return geometric midpoints for log-scaled bin edges.

    The edges are mapped to log-space (base 10 or base 2), averaged, and
    mapped back, so each midpoint is the geometric mean of its edges.
    """
    log_edges = np.log10(rng) if base == 10 else np.log2(rng)
    return base**get_x_mids(log_edges)
# Histogram bin edges for each property column of the figure.
length_x_rng = [0,4000,8000,12000,16000,20000]
step_x_rng = [1,2,4,8,16,32]
cov_x_rng = [1,10,100,1e3,1e4,1e5]
cv_x_rng = [0, 0.05, 0.1, 0.15, 0.20, 0.25]
ranges = [length_x_rng, step_x_rng, cov_x_rng, cv_x_rng]
# input data - from alignment summary (makefile) output
# rows 0 = length, 1 = steps, 2 = coverage, 3 = CV
# plotting rows 4 = marker, 5 = color
# Each tuple is (hits, total, fraction) for one bin of the property.
ref_100 = RefDataType(
    [(40, 46, 0.87), (19, 21, 0.9), (3, 6, 0.5), (4, 6, 0.67), (6, 7, 0.86)],
    [(59, 59, 1.0), (6, 11, 0.55), (3, 10, 0.3), (3, 4, 0.75), (1, 2, 0.5)],
    [(4, 4, 1.0), (39, 49, 0.8), (22, 26, 0.85), (7, 7, 1.0), (0, 0, 0)],
    [(67, 73, 0.92), (4, 5, 0.8), (1, 4, 0.25), (0, 3, 0.0), (0, 1, 0.0)],
    '^', 'y', '100'
)
ref_200 = RefDataType(
    [(59, 73, 0.81), (35, 46, 0.76), (6, 11, 0.55), (3, 6, 0.5), (4, 6, 0.67)],
    [(82, 83, 0.99), (3, 12, 0.25), (12, 24, 0.5), (7, 16, 0.44), (3, 7, 0.43)],
    [(6, 8, 0.75), (55, 71, 0.77), (37, 52, 0.71), (8, 10, 0.8), (1, 1, 1.0)],
    [(90, 95, 0.95), (7, 17, 0.41), (4, 11, 0.36), (3, 7, 0.43), (3, 11, 0.27)],
    'v', 'g', '200'
)
ref_400 = RefDataType(
    [(98, 118, 0.83), (62, 79, 0.78), (22, 27, 0.81), (13, 18, 0.72), (10, 12, 0.83)],
    [(146, 147, 0.99), (24, 35, 0.69), (17, 39, 0.44), (11, 19, 0.58), (7, 13, 0.54)],
    [(17, 22, 0.77), (105, 135, 0.78), (67, 77, 0.87), (11, 14, 0.79), (5, 6, 0.83)],
    [(174, 188, 0.93), (13, 23, 0.57), (7, 14, 0.5), (8, 14, 0.57), (3, 15, 0.2)],
    'h', 'c', '400'
)
ref_800 = RefDataType(
    [(193, 236, 0.82), (107, 145, 0.74), (39, 55, 0.71), (18, 28, 0.64), (8, 13, 0.62)],
    [(271, 278, 0.97), (39, 83, 0.47), (30, 58, 0.52), (10, 24, 0.42), (13, 27, 0.48)],
    [(30, 46, 0.65), (174, 230, 0.76), (127, 162, 0.78), (21, 26, 0.81), (6, 6, 1.0)],
    [(310, 345, 0.9), (25, 53, 0.47), (12, 27, 0.44), (7, 23, 0.3), (11, 27, 0.41)],
    's', 'r', '800'
)
ref_1600 = RefDataType(
    [(325, 404, 0.8), (181, 225, 0.8), (46, 72, 0.64), (35, 53, 0.66), (19, 25, 0.76)],
    [(432, 442, 0.98), (72, 130, 0.55), (48, 95, 0.51), (29, 58, 0.5), (19, 40, 0.47)],
    [(70, 104, 0.67), (253, 328, 0.77), (134, 173, 0.77), (119, 137, 0.87), (20, 23, 0.87)],
    [(500, 548, 0.91), (46, 71, 0.65), (25, 50, 0.5), (23, 62, 0.37), (12, 48, 0.25)],
    'o', 'b', '1600'
)
# plots
# Figure layout: row 0 = success fractions, row 1 = normalised counts;
# one column per property.
props = ['length', 'steps', 'coverage', 'cv']
f, axarr = plt.subplots(2, len(props), sharey='row', sharex='col')
# axarr[0].set_xticklabels(labels)
for ind, prop in enumerate(props):
    for ref in [ref_100, ref_200, ref_400, ref_800, ref_1600]:
        # print ref, prop, ind
        ref_prop_plot(ref, prop, ind, ranges)
        ref_counts_plot(ref, prop, ind, ranges)
    if prop == 'cv':
        axarr[0][ind].set_xlim(left=0, right=0.25)
        axarr[1][ind].set_xlim(left=0, right=0.25)
    # NOTE(review): 'basex' was removed in newer matplotlib (use 'base');
    # confirm the pinned matplotlib version before upgrading.
    if prop == 'steps':
        axarr[0][ind].set_xscale('log', basex=2)
        axarr[0][ind].set_xlim(1,32)
        axarr[1][ind].set_xscale('log', basex=2)
        axarr[1][ind].set_xlim(1,32)
    if prop == 'coverage':
        axarr[0][ind].set_xscale('log', basex=10)
        axarr[1][ind].set_xscale('log', basex=10)
    axarr[0][ind].set_title(prop.upper())
    # x_vals = get_x_mids(ranges[ind])
    # row = ref_1600.get_prop(prop)
    # instances = np.array([a[1] for a in row])
    # instances = np.true_divide(instances, sum(instances))
    # axarr[1][ind].scatter(x_vals, instances,
    #     c=ref_1600.color, marker=ref_1600.marker, s=100)
    # axarr[1][ind].plot(x_vals, instances, c=ref_1600.color)
    # legend - put it on the rightmost
    axarr[0][ind].legend()
plt.show()
| [
"numpy.array",
"numpy.log2",
"numpy.log10",
"matplotlib.pyplot.show"
] | [((6007, 6017), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6015, 6017), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2224), 'numpy.array', 'np.array', (['[a[1] for a in row]'], {}), '([a[1] for a in row])\n', (2203, 2224), True, 'import numpy as np\n'), ((2564, 2577), 'numpy.log10', 'np.log10', (['rng'], {}), '(rng)\n', (2572, 2577), True, 'import numpy as np\n'), ((2594, 2606), 'numpy.log2', 'np.log2', (['rng'], {}), '(rng)\n', (2601, 2606), True, 'import numpy as np\n'), ((2465, 2483), 'numpy.array', 'np.array', (['rng[:-1]'], {}), '(rng[:-1])\n', (2473, 2483), True, 'import numpy as np\n'), ((2486, 2503), 'numpy.array', 'np.array', (['rng[1:]'], {}), '(rng[1:])\n', (2494, 2503), True, 'import numpy as np\n')] |
# Stitch (source frame | synthesized frame) pairs into an XVID video.
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pathlib import Path
# NOTE(review): skimage.io, matplotlib.animation, HTML and matplotlib
# are imported but unused in this script.
from skimage import io
import matplotlib.animation as ani
from IPython.display import HTML
import matplotlib
source_dir = Path('./data/source/test_img')
target_dir = Path('./results/target/test_latest/images')
#target_dir = Path('./results/full_fake')
label_dir = Path('./data/source/show_label')
# Sorting keeps the three frame sequences aligned by filename order;
# assumes matching counts and naming across the three directories.
source_img_paths = sorted(source_dir.iterdir())
target_synth_paths = sorted(target_dir.glob('*synthesized*'))
#target_synth_paths = sorted(target_dir.iterdir())
target_label_paths = sorted(label_dir.iterdir())
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# Output is 1024x512 at 25 fps: two 512x512 frames side by side.
out = cv2.VideoWriter('result/output.avi',fourcc, 25.0, (1024,512))
for nframe in range(len(target_label_paths)):
    print(str(nframe)+'/'+str(len(target_label_paths)))
    source_img = cv2.imread(str(source_img_paths[nframe]))
    target_label = cv2.imread(str(target_label_paths[nframe]))
    target_synth = cv2.imread(str(target_synth_paths[nframe]))
    source_img = cv2.resize(source_img,(512,512))
    target_label = cv2.resize(target_label,(512,512))
    target_synth = cv2.resize(target_synth,(512,512))
    # res=np.hstack((source_img,target_label))
    # Side-by-side composite: source on the left, synthesis on the right.
    res=np.hstack((source_img,target_synth))
    # cv2.imwrite("result/"+str(nframe)+".jpg",res)
    out.write(res)
out.release()
| [
"numpy.hstack",
"pathlib.Path",
"cv2.VideoWriter",
"cv2.VideoWriter_fourcc",
"cv2.resize"
] | [((211, 241), 'pathlib.Path', 'Path', (['"""./data/source/test_img"""'], {}), "('./data/source/test_img')\n", (215, 241), False, 'from pathlib import Path\n'), ((255, 298), 'pathlib.Path', 'Path', (['"""./results/target/test_latest/images"""'], {}), "('./results/target/test_latest/images')\n", (259, 298), False, 'from pathlib import Path\n'), ((353, 385), 'pathlib.Path', 'Path', (['"""./data/source/show_label"""'], {}), "('./data/source/show_label')\n", (357, 385), False, 'from pathlib import Path\n'), ((656, 687), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (678, 687), False, 'import cv2\n'), ((694, 757), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""result/output.avi"""', 'fourcc', '(25.0)', '(1024, 512)'], {}), "('result/output.avi', fourcc, 25.0, (1024, 512))\n", (709, 757), False, 'import cv2\n'), ((1063, 1097), 'cv2.resize', 'cv2.resize', (['source_img', '(512, 512)'], {}), '(source_img, (512, 512))\n', (1073, 1097), False, 'import cv2\n'), ((1115, 1151), 'cv2.resize', 'cv2.resize', (['target_label', '(512, 512)'], {}), '(target_label, (512, 512))\n', (1125, 1151), False, 'import cv2\n'), ((1169, 1205), 'cv2.resize', 'cv2.resize', (['target_synth', '(512, 512)'], {}), '(target_synth, (512, 512))\n', (1179, 1205), False, 'import cv2\n'), ((1259, 1296), 'numpy.hstack', 'np.hstack', (['(source_img, target_synth)'], {}), '((source_img, target_synth))\n', (1268, 1296), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
import os
from move_base_msgs.msg import MoveBaseActionResult
from numpy.random import choice
# Taken from icanhazdadjoke.com
jokes = [
    # Bug fix: the first string was missing its trailing comma, so Python's
    # implicit string concatenation silently merged jokes 1 and 2 into one.
    "I'm tired of following my dreams. I'm just going to ask them where they are going and meet up with them later.",
    "Did you hear about the guy whose whole left side was cut off? He's all right now.",
    "Why didn't the skeleton cross the road? Because he had no guts.",
    "What did one nut say as he chased another nut? I'm a cashew!",
    "Chances are if you' ve seen one shopping center, you've seen a mall.",
    "I knew I shouldn't steal a mixer from work, but it was a whisk I was willing to take.",
    "How come the stadium got hot after the game? Because all of the fans left.",
    "Why was it called the dark ages? Because of all the knights. ",
    "A steak pun is a rare medium well done.",
    "Why did the tomato blush? Because it saw the salad dressing.",
    "Did you hear the joke about the wandering nun? She was a roman catholic.",
    "What creature is smarter than a talking parrot? A spelling bee.",
    "I'll tell you what often gets over looked... garden fences.",
    "Why did the kid cross the playground? To get to the other slide.",
    "Why do birds fly south for the winter? Because it's too far to walk.",
    "What is a centipedes's favorite Beatle song? I want to hold your hand, hand, hand, hand...",
    "My first time using an elevator was an uplifting experience. The second time let me down.",
    "To be Frank, I'd have to change my name.",
    "Slept like a log last night ... woke up in the fireplace.",
    "Why does a Moon-rock taste better than an Earth-rock? Because it's a little meteor."
]
class DadJoke():
    """ROS node helper: speaks a random dad joke whenever move_base
    reports a navigation result."""
    def __init__(self):
        # Fire speak_joke on every /move_base/result message (i.e. each
        # time the robot finishes a navigation goal).
        self.fiducial_pose_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult,
                                                 self.speak_joke)
        self.ctrl_c = False
        # NOTE(review): self.rate is created but never used in this class.
        self.rate = rospy.Rate(10) # 10hz
        rospy.on_shutdown(self.shutdownhook)
    def speak_joke(self, result):
        """Callback: greet, speak one randomly chosen joke, say goodbye.

        Blocks on each espeak subprocess call; `result` is unused.
        """
        rospy.loginfo('deploy dad joke')
        os.system('espeak "Hello, here is your coffee ..."')
        os.system('espeak " ' + choice(jokes) + ' " ')
        os.system('espeak ' + '"Goodbye"')
    def shutdownhook(self):
        # works better than the rospy.is_shutdown()
        self.ctrl_c = True
if __name__ == '__main__':
    # Start the joke node and spin until ROS shuts down.
    rospy.init_node('dad_joke_node', anonymous=True)
    joker = DadJoke()
    rospy.loginfo('Ready for jokes')
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        # Fix: log message was misspelled ('exceptien...').
        rospy.loginfo('exception...')
    rospy.loginfo('shutting down')
"rospy.on_shutdown",
"numpy.random.choice",
"rospy.init_node",
"rospy.Rate",
"rospy.spin",
"os.system",
"rospy.Subscriber",
"rospy.loginfo"
] | [((2455, 2503), 'rospy.init_node', 'rospy.init_node', (['"""dad_joke_node"""'], {'anonymous': '(True)'}), "('dad_joke_node', anonymous=True)\n", (2470, 2503), False, 'import rospy\n'), ((2530, 2562), 'rospy.loginfo', 'rospy.loginfo', (['"""Ready for jokes"""'], {}), "('Ready for jokes')\n", (2543, 2562), False, 'import rospy\n'), ((2689, 2719), 'rospy.loginfo', 'rospy.loginfo', (['"""shutting down"""'], {}), "('shutting down')\n", (2702, 2719), False, 'import rospy\n'), ((1813, 1889), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/move_base/result"""', 'MoveBaseActionResult', 'self.speak_joke'], {}), "('/move_base/result', MoveBaseActionResult, self.speak_joke)\n", (1829, 1889), False, 'import rospy\n'), ((1975, 1989), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (1985, 1989), False, 'import rospy\n'), ((2005, 2041), 'rospy.on_shutdown', 'rospy.on_shutdown', (['self.shutdownhook'], {}), '(self.shutdownhook)\n', (2022, 2041), False, 'import rospy\n'), ((2089, 2121), 'rospy.loginfo', 'rospy.loginfo', (['"""deploy dad joke"""'], {}), "('deploy dad joke')\n", (2102, 2121), False, 'import rospy\n'), ((2130, 2182), 'os.system', 'os.system', (['"""espeak "Hello, here is your coffee ...\\""""'], {}), '(\'espeak "Hello, here is your coffee ..."\')\n', (2139, 2182), False, 'import os\n'), ((2246, 2280), 'os.system', 'os.system', (['(\'espeak \' + \'"Goodbye"\')'], {}), '(\'espeak \' + \'"Goodbye"\')\n', (2255, 2280), False, 'import os\n'), ((2581, 2593), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2591, 2593), False, 'import rospy\n'), ((2642, 2671), 'rospy.loginfo', 'rospy.loginfo', (['"""exceptien..."""'], {}), "('exceptien...')\n", (2655, 2671), False, 'import rospy\n'), ((2215, 2228), 'numpy.random.choice', 'choice', (['jokes'], {}), '(jokes)\n', (2221, 2228), False, 'from numpy.random import choice\n')] |
# Libraries
from random import choice, random, shuffle
import pandas as pd
import numpy as np
from math import exp, sqrt
# Import an Excel file into Python
file_name, sheet = "TSP.xlsx", "Arkusz1"
data = pd.read_excel(file_name, sheet_name = sheet, engine = 'openpyxl')
# Getting initial solution
solution = list(range(1, len(data) + 1)) # List of all job indexes
# Random initial tour for the annealing run.
shuffle(solution)
# assumes the sheet's first column holds row indexes and the rest is a
# square distance matrix -- TODO confirm against the Excel layout
m = np.delete(data.to_numpy(), 0, 1) # Delete the first array's column, because it contains indexes
def count_score(z, matrix=None): # Function responsible for counting score
    """Return the total length of the closed tour ``z``.

    Sums the travel times between consecutive cities, then adds the edge
    between the first and last cities (indexed ``[first, last]``, as in
    the original implementation) to close the loop.

    Parameters
    ----------
    z : list[int]
        Visiting order as 1-based city indexes (a permutation of 1..n).
    matrix : numpy.ndarray, optional
        Square distance matrix indexed as ``matrix[i, j]``.  Defaults to
        the module-level matrix ``m`` loaded from the Excel file, which
        preserves the original call sites.

    Returns
    -------
    numpy.double
        The tour length.
    """
    dist = m if matrix is None else matrix
    time_travel = np.double(0)
    # Generalized: iterate over the tour itself instead of relying on the
    # global matrix length (equivalent when len(z) == len(m)).
    for first_city, second_city in zip(z, z[1:]):
        time_travel += dist[first_city - 1, second_city - 1]
    # Edge closing the tour.
    time_travel += dist[z[0] - 1, z[-1] - 1]
    return time_travel
# Score of the random initial tour.
score = count_score(solution)
temperature = 1000 # Parameter 1 - setting annealing temperature
# NOTE(review): format() returns a string that is discarded here -- this
# line has no effect.
format(temperature, ".3f")
iterations = 1000 # Parameter 2 - neighbourhood size per temperature step
temperature_start = temperature # Copy of temperature in order to present initial value of this parameter
cooling_method = "" # Parameter 3 - filled in by the cooling function used
neighbourhood_type = "" # Filled in by the neighbourhood move used
def pick_two_towns():
    """Draw two distinct towns at random from the current tour.

    Both towns are redrawn until they differ; reads the module-level
    ``solution`` list.
    """
    while True:
        first, second = choice(solution), choice(solution)
        if first != second:
            return first, second
def swap_method(x, y): # The first type of neighbourhood
    """Return a copy of the candidate tour with positions x and y swapped.

    NOTE(review): this reads the module-level ``new_solution`` (set inside
    the main loop), not a parameter -- x and y are list positions, not
    town values. Verify this coupling before reuse.
    """
    copied_solution = new_solution.copy() # Copied solution which is used for experiments
    copied_solution[x], copied_solution[y] = copied_solution[y], copied_solution[x] # Swapping positions of these jobs in list
    neighbourhood_type = "swap"
    return copied_solution, neighbourhood_type
def insertion_method(t1, y): # The second type of neighbourhood
    """Return a copy of the candidate tour with town t1 re-inserted at
    position y - 1.

    NOTE(review): reads the module-level ``new_solution`` set by the main
    loop; t1 is a town value while y is a list position -- confirm the
    intended asymmetry at the call site.
    """
    copied_solution = new_solution.copy() # Copied solution which is used for experiments
    copied_solution.remove(t1)
    copied_solution.insert(y - 1, t1) # Inserting experimental solution with job 1 on the place of job 2
    neighbourhood_type = "insertion"
    return copied_solution, neighbourhood_type
# Temperature cooling methods
def arithmetic_cooling(x):
    """Lower the temperature by a fixed step of 1 (arithmetic schedule)."""
    return x - 1, "arithmetic"
def geometric_cooling(x):
    """Scale the temperature by a constant factor of 0.999 (geometric
    schedule)."""
    return 0.999 * x, "geometric"
def quadratic_multiplicative_cooling(x):
    """Divide the temperature by 1 + 0.0001*sqrt(iterations) each call.

    Reads the module-level ``iterations`` constant, so the divisor is the
    same on every call.
    """
    divisor = 1 + 0.0001 * sqrt(iterations)
    return x / divisor, "quadratic multiplicative"
def linear_multiplicative_cooling(x):
    """Divide the temperature by 1 + 0.0001*iterations each call.

    Reads the module-level ``iterations`` constant, so the divisor is the
    same on every call.
    """
    divisor = 1 + 0.0001 * iterations
    return x / divisor, "linear multiplicative"
# Main simulated-annealing loop: explore `iterations` neighbours per
# temperature step, then cool, until the temperature rounds to zero.
while round(temperature, 3) != 0.000: # Termination criteria
    i = 0
    while i != iterations: # Testing neighborhood - the number of generated possible solutions
        new_solution = solution.copy()
        t1, t2 = pick_two_towns() # Insertion mechanism - picking two random jobs
        x, y = solution.index(t1), solution.index(t2) # Obtaining index of each job
        new_solution, neighbourhood_type = insertion_method(t1, y) # swap method can be also used
        new_score = count_score(new_solution) # Calculating new score
        change_cost = new_score - score # Acceptance probability uses dE = new energy - energy
        # NOTE(review): np.double() returns a new value that is discarded
        # here -- this line has no effect.
        np.double(change_cost)
        if change_cost < 0: # Acceptance criterion: always accept improvements
            solution, score = new_solution.copy(), new_score
        else:
            # Metropolis criterion: accept a worse solution with
            # probability exp(-dE/T).
            if random() < exp(-change_cost/temperature): # Generating random number from interval [0, 1]
                solution, score = new_solution.copy(), new_score # Accepting worse solution
        i += 1
    temperature, cooling_method = arithmetic_cooling(temperature) # Reducing temperature - arithmetic approach
print(f"The results of Simulated Annealing algorithm for {file_name} file")
print(f"Solution: {solution}")
print(f"Score: {round(score, 3)}")
print(f"Start temperature: {temperature_start}")
print(f"Neighbourhood type: {neighbourhood_type}")
print(f"Temperature reduction method: {cooling_method}")
print(f"End temperature: {round(temperature, 3)}")
print(f"Neighbourhood size: {iterations}")
| [
"random.choice",
"numpy.double",
"random.shuffle",
"math.sqrt",
"pandas.read_excel",
"random.random",
"math.exp"
] | [((213, 274), 'pandas.read_excel', 'pd.read_excel', (['file_name'], {'sheet_name': 'sheet', 'engine': '"""openpyxl"""'}), "(file_name, sheet_name=sheet, engine='openpyxl')\n", (226, 274), True, 'import pandas as pd\n'), ((378, 395), 'random.shuffle', 'shuffle', (['solution'], {}), '(solution)\n', (385, 395), False, 'from random import choice, random, shuffle\n'), ((629, 651), 'numpy.double', 'np.double', (['time_travel'], {}), '(time_travel)\n', (638, 651), True, 'import numpy as np\n'), ((3505, 3527), 'numpy.double', 'np.double', (['change_cost'], {}), '(change_cost)\n', (3514, 3527), True, 'import numpy as np\n'), ((1360, 1376), 'random.choice', 'choice', (['solution'], {}), '(solution)\n', (1366, 1376), False, 'from random import choice, random, shuffle\n'), ((1378, 1394), 'random.choice', 'choice', (['solution'], {}), '(solution)\n', (1384, 1394), False, 'from random import choice, random, shuffle\n'), ((2581, 2597), 'math.sqrt', 'sqrt', (['iterations'], {}), '(iterations)\n', (2585, 2597), False, 'from math import exp, sqrt\n'), ((3674, 3682), 'random.random', 'random', ([], {}), '()\n', (3680, 3682), False, 'from random import choice, random, shuffle\n'), ((3685, 3716), 'math.exp', 'exp', (['(-change_cost / temperature)'], {}), '(-change_cost / temperature)\n', (3688, 3716), False, 'from math import exp, sqrt\n')] |
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.utils.data
import numpy as np
import math
import time
import os
import pickle
import random
import nmslib
import sys
from scipy.sparse import csr_matrix, lil_matrix, load_npz, hstack, vstack
from xclib.data import data_utils
from xclib.utils.sparse import normalize
import xclib.evaluation.xc_metrics as xc_metrics
from data import *
from utils import *
from network import HNSW
def predict(net, pred_batch):
    """
    Run the network on one batch in eval mode and return numpy outputs.

    Returns (scores, label_ids) as numpy arrays; label_ids is None when
    the batch carries no "label_ids" entry.

    head shorty None means predict OvA on head

    NOTE(review): torch.set_grad_enabled(False) flips the *global* grad
    state and is never re-enabled here -- confirm callers restore it
    before any further training.
    """
    net.eval()
    torch.set_grad_enabled(False)
    out_ans = net.forward(pred_batch, False)
    out_ans = out_ans.detach().cpu().numpy()
    if(pred_batch["label_ids"] is None):
        return out_ans, None
    return out_ans, pred_batch["label_ids"].detach().cpu().numpy()
def update_predicted(row_indices, predicted_batch_labels,
                     predicted_labels, remapping, top_k):
    """Scatter each row's top-k scores into the full prediction matrix.

    row_indices: 1-D array of destination rows in ``predicted_labels``.
    predicted_batch_labels: torch tensor of scores, shape (batch, labels).
    remapping: optional mapping from local label ids to global ids;
        when None the local ids are used directly.
    """
    n_rows = row_indices.shape[0]
    scores, local_ids = predicted_batch_labels.topk(
        k=top_k, dim=1, sorted=False)
    flat_ids = local_ids.cpu().numpy().flatten('C')
    # (row, column) coordinate pairs for every kept score.
    coords = np.zeros((top_k * n_rows, 2), dtype=np.int64)
    coords[:, 0] = np.repeat(row_indices, [top_k] * n_rows)
    if remapping is None:
        coords[:, 1] = [label_id for label_id in flat_ids]
    else:
        coords[:, 1] = [remapping[label_id] for label_id in flat_ids]
    flat_scores = scores.cpu().detach().numpy().flatten('C')
    predicted_labels[coords[:, 0], coords[:, 1]] = flat_scores
def update_predicted_shortlist(
        row_indices, predicted_batch_labels, predicted_labels, shortlist, remapping, top_k=10):
    """Scatter each row's top-k shortlist scores into ``predicted_labels``.

    predicted_batch_labels: numpy scores over the shortlist, shape
        (batch, shortlist_size); a 1-D array is treated as a single row.
    shortlist: (batch, shortlist_size) array of candidate label ids;
        the top-k scores are written at the corresponding shortlist ids.
    remapping: optional map from shortlist label id to final label id.
    """
    if(len(predicted_batch_labels.shape) == 1):
        predicted_batch_labels = predicted_batch_labels[None, :]
    m = predicted_batch_labels.shape[0]
    # Indices of the k highest scores per row (descending sort).
    top_indices = np.argsort(predicted_batch_labels, axis=1)[
        :, ::-1][:, :top_k]
    top_values = predicted_batch_labels[np.arange(m)[:, None], top_indices]
    batch_size = shortlist.shape[0]
    # Bug fix: dtype was np.int, an alias removed in NumPy 1.24; use the
    # explicit fixed-width dtype (matches update_predicted above).
    ind = np.zeros((top_k * batch_size, 2), dtype=np.int64)
    ind[:, 0] = np.repeat(row_indices, [top_k] * batch_size)
    if(remapping is not None):
        ind[:, 1] = [remapping[x]
                     for x in np.ravel(shortlist[np.arange(m)[:, None], top_indices])]
    else:
        ind[:, 1] = [x for x in np.ravel(
            shortlist[np.arange(m)[:, None], top_indices])]
    predicted_labels[ind[:, 0], ind[:, 1]] = np.ravel(top_values)
def run_validation(val_predicted_labels, tst_X_Y_val,
                   tst_exact_remove, tst_X_Y_trn, inv_prop,dir):
    """Filter predictions and compute recall/precision at several cutoffs.

    Drops any predicted label that appears in ``tst_exact_remove[i]`` or
    among row i's training labels (``tst_X_Y_trn``), rebuilds a CSR
    matrix, then scores it against ``tst_X_Y_val`` with the project's
    ``recall``/``precision`` helpers at k in {5,10,20,30,50,100}.

    Returns (recall_list, precision_list).  ``inv_prop`` is currently
    only used by the commented-out xc_metrics path.
    """
    # Rebuild the CSR arrays row by row, keeping only allowed labels.
    data = []
    indptr = [0]
    indices = []
    for i in range(val_predicted_labels.shape[0]):
        _indices1 = val_predicted_labels.indices[val_predicted_labels.indptr[i]: val_predicted_labels.indptr[i + 1]]
        _vals1 = val_predicted_labels.data[val_predicted_labels.indptr[i]: val_predicted_labels.indptr[i + 1]]
        _indices, _vals = [], []
        for _ind, _val in zip(_indices1, _vals1):
            if (_ind not in tst_exact_remove[i]) and (
                    _ind not in tst_X_Y_trn.indices[tst_X_Y_trn.indptr[i]: tst_X_Y_trn.indptr[i + 1]]):
                _indices.append(_ind)
                _vals.append(_val)
        indices += list(_indices)
        data += list(_vals)
        indptr.append(len(indices))
    _pred = csr_matrix(
        (data, indices, indptr), shape=(
            val_predicted_labels.shape))
    print()
    # acc = xc_metrics.Metrics(tst_X_Y_val, inv_psp=inv_prop)
    # acc = acc.eval(_pred, 5)
    recall_lis =[]
    prec_lis =[]
    for num in [5,10,20,30,50,100]:
        _rec = recall(tst_X_Y_val, _pred, num,dir)
        recall_lis.append(_rec)
        _prec = precision(tst_X_Y_val,_pred,num,dir)
        prec_lis.append(_prec)
    return recall_lis,prec_lis
def encode_nodes(net, context):
    """Encode a batch of graph nodes as the mean of three encoder depths.

    Runs the first/second/third layer encoders on progressively less
    nested views of ``context["encoder"]``, stacks their transposed
    outputs and averages across the three, returning one embedding
    matrix.

    NOTE(review): flips the global grad state off and never restores it.
    """
    net.eval()
    torch.set_grad_enabled(False)
    embed3 = net.third_layer_enc(context["encoder"])
    embed2 = net.second_layer_enc(context["encoder"]["node_feats"])
    embed1 = net.first_layer_enc(
        context["encoder"]["node_feats"]["node_feats"])
    # embed = torch.stack((net.transform1(embed1.t()), net.transform2(embed2.t()), net.transform3(embed3.t())), dim=1)
    embed = torch.stack((embed1.t(), embed2.t(), embed3.t()), dim=1)
    embed = torch.mean(embed, dim=1)
    return embed
def validate(head_net, params, partition_indices, label_remapping,
             label_embs, tst_point_embs, tst_X_Y_val, tst_exact_remove, tst_X_Y_trn, use_graph_embs, topK,dir):
    """End-to-end evaluation: build ANNS shortlists, score them, report.

    Steps: (1) optionally re-encode labels and test points through the
    graph network, (2) fit one HNSW index per label partition and fetch
    shortlists for every test point, (3) run the network over the
    shortlists and scatter top-K scores into a sparse matrix, (4) hand
    the result to run_validation and print the metrics.
    """
    # Test points occupy node ids [num_trn, num_trn + num_tst).
    _start = params["num_trn"]
    _end = _start + params["num_tst"]
    if(use_graph_embs):
        # Re-encode label nodes with the graph encoder.
        label_nodes = [label_remapping[i] for i in range(len(label_remapping))]
        val_dataset = DatasetGraphPredictionEncode(label_nodes)
        hce = GraphCollator(head_net, params["num_labels"], None, train=0)
        encode_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=500,
            num_workers=10,
            collate_fn=hce,
            shuffle=False,
            pin_memory=True)
        label_embs_graph = np.zeros(
            (len(label_nodes), params["hidden_dims"]), dtype=np.float32)
        cnt = 0
        for batch in encode_loader:
            # print (len(label_nodes), cnt*512)
            cnt += 1
            encoded = encode_nodes(head_net, batch)
            encoded = encoded.detach().cpu().numpy()
            label_embs_graph[batch["indices"]] = encoded
        # Re-encode the test points the same way.
        val_dataset = DatasetGraphPredictionEncode(
            [i for i in range(_start, _end)])
        hce = GraphCollator(head_net, params["num_labels"], None, train=0)
        encode_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=500,
            num_workers=10,
            collate_fn=hce,
            shuffle=False,
            pin_memory=True)
        tst_point_embs_graph = np.zeros(
            (params["num_tst"], params["hidden_dims"]), dtype=np.float32)
        for batch in encode_loader:
            encoded = encode_nodes(head_net, batch)
            encoded = encoded.detach().cpu().numpy()
            tst_point_embs_graph[batch["indices"]] = encoded
        label_features = label_embs_graph
        tst_point_features = tst_point_embs_graph
    else:
        # Use the precomputed embeddings as-is.
        label_features = label_embs
        tst_point_features = tst_point_embs[:params["num_tst"]]
    # One ANNS shortlist matrix per label partition.
    prediction_shortlists = []
    BATCH_SIZE = 2000000
    t1 = time.time()
    for i in range(len(partition_indices)):
        print("building ANNS for partition = ", i)
        label_NGS = HNSW(
            M=100,
            efC=300,
            efS=params["num_shortlist"],
            num_threads=24)
        label_NGS.fit(
            label_features[partition_indices[i][0]: partition_indices[i][1]])
        print("Done in ", time.time() - t1)
        t1 = time.time()
        tst_label_nbrs = np.zeros(
            (tst_point_features.shape[0],
             params["num_shortlist"]),
            dtype=np.int64)
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # partition index; harmless in Python's for semantics but fragile.
        for i in range(0, tst_point_features.shape[0], BATCH_SIZE):
            print(i)
            _tst_label_nbrs, _ = label_NGS.predict(
                tst_point_features[i: i + BATCH_SIZE], params["num_shortlist"])
            tst_label_nbrs[i: i + BATCH_SIZE] = _tst_label_nbrs
        prediction_shortlists.append(tst_label_nbrs)
        print("Done in ", time.time() - t1)
        t1 = time.time()
    if(len(partition_indices) == 1):
        prediction_shortlist = prediction_shortlists[0]
    else:
        prediction_shortlist = np.hstack(prediction_shortlists)
    print(prediction_shortlist.shape)
    del(prediction_shortlists)
    # Score the shortlisted labels with the network.
    val_dataset = DatasetGraphPrediction(_start, _end, prediction_shortlist)
    hcp = GraphCollator(head_net, params["num_labels"], None, train=0)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=512,
        num_workers=10,
        collate_fn=hcp,
        shuffle=False,
        pin_memory=True)
    val_data = dict(val_labels=tst_X_Y_val[:params["num_tst"], :],
                    val_loader=val_loader)
    val_predicted_labels = lil_matrix(val_data["val_labels"].shape)
    with torch.set_grad_enabled(False):
        for batch_idx, batch_data in enumerate(val_data["val_loader"]):
            val_preds, val_short = predict(head_net, batch_data)
            # Shift each partition's local shortlist ids to global ids.
            partition_length = val_short.shape[1] // len(partition_indices)
            for i in range(1, len(partition_indices)):
                val_short[:, i *
                          partition_length: (i +
                                             1) *
                          partition_length] += partition_indices[i][0]
            update_predicted_shortlist((batch_data["inputs"]) - _start, val_preds,
                                       val_predicted_labels, val_short, None, topK)
    acc = run_validation(val_predicted_labels.tocsr(
    ), val_data["val_labels"], tst_exact_remove, tst_X_Y_trn, params["inv_prop"],dir)
    print("acc = {}".format(acc))
| [
"scipy.sparse.lil_matrix",
"numpy.repeat",
"numpy.hstack",
"torch.mean",
"numpy.argsort",
"numpy.zeros",
"torch.utils.data.DataLoader",
"numpy.ravel",
"torch.set_grad_enabled",
"scipy.sparse.csr_matrix",
"time.time",
"numpy.arange",
"network.HNSW"
] | [((688, 717), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (710, 717), False, 'import torch\n'), ((1209, 1258), 'numpy.zeros', 'np.zeros', (['(top_k * batch_size, 2)'], {'dtype': 'np.int64'}), '((top_k * batch_size, 2), dtype=np.int64)\n', (1217, 1258), True, 'import numpy as np\n'), ((1275, 1319), 'numpy.repeat', 'np.repeat', (['row_indices', '([top_k] * batch_size)'], {}), '(row_indices, [top_k] * batch_size)\n', (1284, 1319), True, 'import numpy as np\n'), ((2155, 2202), 'numpy.zeros', 'np.zeros', (['(top_k * batch_size, 2)'], {'dtype': 'np.int'}), '((top_k * batch_size, 2), dtype=np.int)\n', (2163, 2202), True, 'import numpy as np\n'), ((2219, 2263), 'numpy.repeat', 'np.repeat', (['row_indices', '([top_k] * batch_size)'], {}), '(row_indices, [top_k] * batch_size)\n', (2228, 2263), True, 'import numpy as np\n'), ((2575, 2595), 'numpy.ravel', 'np.ravel', (['top_values'], {}), '(top_values)\n', (2583, 2595), True, 'import numpy as np\n'), ((3472, 3541), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, indices, indptr)'], {'shape': 'val_predicted_labels.shape'}), '((data, indices, indptr), shape=val_predicted_labels.shape)\n', (3482, 3541), False, 'from scipy.sparse import csr_matrix, lil_matrix, load_npz, hstack, vstack\n'), ((4005, 4034), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4027, 4034), False, 'import torch\n'), ((4448, 4472), 'torch.mean', 'torch.mean', (['embed'], {'dim': '(1)'}), '(embed, dim=1)\n', (4458, 4472), False, 'import torch\n'), ((6578, 6589), 'time.time', 'time.time', ([], {}), '()\n', (6587, 6589), False, 'import time\n'), ((7947, 8071), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': '(512)', 'num_workers': '(10)', 'collate_fn': 'hcp', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=512, num_workers=10,\n collate_fn=hcp, shuffle=False, pin_memory=True)\n', (7974, 8071), False, 
'import torch\n'), ((8256, 8296), 'scipy.sparse.lil_matrix', 'lil_matrix', (["val_data['val_labels'].shape"], {}), "(val_data['val_labels'].shape)\n", (8266, 8296), False, 'from scipy.sparse import csr_matrix, lil_matrix, load_npz, hstack, vstack\n'), ((5010, 5134), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': '(500)', 'num_workers': '(10)', 'collate_fn': 'hce', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=500, num_workers=10,\n collate_fn=hce, shuffle=False, pin_memory=True)\n', (5037, 5134), False, 'import torch\n'), ((5796, 5920), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': '(500)', 'num_workers': '(10)', 'collate_fn': 'hce', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=500, num_workers=10,\n collate_fn=hce, shuffle=False, pin_memory=True)\n', (5823, 5920), False, 'import torch\n'), ((6022, 6092), 'numpy.zeros', 'np.zeros', (["(params['num_tst'], params['hidden_dims'])"], {'dtype': 'np.float32'}), "((params['num_tst'], params['hidden_dims']), dtype=np.float32)\n", (6030, 6092), True, 'import numpy as np\n'), ((6705, 6770), 'network.HNSW', 'HNSW', ([], {'M': '(100)', 'efC': '(300)', 'efS': "params['num_shortlist']", 'num_threads': '(24)'}), "(M=100, efC=300, efS=params['num_shortlist'], num_threads=24)\n", (6709, 6770), False, 'from network import HNSW\n'), ((6978, 6989), 'time.time', 'time.time', ([], {}), '()\n', (6987, 6989), False, 'import time\n'), ((7016, 7101), 'numpy.zeros', 'np.zeros', (["(tst_point_features.shape[0], params['num_shortlist'])"], {'dtype': 'np.int64'}), "((tst_point_features.shape[0], params['num_shortlist']), dtype=np.int64\n )\n", (7024, 7101), True, 'import numpy as np\n'), ((7531, 7542), 'time.time', 'time.time', ([], {}), '()\n', (7540, 7542), False, 'import time\n'), ((7678, 7710), 'numpy.hstack', 'np.hstack', (['prediction_shortlists'], {}), '(prediction_shortlists)\n', 
(7687, 7710), True, 'import numpy as np\n'), ((8307, 8336), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (8329, 8336), False, 'import torch\n'), ((1947, 1989), 'numpy.argsort', 'np.argsort', (['predicted_batch_labels'], {'axis': '(1)'}), '(predicted_batch_labels, axis=1)\n', (1957, 1989), True, 'import numpy as np\n'), ((2059, 2071), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (2068, 2071), True, 'import numpy as np\n'), ((6947, 6958), 'time.time', 'time.time', ([], {}), '()\n', (6956, 6958), False, 'import time\n'), ((7500, 7511), 'time.time', 'time.time', ([], {}), '()\n', (7509, 7511), False, 'import time\n'), ((2379, 2391), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (2388, 2391), True, 'import numpy as np\n'), ((2491, 2503), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (2500, 2503), True, 'import numpy as np\n')] |
import hypothesis.extra.numpy as hnp
import numpy as np
from hypothesis import settings
from numpy.testing import assert_allclose
from mygrad.tensor_base import Tensor
from ..custom_strategies import adv_integer_index, basic_indices
from ..wrappers.uber import backprop_test_factory, fwdprop_test_factory
def test_getitem():
    """Unpacking a Tensor yields differentiable views: for
    f = 2a + 3b + 4c the gradients must equal the coefficients, and the
    parent tensor's gradient must collect all three."""
    x = Tensor([1, 2, 3])
    a, b, c = x
    f = 2 * a + 3 * b + 4 * c
    f.backward()
    assert a.data == 1
    assert b.data == 2
    assert c.data == 3
    assert f.data == 20
    assert_allclose(a.grad, np.array(2))
    assert_allclose(b.grad, np.array(3))
    assert_allclose(c.grad, np.array(4))
    assert_allclose(x.grad, np.array([2, 3, 4]))
def get_item(*arrs, index, constant=False):
    """Return ``arrs[0][index]``; if the result is a Tensor, set its
    ``_constant`` flag so the test factories can control gradient flow."""
    result = arrs[0][index]
    if isinstance(result, Tensor):
        result._constant = constant
    return result
def basic_index_wrap(*arrs):
    # Hypothesis strategy: basic (int/slice) indices valid for arrs[0].shape.
    return basic_indices(arrs[0].shape)
def adv_index_int_wrap(*arrs):
    # Hypothesis strategy: advanced integer-array indices for arrs[0].shape.
    return adv_integer_index(arrs[0].shape)
def adv_index_bool_wrap(*arrs):
    # Hypothesis strategy: boolean masks with exactly arrs[0]'s shape.
    return hnp.arrays(shape=arrs[0].shape, dtype=bool)
# Factory-generated fwdprop/backprop test pairs: each pair checks `get_item`
# against NumPy's own indexing for one family of index expressions, on
# hypothesis-generated arrays of up to 4 dims / side 6.

# Basic indexing (ints, slices, ellipsis, newaxis).
@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: hnp.array_shapes(max_side=6, max_dims=4)},
    kwargs=dict(index=basic_index_wrap),
)
def test_getitem_basicindex_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: hnp.array_shapes(max_side=6, max_dims=4)},
    kwargs=dict(index=basic_index_wrap),
    vary_each_element=True,
)
def test_getitem_basicindex_bkwdprop():
    pass


# Advanced indexing with integer arrays.
@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: hnp.array_shapes(max_side=6, max_dims=4)},
    kwargs=dict(index=adv_index_int_wrap),
)
def test_getitem_advindex_int_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: hnp.array_shapes(max_side=6, max_dims=4)},
    kwargs=dict(index=adv_index_int_wrap),
    vary_each_element=True,
)
def test_getitem_advindex_int_bkwdprop():
    pass


# Advanced indexing with a boolean mask of the array's full shape.
@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: hnp.array_shapes(max_side=6, max_dims=4)},
    kwargs=dict(index=adv_index_bool_wrap),
)
def test_getitem_advindex_bool_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: hnp.array_shapes(max_side=6, max_dims=4)},
    kwargs=dict(index=adv_index_bool_wrap),
    vary_each_element=True,
)
def test_getitem_advindex_bool_bkwdprop():
    pass
# Broadcast-compatible integer-array indices: np.ix_ builds an open mesh from
# the two 1-D index arrays, selecting the (rows x columns) sub-grid.
rows = np.array([0, 3], dtype=np.intp)
columns = np.array([0, 2], dtype=np.intp)


@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (4, 3)},
    kwargs=dict(index=np.ix_(rows, columns)),
)
def test_getitem_broadcast_index_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (4, 3)},
    kwargs=dict(index=np.ix_(rows, columns)),
    vary_each_element=True,
)
def test_getitem_broadcast_index_bkprop():
    pass


# Ellipsis index on a 4-D array: `...` fills in the leading axes.
@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (3, 2, 4, 3)},
    kwargs=dict(index=(Ellipsis, 2, 0)),
)
def test_getitem_ellipsis_index_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (3, 2, 4, 3)},
    kwargs=dict(index=(Ellipsis, 2, 0)),
    vary_each_element=True,
)
def test_getitem_ellipsis_index_bkprop():
    pass
# Mixed boolean / integer index arrays: np.ix_ converts the boolean array to
# the integer positions of its True entries before building the open mesh.
rows1 = np.array([False, True, False, True])
columns1 = [0, 2]


@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (4, 3)},
    kwargs=dict(index=np.ix_(rows1, columns1)),
)
def test_getitem_bool_int_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (4, 3)},
    kwargs=dict(index=np.ix_(rows1, columns1)),
    vary_each_element=True,
)
def test_getitem_bool_int_bkprop():
    pass


# Basic index (slice) combined with an advanced index (integer list).
@fwdprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (4, 3)},
    kwargs=dict(index=(slice(1, 2), [1, 2])),
)
def test_getitem_basic_w_adv_fwdprop():
    pass


@settings(deadline=None)
@backprop_test_factory(
    mygrad_func=get_item,
    true_func=get_item,
    num_arrays=1,
    index_to_arr_shapes={0: (4, 3)},
    kwargs=dict(index=(slice(1, 2), [1, 2])),
    vary_each_element=True,
)
def test_getitem_basic_w_adv_bkprop():
    pass
| [
"mygrad.tensor_base.Tensor",
"hypothesis.extra.numpy.arrays",
"numpy.ix_",
"numpy.array",
"hypothesis.settings",
"hypothesis.extra.numpy.array_shapes"
] | [((1323, 1346), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (1331, 1346), False, 'from hypothesis import settings\n'), ((1892, 1915), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (1900, 1915), False, 'from hypothesis import settings\n'), ((2467, 2490), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (2475, 2490), False, 'from hypothesis import settings\n'), ((2828, 2859), 'numpy.array', 'np.array', (['[0, 3]'], {'dtype': 'np.intp'}), '([0, 3], dtype=np.intp)\n', (2836, 2859), True, 'import numpy as np\n'), ((2870, 2901), 'numpy.array', 'np.array', (['[0, 2]'], {'dtype': 'np.intp'}), '([0, 2], dtype=np.intp)\n', (2878, 2901), True, 'import numpy as np\n'), ((3136, 3159), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (3144, 3159), False, 'from hypothesis import settings\n'), ((3651, 3674), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (3659, 3674), False, 'from hypothesis import settings\n'), ((3942, 3978), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (3950, 3978), True, 'import numpy as np\n'), ((4226, 4249), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (4234, 4249), False, 'from hypothesis import settings\n'), ((4732, 4755), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (4740, 4755), False, 'from hypothesis import settings\n'), ((337, 354), 'mygrad.tensor_base.Tensor', 'Tensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (343, 354), False, 'from mygrad.tensor_base import Tensor\n'), ((1021, 1064), 'hypothesis.extra.numpy.arrays', 'hnp.arrays', ([], {'shape': 'arrs[0].shape', 'dtype': 'bool'}), '(shape=arrs[0].shape, dtype=bool)\n', (1031, 1064), True, 'import hypothesis.extra.numpy as hnp\n'), ((541, 552), 'numpy.array', 'np.array', (['(2)'], {}), 
'(2)\n', (549, 552), True, 'import numpy as np\n'), ((582, 593), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (590, 593), True, 'import numpy as np\n'), ((623, 634), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (631, 634), True, 'import numpy as np\n'), ((664, 683), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (672, 683), True, 'import numpy as np\n'), ((1186, 1226), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'max_side': '(6)', 'max_dims': '(4)'}), '(max_side=6, max_dims=4)\n', (1202, 1226), True, 'import hypothesis.extra.numpy as hnp\n'), ((1467, 1507), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'max_side': '(6)', 'max_dims': '(4)'}), '(max_side=6, max_dims=4)\n', (1483, 1507), True, 'import hypothesis.extra.numpy as hnp\n'), ((1751, 1791), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'max_side': '(6)', 'max_dims': '(4)'}), '(max_side=6, max_dims=4)\n', (1767, 1791), True, 'import hypothesis.extra.numpy as hnp\n'), ((2036, 2076), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'max_side': '(6)', 'max_dims': '(4)'}), '(max_side=6, max_dims=4)\n', (2052, 2076), True, 'import hypothesis.extra.numpy as hnp\n'), ((2324, 2364), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'max_side': '(6)', 'max_dims': '(4)'}), '(max_side=6, max_dims=4)\n', (2340, 2364), True, 'import hypothesis.extra.numpy as hnp\n'), ((2611, 2651), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'max_side': '(6)', 'max_dims': '(4)'}), '(max_side=6, max_dims=4)\n', (2627, 2651), True, 'import hypothesis.extra.numpy as hnp\n'), ((3054, 3075), 'numpy.ix_', 'np.ix_', (['rows', 'columns'], {}), '(rows, columns)\n', (3060, 3075), True, 'import numpy as np\n'), ((3311, 3332), 'numpy.ix_', 'np.ix_', (['rows', 'columns'], {}), '(rows, columns)\n', (3317, 3332), True, 'import numpy as np\n'), ((4149, 4172), 'numpy.ix_', 'np.ix_', (['rows1', 'columns1'], {}), 
'(rows1, columns1)\n', (4155, 4172), True, 'import numpy as np\n'), ((4401, 4424), 'numpy.ix_', 'np.ix_', (['rows1', 'columns1'], {}), '(rows1, columns1)\n', (4407, 4424), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
from __future__ import division
import numpy
import sympy
from ..helpers import untangle
class WissmannBecker(object):
    """
    Wissmann and Becker,
    Partially Symmetric Cubature Formulas for Even Degrees of Exactness,
    SIAM J. Numer. Anal., 23(3), 676–685, 10 pages,
    <https://doi.org/10.1137/0723043>.

    Cubature schemes on the square, symmetric about one axis only
    ("partially symmetric").  ``index`` selects a published scheme
    ("4-1", "4-2", "6-1", "6-2", "8-1", or "8-2"); the leading number is
    the polynomial degree of exactness.  Each ``data`` entry is a
    (weight, points) pair, where ``_z(a)`` is a single point on the axis
    of symmetry and ``_m(a, b)`` is a mirror pair.  With ``symbolic=True``
    the one rational weight is kept exact via sympy.
    """
    def __init__(self, index, symbolic=False):
        # Exact rationals when symbolic, plain float division otherwise.
        frac = sympy.Rational if symbolic else lambda x, y: x / y

        self.name = "WB({})".format(index)
        if index == "4-1":
            self.degree = 4
            data = [
                (frac(8, 7), _z(0)),
                (0.439560439560440, _z(0.966091783079296)),
                (0.566072207007532, _m(0.851914653304601, 0.455603727836193)),
                (0.642719001783677, _m(0.630912788976754, -0.731629951573135)),
            ]
        elif index == "4-2":
            self.degree = 4
            data = [
                (1.286412084888852, _z(-0.356822089773090)),
                (0.491365692888926, _z(0.934172358962716)),
                (0.761883709085613, _m(0.774596669241483, 0.390885162530071)),
                (0.349227402025498, _m(0.774596669241483, -0.852765377881771)),
            ]
        elif index == "6-1":
            self.degree = 6
            data = [
                (0.455343245714174, _z(0.836405633697626)),
                (0.827395973202966, _z(-0.357460165391307)),
                (0.144000884599645, _m(0.888764014654765, 0.872101531193131)),
                (0.668259104262665, _m(0.604857639464685, 0.305985162155427)),
                (0.225474004890679, _m(0.955447506641064, -0.410270899466658)),
                (0.320896396788441, _m(0.565459993438754, -0.872869311156879)),
            ]
        elif index == "6-2":
            self.degree = 6
            data = [
                (0.392750590964348, _z(0.869833375250059)),
                (0.754762881242610, _z(-0.479406351612111)),
                (0.206166050588279, _m(0.863742826346154, 0.802837516207657)),
                (0.689992138489864, _m(0.518690521392582, 0.262143665508058)),
                (0.260517488732317, _m(0.933972544972849, -0.363096583148066)),
                (0.269567586086061, _m(0.608977536016356, -0.896608632762453)),
            ]
        elif index == "8-1":
            self.degree = 8
            data = [
                (0.055364705621440, _z(0)),
                (0.404389368726076, _z(0.757629177660505)),
                (0.533546604952635, _z(-0.236871842255702)),
                (0.117054188786739, _z(-0.989717929044527)),
                (0.125614417613747, _m(0.639091304900370, 0.950520955645667)),
                (0.136544584733588, _m(0.937069076924990, 0.663882736885633)),
                (0.483408479211257, _m(0.537083530541494, 0.304210681724104)),
                (0.252528506429544, _m(0.887188506449625, -0.236496718536120)),
                (0.361262323882172, _m(0.494698820670197, -0.698953476086564)),
                (0.085464254086247, _m(0.897495818279768, -0.900390774211580)),
            ]
        else:
            assert index == "8-2"
            self.degree = 8
            data = [
                (0.450276776305590, _z(0.659560131960342)),
                (0.166570426777813, _z(-0.949142923043125)),
                (0.098869459933431, _m(0.952509466071562, 0.765051819557684)),
                (0.153696747140812, _m(0.532327454074206, 0.936975981088416)),
                (0.396686976072903, _m(0.684736297951735, 0.333656717735747)),
                (0.352014367945695, _m(0.233143240801405, -0.079583272377397)),
                (0.189589054577798, _m(0.927683319306117, -0.272240080612534)),
                (0.375101001147587, _m(0.453120687403749, -0.613735353398028)),
                (0.125618791640072, _m(0.837503640422812, -0.888477650535971)),
            ]
        # Flatten the (weight, point-group) pairs into flat arrays.
        self.points, self.weights = untangle(data)
        return
def _z(a):
return numpy.array([[0, a]])
def _m(a, b):
return numpy.array([[+a, +b], [-a, +b]])
| [
"numpy.array"
] | [((3965, 3986), 'numpy.array', 'numpy.array', (['[[0, a]]'], {}), '([[0, a]])\n', (3976, 3986), False, 'import numpy\n'), ((4014, 4047), 'numpy.array', 'numpy.array', (['[[+a, +b], [-a, +b]]'], {}), '([[+a, +b], [-a, +b]])\n', (4025, 4047), False, 'import numpy\n')] |
from keras.layers import Conv2D, Input,MaxPool2D, Reshape,Activation,Flatten, Dense
from keras.models import Model, Sequential
from keras.layers.advanced_activations import PReLU
from keras.optimizers import adam
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
import _pickle as pickle
import random
from keras.activations import relu
from keras.losses import mean_squared_error
import tensorflow as tf
import gc
import keras
import os
import msgpack
import msgpack_numpy as m
import tables
import sys
sys.path.append('/home/wk/e/mtcnn/keras-mtcnn/')
from MTCNNx import masked_cls, masked_bbox, masked_landmark, combine_cls_bbox_landmark
# Patch size for the O-net stage.
S=48
cache_file = 'cache{}w.h5'.format(S)
if os.path.exists(cache_file):
    # Combined training data already cached: load arrays straight from HDF5.
    h5 = tables.open_file(cache_file, mode='r')
    ims_all = h5.root.ims.read()
    labels_all = h5.root.labels.read()
    h5.close()
else:
    # Build the cache from the three pickled datasets:
    # classification (cls), bounding-box regression (roi), landmarks (pts).
    with open(r'../48netw/48/pts.imdb', 'rb') as fid:
        pts = pickle.load(fid)
    with open(r'../48netw/48/cls.imdb','rb') as fid:
        cls = pickle.load(fid)
    with open(r'../48netw/48/roi.imdb', 'rb') as fid:
        roi = pickle.load(fid)
    ims_cls = []
    ims_pts = []
    ims_roi = []
    cls_score = []
    pts_score = []
    roi_score = []
    # Images are stored channel-first in the pickles; swap axes 0<->2 to get
    # the channel-last layout Keras expects.  The label lives at a different
    # tuple position in each dataset (1: cls, 2: roi, 3: pts).
    for (idx, dataset) in enumerate(cls) :
        ims_cls.append( np.swapaxes(dataset[0],0,2))
        cls_score.append(dataset[1])
    for (idx,dataset) in enumerate(roi) :
        ims_roi.append( np.swapaxes(dataset[0],0,2))
        roi_score.append(dataset[2])
    for (idx,dataset) in enumerate(pts) :
        ims_pts.append( np.swapaxes(dataset[0],0,2))
        pts_score.append(dataset[3])
    ims_cls = np.array(ims_cls)
    ims_pts = np.array(ims_pts)
    ims_roi = np.array(ims_roi)
    cls_score = np.array(cls_score)
    pts_score = np.array(pts_score)
    roi_score = np.array(roi_score)
    # Face / non-face becomes a 2-way one-hot vector.
    one_hot_labels = to_categorical(cls_score, num_classes=2)
    gc.collect()
    # Merge the three task-specific datasets into one multi-task array pair.
    ims_all,labels_all = combine_cls_bbox_landmark(ims_cls, one_hot_labels, ims_roi, roi_score, ims_pts, pts_score)
    del ims_cls, one_hot_labels, ims_roi, roi_score, ims_pts, pts_score
    # Persist the merged arrays so subsequent runs skip this preprocessing.
    h5 = tables.open_file(cache_file, mode='w', title='All')
    h5.create_array(h5.root, 'ims', ims_all)
    h5.create_array(h5.root, 'labels', labels_all)
    h5.close()
from MTCNNx import create_Kao_Onet
# Resume from (or initialize with) previously saved O-net weights.
model = create_Kao_Onet(r'model48.h5')
lr = 0.05
batch_size = 1024*10
# 10 rounds of 2 epochs each, halving the learning rate every round
# (effective lr starts at 0.025 since it is halved before the first fit).
for i_train in range(10):
    print('round ', i_train)
    lr = lr * 0.5
    my_adam = adam(lr = lr)
    # Multi-task losses: the masked_* losses ignore samples that carry no
    # label for that head.
    loss_list = {
        'cls':masked_cls,
        'bbox':masked_bbox,
        'landmark':masked_landmark
    }
    # Landmark loss is weighted 0 here, so only cls and bbox drive training.
    loss_weights_list = {
        'cls': 0.5,
        'bbox': 0.5,
        'landmark': 0.0
    }
    metrics_list = {
        'cls':'accuracy',
        'bbox': masked_bbox,
        'landmark':masked_landmark
    }
    #parallel_model = keras.utils.multi_gpu_model(model, gpus=2)
    #parallel_model.compile(loss=loss_list, optimizer = my_adam, loss_weights=loss_weights_list, metrics=metrics_list)
    #parallel_model.fit([ims_all], [labels_all, labels_all, labels_all], batch_size=batch_size, epochs=1)
    model.compile(loss=loss_list, optimizer = my_adam, loss_weights=loss_weights_list, metrics=metrics_list)
    # The same combined label array feeds all three output heads; each masked
    # loss extracts its own slice.
    history = model.fit([ims_all], [labels_all, labels_all, labels_all], batch_size=batch_size, epochs=2)
    #print(history.history[''])
    # Checkpoint after every round so progress survives interruption.
    model.save_weights('model48.h5')
    print('model saved')
| [
"os.path.exists",
"MTCNNx.create_Kao_Onet",
"MTCNNx.combine_cls_bbox_landmark",
"tables.open_file",
"_pickle.load",
"keras.utils.to_categorical",
"numpy.swapaxes",
"numpy.array",
"gc.collect",
"sys.path.append",
"keras.optimizers.adam"
] | [((571, 619), 'sys.path.append', 'sys.path.append', (['"""/home/wk/e/mtcnn/keras-mtcnn/"""'], {}), "('/home/wk/e/mtcnn/keras-mtcnn/')\n", (586, 619), False, 'import sys\n'), ((753, 779), 'os.path.exists', 'os.path.exists', (['cache_file'], {}), '(cache_file)\n', (767, 779), False, 'import os\n'), ((2377, 2406), 'MTCNNx.create_Kao_Onet', 'create_Kao_Onet', (['"""model48.h5"""'], {}), "('model48.h5')\n", (2392, 2406), False, 'from MTCNNx import create_Kao_Onet\n'), ((790, 828), 'tables.open_file', 'tables.open_file', (['cache_file'], {'mode': '"""r"""'}), "(cache_file, mode='r')\n", (806, 828), False, 'import tables\n'), ((1701, 1718), 'numpy.array', 'np.array', (['ims_cls'], {}), '(ims_cls)\n', (1709, 1718), True, 'import numpy as np\n'), ((1733, 1750), 'numpy.array', 'np.array', (['ims_pts'], {}), '(ims_pts)\n', (1741, 1750), True, 'import numpy as np\n'), ((1765, 1782), 'numpy.array', 'np.array', (['ims_roi'], {}), '(ims_roi)\n', (1773, 1782), True, 'import numpy as np\n'), ((1799, 1818), 'numpy.array', 'np.array', (['cls_score'], {}), '(cls_score)\n', (1807, 1818), True, 'import numpy as np\n'), ((1835, 1854), 'numpy.array', 'np.array', (['pts_score'], {}), '(pts_score)\n', (1843, 1854), True, 'import numpy as np\n'), ((1871, 1890), 'numpy.array', 'np.array', (['roi_score'], {}), '(roi_score)\n', (1879, 1890), True, 'import numpy as np\n'), ((1912, 1952), 'keras.utils.to_categorical', 'to_categorical', (['cls_score'], {'num_classes': '(2)'}), '(cls_score, num_classes=2)\n', (1926, 1952), False, 'from keras.utils import to_categorical\n'), ((1957, 1969), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1967, 1969), False, 'import gc\n'), ((1996, 2090), 'MTCNNx.combine_cls_bbox_landmark', 'combine_cls_bbox_landmark', (['ims_cls', 'one_hot_labels', 'ims_roi', 'roi_score', 'ims_pts', 'pts_score'], {}), '(ims_cls, one_hot_labels, ims_roi, roi_score,\n ims_pts, pts_score)\n', (2021, 2090), False, 'from MTCNNx import masked_cls, masked_bbox, masked_landmark, 
combine_cls_bbox_landmark\n'), ((2169, 2220), 'tables.open_file', 'tables.open_file', (['cache_file'], {'mode': '"""w"""', 'title': '"""All"""'}), "(cache_file, mode='w', title='All')\n", (2185, 2220), False, 'import tables\n'), ((2527, 2538), 'keras.optimizers.adam', 'adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (2531, 2538), False, 'from keras.optimizers import adam\n'), ((995, 1011), '_pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (1006, 1011), True, 'import _pickle as pickle\n'), ((1079, 1095), '_pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (1090, 1095), True, 'import _pickle as pickle\n'), ((1164, 1180), '_pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (1175, 1180), True, 'import _pickle as pickle\n'), ((1356, 1385), 'numpy.swapaxes', 'np.swapaxes', (['dataset[0]', '(0)', '(2)'], {}), '(dataset[0], 0, 2)\n', (1367, 1385), True, 'import numpy as np\n'), ((1488, 1517), 'numpy.swapaxes', 'np.swapaxes', (['dataset[0]', '(0)', '(2)'], {}), '(dataset[0], 0, 2)\n', (1499, 1517), True, 'import numpy as np\n'), ((1620, 1649), 'numpy.swapaxes', 'np.swapaxes', (['dataset[0]', '(0)', '(2)'], {}), '(dataset[0], 0, 2)\n', (1631, 1649), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import json
import threading
import io
import numpy as np
import mlflow
from flask import send_file
from PIL import Image
from queue import Queue
from backwardcompatibilityml.helpers import training
from backwardcompatibilityml.metrics import model_accuracy
class SweepManager(object):
    """
    The SweepManager class is used to manage an experiment that performs
    training / updating a model h2, with respect to a reference model h1
    in a way that preserves compatibility between the models. The experiment
    performs a sweep of the parameter space of the regularization parameter
    lambda_c, by performing compatibility trainings for small increments
    in the value of lambda_c for some settable step size.

    The sweep manager can run the sweep experiment either synchronously,
    or within a separate thread. In the latter case, it provides some
    helper functions that allow you to check on the percentage of the
    sweep that is complete.

    Args:
        folder_name: A string value representing the full path of the
            folder wehre the result of the compatibility sweep is to be stored.
        number_of_epochs: The number of training epochs to use on each sweep.
        h1: The reference model being used.
        h2: The new model being traind / updated.
        training_set: The list of training samples as (batch_ids, input, target).
        test_set: The list of testing samples as (batch_ids, input, target).
        batch_size_train: An integer representing batch size of the training set.
        batch_size_test: An integer representing the batch size of the test set.
        OptimizerClass: The class to instantiate an optimizer from for training.
        optimizer_kwargs: A dictionary of the keyword arguments to be used to
            instantiate the optimizer.
        NewErrorLossClass: The class of the New Error style loss function to
            be instantiated and used to perform compatibility constrained
            training of our model h2.
        StrictImitationLossClass: The class of the Strict Imitation style loss
            function to be instantiated and used to perform compatibility
            constrained training of our model h2.
        performance_metric: Optional performance metric to be used when evaluating the model.
            If not specified then accuracy is used.
        lambda_c_stepsize: The increments of lambda_c to use as we sweep the parameter
            space between 0.0 and 1.0.
        get_instance_image_by_id: A function that returns an image representation of the data corresponding to the instance id, in PNG format. It should be a function of the form:
                get_instance_image_by_id(instance_id)
                    instance_id: An integer instance id
            And should return a PNG image.
        get_instance_metadata: A function that returns a text string representation of some metadata corresponding to the instance id. It should be a function of the form:
                get_instance_metadata(instance_id)
                    instance_id: An integer instance id
            And should return a string.
        device: A string with values either "cpu" or "cuda" to indicate the
            device that Pytorch is performing training on. By default this
            value is "cpu". But in case your models reside on the GPU, make sure
            to set this to "cuda". This makes sure that the input and target
            tensors are transferred to the GPU during training.
        use_ml_flow: A boolean flag controlling whether or not to log the sweep
            with MLFlow. If true, an MLFlow run will be created with the name
            specified by ml_flow_run_name.
        ml_flow_run_name: A string that configures the name of the MLFlow run.
    """

    def __init__(self, folder_name, number_of_epochs, h1, h2, training_set, test_set,
                 batch_size_train, batch_size_test,
                 OptimizerClass, optimizer_kwargs,
                 NewErrorLossClass, StrictImitationLossClass, lambda_c_stepsize=0.25,
                 new_error_loss_kwargs=None,
                 strict_imitation_loss_kwargs=None,
                 performance_metric=model_accuracy,
                 get_instance_image_by_id=None,
                 get_instance_metadata=None,
                 device="cpu",
                 use_ml_flow=False,
                 ml_flow_run_name="compatibility_sweep"):
        # See the class docstring for the meaning of each argument.
        self.folder_name = folder_name
        self.number_of_epochs = number_of_epochs
        self.h1 = h1
        self.h2 = h2
        self.training_set = training_set
        self.test_set = test_set
        self.batch_size_train = batch_size_train
        self.batch_size_test = batch_size_test
        self.OptimizerClass = OptimizerClass
        self.optimizer_kwargs = optimizer_kwargs
        self.NewErrorLossClass = NewErrorLossClass
        self.StrictImitationLossClass = StrictImitationLossClass
        self.performance_metric = performance_metric
        self.lambda_c_stepsize = lambda_c_stepsize
        self.new_error_loss_kwargs = new_error_loss_kwargs
        self.strict_imitation_loss_kwargs = strict_imitation_loss_kwargs
        self.get_instance_image_by_id = get_instance_image_by_id
        self.get_instance_metadata = get_instance_metadata
        self.device = device
        self.use_ml_flow = use_ml_flow
        self.ml_flow_run_name = ml_flow_run_name
        # Progress fraction last drained from the queue (0.0 - 1.0).
        self.last_sweep_status = 0.0
        # The worker thread pushes progress fractions through this queue.
        self.percent_complete_queue = Queue()
        self.sweep_thread = None

    def start_sweep(self):
        """Start the compatibility sweep on a background thread.

        No-op if a sweep is already running. Resets the progress queue
        and status before launching.
        """
        if self.is_running():
            return
        self.percent_complete_queue = Queue()
        self.last_sweep_status = 0.0
        self.sweep_thread = threading.Thread(
            target=training.compatibility_sweep,
            args=(self.folder_name, self.number_of_epochs, self.h1, self.h2,
                  self.training_set, self.test_set,
                  self.batch_size_train, self.batch_size_test,
                  self.OptimizerClass, self.optimizer_kwargs,
                  self.NewErrorLossClass, self.StrictImitationLossClass,
                  self.performance_metric,),
            kwargs={
                "lambda_c_stepsize": self.lambda_c_stepsize,
                "percent_complete_queue": self.percent_complete_queue,
                "new_error_loss_kwargs": self.new_error_loss_kwargs,
                "strict_imitation_loss_kwargs": self.strict_imitation_loss_kwargs,
                "get_instance_metadata": self.get_instance_metadata,
                "device": self.device,
                "use_ml_flow": self.use_ml_flow,
                "ml_flow_run_name": self.ml_flow_run_name
            })
        self.sweep_thread.start()

    def is_running(self):
        """Return True if a background sweep thread is currently alive."""
        if not self.sweep_thread:
            return False

        return self.sweep_thread.is_alive()

    def start_sweep_synchronous(self):
        """Run the compatibility sweep on the calling thread, blocking until done."""
        # NOTE(review): unlike start_sweep, this call does not pass
        # self.performance_metric to compatibility_sweep — confirm whether the
        # default metric is intended here.
        training.compatibility_sweep(
            self.folder_name, self.number_of_epochs, self.h1, self.h2, self.training_set, self.test_set,
            self.batch_size_train, self.batch_size_test,
            self.OptimizerClass, self.optimizer_kwargs,
            self.NewErrorLossClass, self.StrictImitationLossClass,
            lambda_c_stepsize=self.lambda_c_stepsize, percent_complete_queue=self.percent_complete_queue,
            new_error_loss_kwargs=self.new_error_loss_kwargs,
            strict_imitation_loss_kwargs=self.strict_imitation_loss_kwargs,
            get_instance_metadata=self.get_instance_metadata,
            device=self.device)

    def get_sweep_status(self):
        """Drain the progress queue and return the most recent completion fraction."""
        if not self.percent_complete_queue.empty():
            while not self.percent_complete_queue.empty():
                self.last_sweep_status = self.percent_complete_queue.get()

        return self.last_sweep_status

    def get_sweep_summary(self):
        """Return the sweep summary loaded from sweep_summary.json, if present.

        Falls back to an empty summary (no h1 performance, no data points)
        when the file does not exist yet.
        """
        sweep_summary = {
            "h1_performance": None,
            "performance_metric": self.performance_metric.__name__,
            "data": []
        }
        if os.path.exists(f"{self.folder_name}/sweep_summary.json"):
            with open(f"{self.folder_name}/sweep_summary.json", "r") as sweep_summary_file:
                loaded_sweep_summary = json.loads(sweep_summary_file.read())
                sweep_summary.update(loaded_sweep_summary)

        return sweep_summary

    def get_evaluation(self, evaluation_id):
        """Load and return the JSON evaluation data for the given evaluation id."""
        with open(f"{self.folder_name}/{evaluation_id}-evaluation-data.json", "r") as evaluation_data_file:
            evaluation_data = json.loads(evaluation_data_file.read())

        return evaluation_data

    def get_instance_image(self, instance_id):
        """Return a PNG image response for the instance id.

        Uses the user-supplied get_instance_image_by_id callback when
        available; otherwise serves a blank 30x30 white placeholder.
        """
        get_instance_image_by_id = self.get_instance_image_by_id
        if get_instance_image_by_id is not None:
            return get_instance_image_by_id(instance_id)

        # Generate a blank white PNG image as the default
        data = np.uint8(np.zeros((30, 30)) + 255)
        image = Image.fromarray(data)
        img_bytes = io.BytesIO()
        image.save(img_bytes, format="PNG")
        img_bytes.seek(0)
        return send_file(img_bytes, mimetype="image/png")
| [
"os.path.exists",
"PIL.Image.fromarray",
"io.BytesIO",
"numpy.zeros",
"flask.send_file",
"threading.Thread",
"queue.Queue",
"backwardcompatibilityml.helpers.training.compatibility_sweep"
] | [((5589, 5596), 'queue.Queue', 'Queue', ([], {}), '()\n', (5594, 5596), False, 'from queue import Queue\n'), ((5745, 5752), 'queue.Queue', 'Queue', ([], {}), '()\n', (5750, 5752), False, 'from queue import Queue\n'), ((5818, 6569), 'threading.Thread', 'threading.Thread', ([], {'target': 'training.compatibility_sweep', 'args': '(self.folder_name, self.number_of_epochs, self.h1, self.h2, self.\n training_set, self.test_set, self.batch_size_train, self.\n batch_size_test, self.OptimizerClass, self.optimizer_kwargs, self.\n NewErrorLossClass, self.StrictImitationLossClass, self.performance_metric)', 'kwargs': "{'lambda_c_stepsize': self.lambda_c_stepsize, 'percent_complete_queue':\n self.percent_complete_queue, 'new_error_loss_kwargs': self.\n new_error_loss_kwargs, 'strict_imitation_loss_kwargs': self.\n strict_imitation_loss_kwargs, 'get_instance_metadata': self.\n get_instance_metadata, 'device': self.device, 'use_ml_flow': self.\n use_ml_flow, 'ml_flow_run_name': self.ml_flow_run_name}"}), "(target=training.compatibility_sweep, args=(self.\n folder_name, self.number_of_epochs, self.h1, self.h2, self.training_set,\n self.test_set, self.batch_size_train, self.batch_size_test, self.\n OptimizerClass, self.optimizer_kwargs, self.NewErrorLossClass, self.\n StrictImitationLossClass, self.performance_metric), kwargs={\n 'lambda_c_stepsize': self.lambda_c_stepsize, 'percent_complete_queue':\n self.percent_complete_queue, 'new_error_loss_kwargs': self.\n new_error_loss_kwargs, 'strict_imitation_loss_kwargs': self.\n strict_imitation_loss_kwargs, 'get_instance_metadata': self.\n get_instance_metadata, 'device': self.device, 'use_ml_flow': self.\n use_ml_flow, 'ml_flow_run_name': self.ml_flow_run_name})\n", (5834, 6569), False, 'import threading\n'), ((6994, 7576), 'backwardcompatibilityml.helpers.training.compatibility_sweep', 'training.compatibility_sweep', (['self.folder_name', 'self.number_of_epochs', 'self.h1', 'self.h2', 'self.training_set', 'self.test_set', 
'self.batch_size_train', 'self.batch_size_test', 'self.OptimizerClass', 'self.optimizer_kwargs', 'self.NewErrorLossClass', 'self.StrictImitationLossClass'], {'lambda_c_stepsize': 'self.lambda_c_stepsize', 'percent_complete_queue': 'self.percent_complete_queue', 'new_error_loss_kwargs': 'self.new_error_loss_kwargs', 'strict_imitation_loss_kwargs': 'self.strict_imitation_loss_kwargs', 'get_instance_metadata': 'self.get_instance_metadata', 'device': 'self.device'}), '(self.folder_name, self.number_of_epochs, self.\n h1, self.h2, self.training_set, self.test_set, self.batch_size_train,\n self.batch_size_test, self.OptimizerClass, self.optimizer_kwargs, self.\n NewErrorLossClass, self.StrictImitationLossClass, lambda_c_stepsize=\n self.lambda_c_stepsize, percent_complete_queue=self.\n percent_complete_queue, new_error_loss_kwargs=self.\n new_error_loss_kwargs, strict_imitation_loss_kwargs=self.\n strict_imitation_loss_kwargs, get_instance_metadata=self.\n get_instance_metadata, device=self.device)\n', (7022, 7576), False, 'from backwardcompatibilityml.helpers import training\n'), ((8114, 8170), 'os.path.exists', 'os.path.exists', (['f"""{self.folder_name}/sweep_summary.json"""'], {}), "(f'{self.folder_name}/sweep_summary.json')\n", (8128, 8170), False, 'import os\n'), ((9030, 9051), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (9045, 9051), False, 'from PIL import Image\n'), ((9072, 9084), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (9082, 9084), False, 'import io\n'), ((9171, 9213), 'flask.send_file', 'send_file', (['img_bytes'], {'mimetype': '"""image/png"""'}), "(img_bytes, mimetype='image/png')\n", (9180, 9213), False, 'from flask import send_file\n'), ((8988, 9006), 'numpy.zeros', 'np.zeros', (['(30, 30)'], {}), '((30, 30))\n', (8996, 9006), True, 'import numpy as np\n')] |
import numpy as np
from noduleCADEvaluationLUNA16 import noduleCADEvaluation
import os
import csv
from multiprocessing import Pool
import functools
import SimpleITK as sitk
from config_testing import config
from layers import nms
# Evaluation inputs for the LUNA16-style FROC analysis on LIDC.
annotations_filename = './labels/new_nodule.csv'
annotations_excluded_filename = './labels/new_non_nodule.csv'# path for excluded annotations for the fold
seriesuids_filename = './labels/LIDCTestID.csv'# path for seriesuid for the fold
datapath = config['LIDC_data']
# Per-scan side info (origin/spacing/extendbox .npy files) lives here.
sideinfopath = '/data/LunaProj/LIDC/processed/'
# IoU threshold used by non-maximum suppression over predicted boxes.
nmsthresh = 0.1
bboxpath = './test_results/baseline_se_focal_newparam/bbox/' #for baseline
frocpath = './test_results/baseline_se_focal_newparam/bbox/nms' + str(nmsthresh) + '/' #_focal
outputdir = './bboxoutput/se_focal/nms' + str(nmsthresh) + '/'
# Detection-probability thresholds to evaluate (sigmoid of the box score).
#detp = [0.3, 0.4, 0.5, 0.6, 0.7]
detp = [0.3]
# Worker-pool size for the per-scan CSV conversion.
nprocess = 38#4
# Header row of the prediction CSVs consumed by the evaluation script.
firstline = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']
def VoxelToWorldCoord(voxelCoord, origin, spacing):
    """Map voxel-grid coordinates to world coordinates: voxel * spacing + origin."""
    return voxelCoord * spacing + origin
def sigmoid(x):
    """Elementwise logistic sigmoid, 1 / (1 + e^(-x))."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
def convertcsv(bboxfname, bboxpath, detp):
    """Convert one scan's predicted boxes (*_pbb.npy) into evaluation CSV rows.

    Filters boxes by sigmoid probability > detp and diameter > 3, applies NMS,
    maps voxel coordinates back to world coordinates via the scan's side info,
    and returns a list of [seriesuid, x, y, z, diameter, probability] rows.
    """
    resolution = np.array([1, 1, 1])
    # Per-scan geometry saved during preprocessing.
    origin = np.load(sideinfopath+bboxfname[:-8]+'_origin.npy', mmap_mode='r')
    spacing = np.load(sideinfopath+bboxfname[:-8]+'_spacing.npy', mmap_mode='r')
    extendbox = np.load(sideinfopath+bboxfname[:-8]+'_extendbox.npy', mmap_mode='r')
    pbb = np.load(bboxpath+bboxfname, mmap_mode='r')
    # Box layout appears to be [score, z, y, x, diameter] — confirm against the
    # detector's output format.
    diam = pbb[:,-1]
    check = sigmoid(pbb[:,0]) > detp
    pbbold = np.array(pbb[check])
    # pbbold = np.array(pbb[pbb[:,0] > detp])
    pbbold = np.array(pbbold[pbbold[:,-1] > 3]) # add new 9 15
    pbb = nms(pbbold, nmsthresh)
    # Drop the diameter column; remaining columns are [score, coords...].
    pbb = np.array(pbb[:, :-1])
    # Undo the crop offset, then convert resampled-voxel units back to the
    # original voxel grid before mapping to world coordinates.
    pbb[:, 1:] = np.array(pbb[:, 1:] + np.expand_dims(extendbox[:,0], 1).T)
    pbb[:, 1:] = np.array(pbb[:, 1:] * np.expand_dims(resolution, 1).T / np.expand_dims(spacing, 1).T)
    pos = VoxelToWorldCoord(pbb[:, 1:], origin, spacing)
    rowlist = []
    for nk in range(pos.shape[0]): # pos[nk, 2], pos[nk, 1], pos[nk, 0]
        # CSV wants (x, y, z) order, i.e. the reversed coordinate columns.
        rowlist.append([bboxfname[:-8], pos[nk, 2], pos[nk, 1], pos[nk, 0], diam[nk], 1/(1+np.exp(-pbb[nk,0]))])
    return rowlist
def getfrocvalue(results_filename, outputdir):
    """Run the LUNA16 nodule CAD evaluation on one prediction CSV and return its result."""
    return noduleCADEvaluation(annotations_filename,annotations_excluded_filename,seriesuids_filename,results_filename,outputdir)
def getcsv(detp):
    """Write one 'predanno<thresh>.csv' per detection threshold.

    For each threshold in `detp`, every '*_pbb.npy' file in `bboxpath` is
    converted to world-coordinate rows in parallel (module-level pool `p`)
    and written under `frocpath` with `firstline` as the header.
    """
    if not os.path.exists(frocpath):
        os.makedirs(frocpath)
    for detpthresh in detp:
        print('detp', detpthresh)
        fnamelist = [fname for fname in os.listdir(bboxpath)
                     if fname.endswith('_pbb.npy')]
        print(len(fnamelist))
        # `with` guarantees the CSV is closed even if a worker raises;
        # newline='' is the documented way to open files for csv.writer.
        with open(frocpath + 'predanno' + str(detpthresh) + '.csv', 'w',
                  newline='') as f:
            fwriter = csv.writer(f)
            fwriter.writerow(firstline)
            predannolist = p.map(
                functools.partial(convertcsv, bboxpath=bboxpath,
                                  detp=detpthresh), fnamelist)
            for predanno in predannolist:
                for row in predanno:
                    fwriter.writerow(row)
def getfroc(detp):
    """Compute and save FROC values for each threshold's prediction CSV.

    Creates one output directory per threshold under `outputdir` and saves
    the list of FROC results to 'froclist.npy'.
    """
    predanno_files = []
    output_dirs = []
    for thresh in detp:
        predanno_files.append(outputdir + 'predanno' + str(thresh) + '.csv')
        outputpath = outputdir + 'predanno' + str(thresh) + '/'
        output_dirs.append(outputpath)
        if not os.path.exists(outputpath):
            os.makedirs(outputpath)
    # froclist = p.map(getfrocvalue, predanno_files, output_dirs)
    froclist = [getfrocvalue(csv_path, out_dir)
                for csv_path, out_dir in zip(predanno_files, output_dirs)]
    np.save(outputdir + 'froclist.npy', froclist)
if __name__ == '__main__':
    # Worker pool shared with getcsv() via the module-level name `p`.
    p = Pool(nprocess)
    getcsv(detp)
    # getfroc(detp)
    p.close()
    print('finished!')
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"csv.writer",
"numpy.exp",
"numpy.array",
"layers.nms",
"multiprocessing.Pool",
"functools.partial",
"numpy.expand_dims",
"numpy.load",
"noduleCADEvaluationLUNA16.noduleCADEvaluation",
"numpy.save"
] | [((1211, 1230), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (1219, 1230), True, 'import numpy as np\n'), ((1249, 1318), 'numpy.load', 'np.load', (["(sideinfopath + bboxfname[:-8] + '_origin.npy')"], {'mmap_mode': '"""r"""'}), "(sideinfopath + bboxfname[:-8] + '_origin.npy', mmap_mode='r')\n", (1256, 1318), True, 'import numpy as np\n'), ((1329, 1399), 'numpy.load', 'np.load', (["(sideinfopath + bboxfname[:-8] + '_spacing.npy')"], {'mmap_mode': '"""r"""'}), "(sideinfopath + bboxfname[:-8] + '_spacing.npy', mmap_mode='r')\n", (1336, 1399), True, 'import numpy as np\n'), ((1412, 1484), 'numpy.load', 'np.load', (["(sideinfopath + bboxfname[:-8] + '_extendbox.npy')"], {'mmap_mode': '"""r"""'}), "(sideinfopath + bboxfname[:-8] + '_extendbox.npy', mmap_mode='r')\n", (1419, 1484), True, 'import numpy as np\n'), ((1496, 1540), 'numpy.load', 'np.load', (['(bboxpath + bboxfname)'], {'mmap_mode': '"""r"""'}), "(bboxpath + bboxfname, mmap_mode='r')\n", (1503, 1540), True, 'import numpy as np\n'), ((1615, 1635), 'numpy.array', 'np.array', (['pbb[check]'], {}), '(pbb[check])\n', (1623, 1635), True, 'import numpy as np\n'), ((1694, 1729), 'numpy.array', 'np.array', (['pbbold[pbbold[:, -1] > 3]'], {}), '(pbbold[pbbold[:, -1] > 3])\n', (1702, 1729), True, 'import numpy as np\n'), ((1760, 1782), 'layers.nms', 'nms', (['pbbold', 'nmsthresh'], {}), '(pbbold, nmsthresh)\n', (1763, 1782), False, 'from layers import nms\n'), ((1793, 1814), 'numpy.array', 'np.array', (['pbb[:, :-1]'], {}), '(pbb[:, :-1])\n', (1801, 1814), True, 'import numpy as np\n'), ((2350, 2476), 'noduleCADEvaluationLUNA16.noduleCADEvaluation', 'noduleCADEvaluation', (['annotations_filename', 'annotations_excluded_filename', 'seriesuids_filename', 'results_filename', 'outputdir'], {}), '(annotations_filename, annotations_excluded_filename,\n seriesuids_filename, results_filename, outputdir)\n', (2369, 2476), False, 'from noduleCADEvaluationLUNA16 import noduleCADEvaluation\n'), ((4056, 4101), 
'numpy.save', 'np.save', (["(outputdir + 'froclist.npy')", 'froclist'], {}), "(outputdir + 'froclist.npy', froclist)\n", (4063, 4101), True, 'import numpy as np\n'), ((4156, 4170), 'multiprocessing.Pool', 'Pool', (['nprocess'], {}), '(nprocess)\n', (4160, 4170), False, 'from multiprocessing import Pool\n'), ((2499, 2523), 'os.path.exists', 'os.path.exists', (['frocpath'], {}), '(frocpath)\n', (2513, 2523), False, 'import os\n'), ((2533, 2554), 'os.makedirs', 'os.makedirs', (['frocpath'], {}), '(frocpath)\n', (2544, 2554), False, 'import os\n'), ((2716, 2729), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2726, 2729), False, 'import csv\n'), ((2810, 2830), 'os.listdir', 'os.listdir', (['bboxpath'], {}), '(bboxpath)\n', (2820, 2830), False, 'import os\n'), ((1137, 1147), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1143, 1147), True, 'import numpy as np\n'), ((3128, 3193), 'functools.partial', 'functools.partial', (['convertcsv'], {'bboxpath': 'bboxpath', 'detp': 'detpthresh'}), '(convertcsv, bboxpath=bboxpath, detp=detpthresh)\n', (3145, 3193), False, 'import functools\n'), ((3764, 3790), 'os.path.exists', 'os.path.exists', (['outputpath'], {}), '(outputpath)\n', (3778, 3790), False, 'import os\n'), ((3804, 3827), 'os.makedirs', 'os.makedirs', (['outputpath'], {}), '(outputpath)\n', (3815, 3827), False, 'import os\n'), ((1854, 1888), 'numpy.expand_dims', 'np.expand_dims', (['extendbox[:, 0]', '(1)'], {}), '(extendbox[:, 0], 1)\n', (1868, 1888), True, 'import numpy as np\n'), ((1964, 1990), 'numpy.expand_dims', 'np.expand_dims', (['spacing', '(1)'], {}), '(spacing, 1)\n', (1978, 1990), True, 'import numpy as np\n'), ((1930, 1959), 'numpy.expand_dims', 'np.expand_dims', (['resolution', '(1)'], {}), '(resolution, 1)\n', (1944, 1959), True, 'import numpy as np\n'), ((2241, 2260), 'numpy.exp', 'np.exp', (['(-pbb[nk, 0])'], {}), '(-pbb[nk, 0])\n', (2247, 2260), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy, matplotlib.pyplot as plt, time
from sklearn.metrics import mean_squared_error, accuracy_score, roc_auc_score
class ExternalRNN(object):
    """External (Jordan-style) recurrent neural network.

    A single-hidden-layer network whose past outputs are fed back into the
    hidden layer through an extra weight matrix (W3), delayed by up to
    ``delays`` time steps.
    """

    def __init__(self, hidden_layer_size=3, learning_rate=0.2, max_epochs=1000, delays=2):
        self.hidden_layer_size = hidden_layer_size
        self.learning_rate = learning_rate
        self.max_epochs = max_epochs
        self.delays = delays
        # Decision threshold used by predict()/score() to binarize outputs.
        self.auc = 0.5

    def fit(self, X, y):
        """Train the network with per-sample gradient descent.

        Args:
            X: 2-D array of input rows.
            y: 2-D array of target rows (one per input row).

        Returns:
            self, with trained weights W1, W2, W3.
        """
        self.input_layer_size = X.shape[1]
        self.output_layer_size = y.shape[1]
        remaining_epochs = self.max_epochs
        # Random weight initialization; the extra row in W1/W2 is the bias.
        self.W1 = numpy.random.rand(1 + self.input_layer_size, self.hidden_layer_size)
        self.W2 = numpy.random.rand(1 + self.hidden_layer_size, self.output_layer_size)
        self.W3 = numpy.random.rand(self.output_layer_size * self.delays, self.hidden_layer_size)
        self.Ydelayed = numpy.zeros((1, self.output_layer_size * self.delays))
        epsilon = 0.001
        error = 1
        self.J = []  # mean training error per epoch
        # Repeat until the error is small enough or max_epochs is reached.
        while error > epsilon and remaining_epochs > 0:
            epoch_errors = []  # list accumulation instead of O(n^2) numpy.append
            for self.X, self.y in zip(X, y):
                self.X = numpy.array([self.X])
                self.y = numpy.array([self.y])
                error, gradients = self.single_step(self.X, self.y)
                epoch_errors.append(error)
                dJdW1, dJdW2, dJdW3 = gradients
                # Gradient-descent update of all three weight matrices.
                self.W1 = self.W1 - self.learning_rate * dJdW1
                self.W2 = self.W2 - self.learning_rate * dJdW2
                self.W3 = self.W3 - self.learning_rate * dJdW3
                # Shift the delayed outputs one step through time and store
                # the newest output in the first slot of each delay group.
                self.Ydelayed = numpy.roll(self.Ydelayed, 1, 1)
                self.Ydelayed[:, ::self.delays] = self.Y
            error = float(numpy.mean(epoch_errors))
            self.J.append(error)
            remaining_epochs -= 1
        return self

    def predict(self, X):
        """Return binary (0/1) predictions for each row of X.

        Fix: ``map`` returns a lazy iterator on Python 3, which breaks
        ``numpy.array(map(...))``; the results are materialized with list
        comprehensions instead.
        """
        raw = [self.forward(numpy.array([x]))[0] for x in X]
        labels = [1 if value > self.auc else 0 for value in raw]
        return numpy.array(labels)

    def score(self, X, y_true):
        """Return the accuracy of the binarized predictions for X."""
        y_raw = [self.forward(numpy.array([x]))[0] for x in X]
        auc = roc_auc_score(y_true, y_raw)  # computed for parity; not returned
        y_pred = numpy.array([1 if value > self.auc else 0 for value in y_raw])
        return accuracy_score(y_true.flatten(), y_pred.flatten())

    def single_step(self, X, y):
        """Run one forward/backward pass; returns (cost, gradients)."""
        self.Y = self.forward(X)
        cost = self.cost(self.Y, y)
        gradients = self.backpropagate(X, y)
        return cost, gradients

    def forward(self, X):
        """Propagate X through the network and return the output row."""
        # Hidden pre-activation: inputs, bias, and delayed-output feedback.
        self.Zin = numpy.dot(X, self.W1[:-1, :])
        self.Zin += numpy.dot(numpy.ones((1, 1)), self.W1[-1:, :])
        self.Zin += numpy.dot(self.Ydelayed, self.W3)
        self.Z = self.sigmoid(self.Zin)
        self.Z = numpy.nan_to_num(self.Z)
        # Output pre-activation: hidden activations plus bias.
        self.Yin = numpy.dot(self.Z, self.W2[:-1, ])
        self.Yin += numpy.dot(numpy.ones((1, 1)), self.W2[-1:, :])
        Y = self.linear(self.Yin)
        Y = numpy.nan_to_num(Y)
        return Y

    def cost(self, Y, y):
        """Mean squared error between prediction Y and target y."""
        return mean_squared_error(Y, y)

    def backpropagate(self, X, y):
        """Backpropagate the error; returns (dJdW1, dJdW2, dJdW3)."""
        delta3 = numpy.multiply(-(y - self.Y), self.linear_derivative(self.Yin))
        dJdW2 = numpy.dot(self.Z.T, delta3)
        dJdW2 = numpy.append(dJdW2, numpy.dot(numpy.ones((1, 1)), delta3), axis=0)
        delta2 = numpy.dot(delta3, self.W2[:-1, :].T) * self.sigmoid_derivative(self.Zin)
        dJdW1 = numpy.dot(X.T, delta2)
        dJdW1 = numpy.append(dJdW1, numpy.dot(numpy.ones((1, 1)), delta2), axis=0)
        dJdW3 = numpy.dot(
            numpy.repeat(self.Ydelayed, self.output_layer_size * self.delays, 0),
            numpy.repeat(delta2, self.output_layer_size * self.delays, 0))
        return dJdW1, dJdW2, dJdW3

    def sigmoid(self, z):
        """Element-wise logistic activation."""
        return 1 / (1 + numpy.exp(-z))

    def sigmoid_derivative(self, z):
        """Derivative of the logistic function w.r.t. its input."""
        return numpy.exp(-z) / ((1 + numpy.exp(-z)) ** 2)

    def linear(self, z):
        """Identity output activation."""
        return z

    def linear_derivative(self, z):
        """Derivative of the identity activation (constant 1)."""
        return 1
| [
"numpy.repeat",
"numpy.random.rand",
"numpy.ones",
"numpy.roll",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.exp",
"numpy.append",
"numpy.nan_to_num"
] | [((786, 854), 'numpy.random.rand', 'numpy.random.rand', (['(1 + self.input_layer_size)', 'self.hidden_layer_size'], {}), '(1 + self.input_layer_size, self.hidden_layer_size)\n', (803, 854), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((873, 942), 'numpy.random.rand', 'numpy.random.rand', (['(1 + self.hidden_layer_size)', 'self.output_layer_size'], {}), '(1 + self.hidden_layer_size, self.output_layer_size)\n', (890, 942), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((961, 1040), 'numpy.random.rand', 'numpy.random.rand', (['(self.output_layer_size * self.delays)', 'self.hidden_layer_size'], {}), '(self.output_layer_size * self.delays, self.hidden_layer_size)\n', (978, 1040), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((1065, 1119), 'numpy.zeros', 'numpy.zeros', (['(1, self.output_layer_size * self.delays)'], {}), '((1, self.output_layer_size * self.delays))\n', (1076, 1119), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((2847, 2861), 'numpy.array', 'numpy.array', (['Y'], {}), '(Y)\n', (2858, 2861), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3012, 3041), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3025, 3041), False, 'from sklearn.metrics import mean_squared_error, accuracy_score, roc_auc_score\n'), ((3124, 3143), 'numpy.array', 'numpy.array', (['y_pred'], {}), '(y_pred)\n', (3135, 3143), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3558, 3587), 'numpy.dot', 'numpy.dot', (['X', 'self.W1[:-1, :]'], {}), '(X, self.W1[:-1, :])\n', (3567, 3587), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3673, 3706), 'numpy.dot', 'numpy.dot', (['self.Ydelayed', 'self.W3'], {}), '(self.Ydelayed, self.W3)\n', (3682, 3706), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3764, 3788), 'numpy.nan_to_num', 'numpy.nan_to_num', (['self.Z'], {}), '(self.Z)\n', (3780, 3788), False, 'import numpy, matplotlib.pyplot as plt, 
time\n'), ((3809, 3841), 'numpy.dot', 'numpy.dot', (['self.Z', 'self.W2[:-1,]'], {}), '(self.Z, self.W2[:-1,])\n', (3818, 3841), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3954, 3973), 'numpy.nan_to_num', 'numpy.nan_to_num', (['Y'], {}), '(Y)\n', (3970, 3973), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4079, 4103), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['Y', 'y'], {}), '(Y, y)\n', (4097, 4103), False, 'from sklearn.metrics import mean_squared_error, accuracy_score, roc_auc_score\n'), ((4290, 4317), 'numpy.dot', 'numpy.dot', (['self.Z.T', 'delta3'], {}), '(self.Z.T, delta3)\n', (4299, 4317), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4505, 4527), 'numpy.dot', 'numpy.dot', (['X.T', 'delta2'], {}), '(X.T, delta2)\n', (4514, 4527), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((1345, 1360), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (1356, 1360), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3617, 3635), 'numpy.ones', 'numpy.ones', (['(1, 1)'], {}), '((1, 1))\n', (3627, 3635), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((3872, 3890), 'numpy.ones', 'numpy.ones', (['(1, 1)'], {}), '((1, 1))\n', (3882, 3890), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4419, 4455), 'numpy.dot', 'numpy.dot', (['delta3', 'self.W2[:-1, :].T'], {}), '(delta3, self.W2[:-1, :].T)\n', (4428, 4455), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4638, 4706), 'numpy.repeat', 'numpy.repeat', (['self.Ydelayed', '(self.output_layer_size * self.delays)', '(0)'], {}), '(self.Ydelayed, self.output_layer_size * self.delays, 0)\n', (4650, 4706), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4736, 4797), 'numpy.repeat', 'numpy.repeat', (['delta2', '(self.output_layer_size * self.delays)', '(0)'], {}), '(delta2, self.output_layer_size * self.delays, 0)\n', (4748, 4797), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((5043, 
5056), 'numpy.exp', 'numpy.exp', (['(-z)'], {}), '(-z)\n', (5052, 5056), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((1470, 1491), 'numpy.array', 'numpy.array', (['[self.X]'], {}), '([self.X])\n', (1481, 1491), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((1517, 1538), 'numpy.array', 'numpy.array', (['[self.y]'], {}), '([self.y])\n', (1528, 1538), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((1637, 1669), 'numpy.append', 'numpy.append', (['total_error', 'error'], {}), '(total_error, error)\n', (1649, 1669), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((2098, 2129), 'numpy.roll', 'numpy.roll', (['self.Ydelayed', '(1)', '(1)'], {}), '(self.Ydelayed, 1, 1)\n', (2108, 2129), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4364, 4382), 'numpy.ones', 'numpy.ones', (['(1, 1)'], {}), '((1, 1))\n', (4374, 4382), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4574, 4592), 'numpy.ones', 'numpy.ones', (['(1, 1)'], {}), '((1, 1))\n', (4584, 4592), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((4930, 4943), 'numpy.exp', 'numpy.exp', (['(-z)'], {}), '(-z)\n', (4939, 4943), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((5061, 5074), 'numpy.exp', 'numpy.exp', (['(-z)'], {}), '(-z)\n', (5070, 5074), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((2752, 2768), 'numpy.array', 'numpy.array', (['[x]'], {}), '([x])\n', (2763, 2768), False, 'import numpy, matplotlib.pyplot as plt, time\n'), ((2973, 2989), 'numpy.array', 'numpy.array', (['[x]'], {}), '([x])\n', (2984, 2989), False, 'import numpy, matplotlib.pyplot as plt, time\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import abc
import copy
import math
import numpy as np
import paddle
from . import utils
__all__ = [
'BaseQuantizer',
'AbsmaxQuantizer',
'PerChannelAbsmaxQuantizer',
'KLQuantizer',
'HistQuantizer',
]
def abs_max_value(tensor):
    """Return the largest absolute element of *tensor* as a Python float."""
    peak = paddle.max(paddle.abs(tensor))
    return float(peak.numpy())
def merge_max_value(old, new):
    """Element-wise maximum of two (possibly nested) lists, written into *new*.

    An empty *old* means no previous statistics; *new* is returned unchanged.
    """
    assert isinstance(old, list) and isinstance(new, list)
    if not old:
        return new
    assert len(old) == len(new)
    for idx, (prev, cur) in enumerate(zip(old, new)):
        assert type(prev) == type(cur)
        if isinstance(prev, list):
            new[idx] = merge_max_value(prev, cur)
        else:
            new[idx] = max(prev, cur)
    return new
def combine_abs_max_and_hist(tensor, origin_max, origin_hist, bins,
                             upsample_bins):
    """
    Merge a new tensor's absolute values into running (abs_max, histogram)
    statistics and return the updated ``(abs_max, hist)`` pair.

    When the new peak exceeds the old one, the old histogram is upsampled,
    re-binned onto the wider range, and the new data's histogram is added
    on top.
    """
    new_max = abs_max_value(tensor)
    if new_max == 0.0:
        # All-zero tensor contributes nothing; keep the old statistics.
        return origin_max, origin_hist
    elif origin_max == 0.0:
        # No previous data: start a fresh histogram over (0, new_max).
        new_hist, _ = np.histogram(
            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins)
        new_hist = new_hist.astype(np.float32)
        return new_max, new_hist
    elif new_max <= origin_max:
        # New data fits the existing range: histogram it and accumulate.
        new_hist, _ = np.histogram(
            paddle.abs(tensor).numpy(), range=(0, origin_max), bins=bins)
        new_hist = new_hist.astype(np.float32)
        new_hist += origin_hist
        return origin_max, new_hist
    else:
        # New data exceeds the old range: widen the range to a multiple of
        # the upsampled bin width, re-bin the old histogram onto it, then
        # add the new data's histogram.
        # bin_width = origin_max / (bins * upsample_bins)
        #           = new_max / (bins * downsample_bins)
        bin_width = origin_max / (bins * upsample_bins)
        downsampe_bins = int(math.ceil(new_max / (bins * bin_width)))
        new_max = bins * bin_width * downsampe_bins
        # Spread each old bin uniformly over `upsample_bins` fine bins.
        upsampled_hist = np.repeat(origin_hist, upsample_bins)
        expanded_hist = np.zeros((bins * downsampe_bins), dtype=np.float32)
        expanded_hist[0:bins * upsample_bins] = upsampled_hist
        # Sum runs of `downsampe_bins` fine bins back into `bins` coarse bins
        # via a strided cumulative sum and adjacent differences.
        cumsumed_hist = np.cumsum(
            expanded_hist, dtype=np.float64)[downsampe_bins - 1::downsampe_bins]
        shift_cumsumed_hist = np.zeros((bins), dtype=np.float64)
        shift_cumsumed_hist[1:] = cumsumed_hist[0:-1]
        sampled_hist = (cumsumed_hist - shift_cumsumed_hist) / upsample_bins
        sampled_hist = sampled_hist.astype(np.float32)
        new_hist, _ = np.histogram(
            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins)
        new_hist = new_hist.astype(np.float32)
        new_hist += sampled_hist
        return new_max, new_hist
@six.add_metaclass(abc.ABCMeta)
class BaseQuantizer(object):
    """
    Abstract base class shared by activation/weight quantizers.

    Subclasses collect statistics in sample_data() and turn them into
    per-tensor thresholds in cal_thresholds().
    """

    def __init__(self, quant_bits=8):
        super(BaseQuantizer, self).__init__()
        assert isinstance(quant_bits, int)
        assert 0 < quant_bits <= 16
        self.quant_bits = quant_bits
        self.thresholds = []

    @abc.abstractmethod
    def sample_data(self, layer, tensors):
        pass

    @abc.abstractmethod
    def cal_thresholds(self):
        pass
class AbsmaxQuantizer(BaseQuantizer):
    """
    Quantizer that keeps a single running abs-max threshold per tensor.
    """

    def __init__(self, quant_bits=8):
        super(AbsmaxQuantizer, self).__init__(quant_bits)

    def sample_data(self, layer, tensors):
        assert isinstance(tensors, tuple)
        current = [abs_max_value(t) for t in tensors]
        self.thresholds = merge_max_value(self.thresholds, current)

    def cal_thresholds(self):
        # Thresholds are maintained incrementally in sample_data().
        pass
class PerChannelAbsmaxQuantizer(BaseQuantizer):
    """
    Quantizer that keeps one running abs-max threshold per channel.
    """

    def __init__(self, quant_bits=8):
        super(PerChannelAbsmaxQuantizer, self).__init__(quant_bits)

    def sample_data(self, layer, tensors):
        assert isinstance(layer, paddle.nn.Layer)
        assert isinstance(tensors, tuple)
        # Some layer types store channels on axis 1 instead of axis 0.
        channels_on_axis1 = isinstance(layer,
                                       tuple(utils.spec_channel_axis_layers))
        sampled = []
        for tensor in tensors:
            if channels_on_axis1:
                vals = [
                    abs_max_value(tensor[:, c]) for c in range(tensor.shape[1])
                ]
            else:
                vals = [
                    abs_max_value(tensor[c]) for c in range(tensor.shape[0])
                ]
            sampled.append(vals)
        self.thresholds = merge_max_value(self.thresholds, sampled)

    def cal_thresholds(self):
        # Per-channel maxima are already tracked in sample_data().
        pass
@six.add_metaclass(abc.ABCMeta)
class BaseHistQuantizer(BaseQuantizer):
    """
    Base class for histogram-based quantizers: accumulates an abs-max value
    and a histogram of absolute values for every sampled tensor.
    """

    def __init__(self, quant_bits=8, bins=1024, upsample_bins=64):
        super(BaseHistQuantizer, self).__init__(quant_bits)
        self.bins = bins
        self.upsample_bins = upsample_bins
        self.abs_max_vals = []
        self.hists = []

    def sample_data(self, layer, tensors):
        assert isinstance(tensors, tuple)
        if self.abs_max_vals == []:
            # First batch: initialize abs-max values and histograms.
            self.abs_max_vals = [abs_max_value(t) for t in tensors]
            for idx, tensor in enumerate(tensors):
                peak = self.abs_max_vals[idx]
                if peak == 0.0:
                    self.hists.append(None)
                else:
                    hist, _ = np.histogram(
                        paddle.abs(tensor).numpy(),
                        range=(0., peak),
                        bins=self.bins)
                    self.hists.append(hist.astype(np.float32))
        else:
            assert len(self.abs_max_vals) == len(tensors)
            assert len(self.hists) == len(tensors)
            # Later batches: merge new data into the running statistics.
            for idx, tensor in enumerate(tensors):
                merged_max, merged_hist = combine_abs_max_and_hist(
                    tensor, self.abs_max_vals[idx], self.hists[idx],
                    self.bins, self.upsample_bins)
                self.abs_max_vals[idx] = merged_max
                self.hists[idx] = merged_hist

    @abc.abstractmethod
    def cal_thresholds(self):
        pass
class HistQuantizer(BaseHistQuantizer):
    """
    Histogram quantizer that puts the threshold at a fixed percentile of
    the accumulated histogram of absolute values.
    """

    def __init__(self,
                 quant_bits=8,
                 bins=1024,
                 upsample_bins=64,
                 hist_percent=0.99999):
        super(HistQuantizer, self).__init__(quant_bits, bins, upsample_bins)
        self.hist_percent = hist_percent

    def cal_thresholds(self):
        def _percentile_threshold(abs_max, hist, percent):
            # Find the first bin whose cumulative mass reaches `percent`
            # and map the bin center back to an absolute value.
            assert hist.ndim == 1 and percent < 1.0
            normalized = hist / np.sum(hist, dtype=np.float64)
            cumulated = np.cumsum(normalized)
            index = np.argwhere(cumulated >= percent)[0]
            return float((index - 0.5) * (abs_max / hist.shape[0]))

        for idx, hist in enumerate(self.hists):
            if hist is None:
                # No samples observed; fall back to the recorded abs-max.
                self.thresholds.append(self.abs_max_vals[idx])
            else:
                self.thresholds.append(_percentile_threshold(
                    self.abs_max_vals[idx], hist, self.hist_percent))
class KLQuantizer(BaseHistQuantizer):
    """
    Histogram quantizer that picks thresholds by minimizing KL divergence.
    """

    def __init__(self, quant_bits=8, bins=1024, upsample_bins=64):
        super(KLQuantizer, self).__init__(quant_bits, bins, upsample_bins)

    def cal_thresholds(self):
        for idx, hist in enumerate(self.hists):
            if hist is None:
                # No samples observed; fall back to the recorded abs-max.
                self.thresholds.append(self.abs_max_vals[idx])
            else:
                self.thresholds.append(utils.cal_kl_scaling_factor(
                    hist, self.abs_max_vals[idx], self.quant_bits))
| [
"numpy.repeat",
"math.ceil",
"six.add_metaclass",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"paddle.abs",
"numpy.cumsum"
] | [((3223, 3253), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (3240, 3253), False, 'import six\n'), ((5200, 5230), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (5217, 5230), False, 'import six\n'), ((7291, 7306), 'numpy.cumsum', 'np.cumsum', (['hist'], {}), '(hist)\n', (7300, 7306), True, 'import numpy as np\n'), ((2454, 2491), 'numpy.repeat', 'np.repeat', (['origin_hist', 'upsample_bins'], {}), '(origin_hist, upsample_bins)\n', (2463, 2491), True, 'import numpy as np\n'), ((2516, 2565), 'numpy.zeros', 'np.zeros', (['(bins * downsampe_bins)'], {'dtype': 'np.float32'}), '(bins * downsampe_bins, dtype=np.float32)\n', (2524, 2565), True, 'import numpy as np\n'), ((2777, 2809), 'numpy.zeros', 'np.zeros', (['bins'], {'dtype': 'np.float64'}), '(bins, dtype=np.float64)\n', (2785, 2809), True, 'import numpy as np\n'), ((7232, 7262), 'numpy.sum', 'np.sum', (['hist'], {'dtype': 'np.float64'}), '(hist, dtype=np.float64)\n', (7238, 7262), True, 'import numpy as np\n'), ((7327, 7364), 'numpy.argwhere', 'np.argwhere', (['(cumsumed_hist >= percent)'], {}), '(cumsumed_hist >= percent)\n', (7338, 7364), True, 'import numpy as np\n'), ((903, 921), 'paddle.abs', 'paddle.abs', (['tensor'], {}), '(tensor)\n', (913, 921), False, 'import paddle\n'), ((2335, 2374), 'math.ceil', 'math.ceil', (['(new_max / (bins * bin_width))'], {}), '(new_max / (bins * bin_width))\n', (2344, 2374), False, 'import math\n'), ((2655, 2697), 'numpy.cumsum', 'np.cumsum', (['expanded_hist'], {'dtype': 'np.float64'}), '(expanded_hist, dtype=np.float64)\n', (2664, 2697), True, 'import numpy as np\n'), ((1728, 1746), 'paddle.abs', 'paddle.abs', (['tensor'], {}), '(tensor)\n', (1738, 1746), False, 'import paddle\n'), ((1947, 1965), 'paddle.abs', 'paddle.abs', (['tensor'], {}), '(tensor)\n', (1957, 1965), False, 'import paddle\n'), ((3047, 3065), 'paddle.abs', 'paddle.abs', (['tensor'], {}), '(tensor)\n', (3057, 3065), False, 'import 
paddle\n'), ((6001, 6019), 'paddle.abs', 'paddle.abs', (['tensor'], {}), '(tensor)\n', (6011, 6019), False, 'import paddle\n')] |
# Copyright 2020 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.metrics import mean_squared_error
from typing import Union
from deeppavlov.core.common.metrics_registry import register_metric
@register_metric('mean_squared_error')
def mse(y_true: Union[np.ndarray, list],
        y_predicted: Union[np.ndarray, list],
        *args, **kwargs):
    """
    Calculates mean squared error.

    Args:
        y_true: list of true probs
        y_predicted: list of predicted probs

    Returns:
        Mean squared error

    Raises:
        AssertionError: if either input contains NaN or infinite values.
    """
    # Guard against NaN/inf, which would silently corrupt the metric.
    for value in [y_true, y_predicted]:
        assert (np.isfinite(value).all())
    return mean_squared_error(y_true, y_predicted, *args, **kwargs)
| [
"numpy.isfinite",
"deeppavlov.core.common.metrics_registry.register_metric",
"sklearn.metrics.mean_squared_error"
] | [((771, 808), 'deeppavlov.core.common.metrics_registry.register_metric', 'register_metric', (['"""mean_squared_error"""'], {}), "('mean_squared_error')\n", (786, 808), False, 'from deeppavlov.core.common.metrics_registry import register_metric\n'), ((1192, 1248), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_true', 'y_predicted', '*args'], {}), '(y_true, y_predicted, *args, **kwargs)\n', (1210, 1248), False, 'from sklearn.metrics import mean_squared_error\n'), ((1155, 1173), 'numpy.isfinite', 'np.isfinite', (['value'], {}), '(value)\n', (1166, 1173), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# _*_ coding: UTF-8 _*_
import json
import codecs
import argparse
import numpy as np
def compute_edit_distance(hypothesis: list, reference: list):
    """Align *hypothesis* against *reference* with Levenshtein dynamic
    programming and count the error types.

    Returns a tuple ``(len_ref, insertions, deletions, substitutions)``.
    """
    n_ins = n_del = n_sub = 0
    n_cor = 0
    len_hyp, len_ref = len(hypothesis), len(reference)
    # Degenerate cases: one side empty means pure insertions/deletions.
    if len_hyp == 0 or len_ref == 0:
        return len_ref, len_hyp, len_ref, 0
    dist = np.zeros((len_hyp + 1, len_ref + 1), dtype=np.int16)
    # Backtrace codes: 0 = match, 1 = substitution, 2 = insertion, 3 = deletion
    ops = np.zeros((len_hyp + 1, len_ref + 1), dtype=np.int8)
    dist[:, 0] = np.arange(len_hyp + 1)
    dist[0, :] = np.arange(len_ref + 1)
    # Fill the cost and operation matrices (i indexes hyp, j indexes ref).
    for i in range(1, len_hyp + 1):
        for j in range(1, len_ref + 1):
            if hypothesis[i - 1] == reference[j - 1]:
                dist[i][j] = dist[i - 1][j - 1]
            else:
                options = [dist[i - 1][j - 1] + 1,  # substitution
                           dist[i - 1][j] + 1,      # insertion
                           dist[i][j - 1] + 1]      # deletion
                best = min(options)
                dist[i][j] = best
                # First minimum wins: substitution has priority.
                ops[i][j] = options.index(best) + 1
    matches = []  # aligned (ref_idx, hyp_idx) pairs
    i, j = len_hyp, len_ref
    while i >= 0 or j >= 0:
        op = ops[max(0, i)][max(0, j)]
        if op == 0:  # match (or untouched border cell)
            if i - 1 >= 0 and j - 1 >= 0:
                matches.append((j - 1, i - 1))
                n_cor += 1
            i -= 1
            j -= 1
        elif op == 2:  # insertion: extra hypothesis token
            i -= 1
            n_ins += 1
        elif op == 3:  # deletion: missing reference token
            j -= 1
            n_del += 1
        elif op == 1:  # substitution
            i -= 1
            j -= 1
            n_sub += 1
        # Walked off one edge while the other still has tokens left.
        if i < 0 and j >= 0:
            n_del += 1
        elif j < 0 and i >= 0:
            n_ins += 1
    matches.reverse()
    return len_ref, n_ins, n_del, n_sub
def mlf2dic(mlf_path):
    """Parse an HTK-style MLF file into ``{utterance_name: [words]}``.

    A block starts with a ``"<name>.rec"`` header line followed by one word
    per line; a ``.`` line immediately followed by another ``.`` line
    terminates the block.

    Fixes: the lookahead on a trailing ``.`` used to raise IndexError at
    EOF; the file is now closed via a context manager.
    """
    results = {}
    with codecs.open(mlf_path, "r") as fi:
        lines = fi.readlines()
    word_list = []
    utt_name = ''
    for line_idx, raw in enumerate(lines):
        line = raw.strip()
        if '.rec' in line:
            # New utterance: reset the word buffer and record its name.
            word_list = []
            utt_name = line.replace('"', '').replace('.rec', '')
        elif line == '.':
            # Store on the first '.' of a '..' pair; guard the lookahead so
            # a '.' on the final line no longer raises IndexError.
            if line_idx + 1 < len(lines) and lines[line_idx + 1].strip() == '.':
                results[utt_name] = word_list
        else:
            word_list.append(line)
    return results
def json2dic(jsonpath, dic=None):
    """
    Read a dict from JSON or write a dict to JSON.

    :param jsonpath: filepath of json
    :param dic: content dic to write, or None to read
    :return: content dic for read, None for write
    """
    if dic is None:
        with codecs.open(jsonpath, 'r') as handle:
            return json.load(handle)
    assert isinstance(dic, dict)
    with codecs.open(jsonpath, 'w') as handle:
        json.dump(dic, handle)
    return None
| [
"json.load",
"numpy.zeros",
"codecs.open",
"json.dump"
] | [((391, 443), 'numpy.zeros', 'np.zeros', (['(len_hyp + 1, len_ref + 1)'], {'dtype': 'np.int16'}), '((len_hyp + 1, len_ref + 1), dtype=np.int16)\n', (399, 443), True, 'import numpy as np\n'), ((532, 583), 'numpy.zeros', 'np.zeros', (['(len_hyp + 1, len_ref + 1)'], {'dtype': 'np.int8'}), '((len_hyp + 1, len_ref + 1), dtype=np.int8)\n', (540, 583), True, 'import numpy as np\n'), ((2415, 2441), 'codecs.open', 'codecs.open', (['mlf_path', '"""r"""'], {}), "(mlf_path, 'r')\n", (2426, 2441), False, 'import codecs\n'), ((3182, 3208), 'codecs.open', 'codecs.open', (['jsonpath', '"""r"""'], {}), "(jsonpath, 'r')\n", (3193, 3208), False, 'import codecs\n'), ((3241, 3258), 'json.load', 'json.load', (['handle'], {}), '(handle)\n', (3250, 3258), False, 'import json\n'), ((3341, 3367), 'codecs.open', 'codecs.open', (['jsonpath', '"""w"""'], {}), "(jsonpath, 'w')\n", (3352, 3367), False, 'import codecs\n'), ((3391, 3413), 'json.dump', 'json.dump', (['dic', 'handle'], {}), '(dic, handle)\n', (3400, 3413), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from helpers import load_problem
experiments = ['synth', 'iso']  # experiment names with saved runs under output/
cases = ['spacefilling', 'alt', 'refine']  # sampling strategies to compare
#experiments = ['synth']
#experiments = ['iso']
#cases = ['spacefilling', 'refine']
run_idx = 0  # which repetition of each experiment to plot
for experiment in experiments:
# load experiment data
np.random.seed(run_idx)
problem, _, _, names, _, _ = load_problem(experiment)
lbme_true = problem.compute_lbme()
bme_true = np.exp(lbme_true)
# determine n_eval
filename = 'output/{}_refine_run{}.npz'.format(experiment,run_idx)
content = np.load(filename)
n_eval = content['n_eval']
# kombinierter plot:
# 1) allocation punkte plot
# 2) BME estimates über iterationen (mit allocation punkten)
# 3) Kriterium über iterationen
# data files:
# 1) n_eval bme_0 bme_1 ... bme_true_0 bme_true_1 ...
lbmes = content['lbmes']
n_models = lbmes.shape[0]
bmes = np.exp(lbmes)
plotdata = np.zeros((n_eval.size, 1+2*n_models))
plotdata[:,0] = n_eval
plotdata[:,1:1+n_models] = bmes.T
plotdata[:,1+n_models:1+2*n_models] = bme_true
filename = 'plotdata/{}_single_bmes.data'.format(experiment)
np.savetxt(filename, plotdata, fmt='%.6e')
# 2) n_eval crit_0 crit_1 ...
criteria = content['criteria']
plotdata = np.zeros((n_eval.size-1, 1+n_models))
plotdata[:,0] = n_eval[1:]
plotdata[:,1:] = criteria.T
filename = 'plotdata/{}_single_criteria.data'.format(experiment)
np.savetxt(filename, plotdata, fmt='%.6e')
# for each model:
# 3) (allocation punkte plot, model 0) n_eval, "0", bme_0, crit_0
model_idx = content['model_idx']
for m in range(n_models):
this_index = (model_idx == m)
count = np.sum(this_index)
plotdata = np.zeros((count,3))
plotdata[:,0] = n_eval[1:][this_index]
plotdata[:,1] = m
plotdata[:,2] = bmes[m,1:][this_index]
print('Evaluation counts, {}, {}: {}'.format(experiment, names[m], count))
filename = 'plotdata/{}_single_allocation_{}.data'.format(experiment, names[m])
np.savetxt(filename, plotdata, fmt='%.6e')
# hier eventuell model order wieder einführen
# error plot:
# file 1: spacefilling vs even alternation vs full-sequential
n_repeats = 51
e_raw = np.zeros((n_eval.size,n_repeats,len(cases)))
for i in range(n_repeats):
for j, case in enumerate(cases):
filename = 'output/{}_{}_run{}.npz'.format(experiment, case, i)
content = np.load(filename)
e_raw[:,i,j] = content['errors']
plotdata = np.zeros((n_eval.size, len(cases)+1))
plotdata[:,0] = n_eval
plotdata[:,1:1+len(cases)] = np.median(e_raw, axis = 1)
filename = 'plotdata/{}_errors.data'.format(experiment)
np.savetxt(filename, plotdata, fmt='%.6e')
# file 2: MC-error
filename = 'output/{}_mc.npz'.format(experiment)
content = np.load(filename)
n_sample_sizes = content['n_sample_sizes']
errors = np.median(content['MC_errors'], axis = 0)
plotdata = np.zeros((n_sample_sizes.size,2))
plotdata[:,0] = n_sample_sizes
plotdata[:,1] = errors
filename = 'plotdata/{}_mc.data'.format(experiment)
np.savetxt(filename, plotdata, fmt='%i %.6e')
mask = (n_sample_sizes < n_eval.size+2)
filename = 'plotdata/{}_mc_short.data'.format(experiment)
np.savetxt(filename, plotdata[mask,:], fmt='%i %.6e')
#%% Creating data for failed-example
content = np.load('output/failure_example.npz')
n_eval = content['n_eval']
criteria = content['criteria']
plotdata = np.zeros((n_eval.size-1, 3))
plotdata[:,0] = n_eval[1:]
plotdata[:,1:] = criteria.T
filename = 'plotdata/synth_failure.data'
np.savetxt(filename, plotdata, fmt='%.6e')
| [
"helpers.load_problem",
"numpy.median",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.random.seed",
"numpy.savetxt",
"numpy.load"
] | [((3557, 3594), 'numpy.load', 'np.load', (['"""output/failure_example.npz"""'], {}), "('output/failure_example.npz')\n", (3564, 3594), True, 'import numpy as np\n'), ((3666, 3696), 'numpy.zeros', 'np.zeros', (['(n_eval.size - 1, 3)'], {}), '((n_eval.size - 1, 3))\n', (3674, 3696), True, 'import numpy as np\n'), ((3792, 3834), 'numpy.savetxt', 'np.savetxt', (['filename', 'plotdata'], {'fmt': '"""%.6e"""'}), "(filename, plotdata, fmt='%.6e')\n", (3802, 3834), True, 'import numpy as np\n'), ((337, 360), 'numpy.random.seed', 'np.random.seed', (['run_idx'], {}), '(run_idx)\n', (351, 360), True, 'import numpy as np\n'), ((394, 418), 'helpers.load_problem', 'load_problem', (['experiment'], {}), '(experiment)\n', (406, 418), False, 'from helpers import load_problem\n'), ((473, 490), 'numpy.exp', 'np.exp', (['lbme_true'], {}), '(lbme_true)\n', (479, 490), True, 'import numpy as np\n'), ((600, 617), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (607, 617), True, 'import numpy as np\n'), ((934, 947), 'numpy.exp', 'np.exp', (['lbmes'], {}), '(lbmes)\n', (940, 947), True, 'import numpy as np\n'), ((968, 1009), 'numpy.zeros', 'np.zeros', (['(n_eval.size, 1 + 2 * n_models)'], {}), '((n_eval.size, 1 + 2 * n_models))\n', (976, 1009), True, 'import numpy as np\n'), ((1196, 1238), 'numpy.savetxt', 'np.savetxt', (['filename', 'plotdata'], {'fmt': '"""%.6e"""'}), "(filename, plotdata, fmt='%.6e')\n", (1206, 1238), True, 'import numpy as np\n'), ((1324, 1365), 'numpy.zeros', 'np.zeros', (['(n_eval.size - 1, 1 + n_models)'], {}), '((n_eval.size - 1, 1 + n_models))\n', (1332, 1365), True, 'import numpy as np\n'), ((1503, 1545), 'numpy.savetxt', 'np.savetxt', (['filename', 'plotdata'], {'fmt': '"""%.6e"""'}), "(filename, plotdata, fmt='%.6e')\n", (1513, 1545), True, 'import numpy as np\n'), ((2755, 2779), 'numpy.median', 'np.median', (['e_raw'], {'axis': '(1)'}), '(e_raw, axis=1)\n', (2764, 2779), True, 'import numpy as np\n'), ((2846, 2888), 'numpy.savetxt', 
'np.savetxt', (['filename', 'plotdata'], {'fmt': '"""%.6e"""'}), "(filename, plotdata, fmt='%.6e')\n", (2856, 2888), True, 'import numpy as np\n'), ((2984, 3001), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2991, 3001), True, 'import numpy as np\n'), ((3062, 3101), 'numpy.median', 'np.median', (["content['MC_errors']"], {'axis': '(0)'}), "(content['MC_errors'], axis=0)\n", (3071, 3101), True, 'import numpy as np\n'), ((3124, 3158), 'numpy.zeros', 'np.zeros', (['(n_sample_sizes.size, 2)'], {}), '((n_sample_sizes.size, 2))\n', (3132, 3158), True, 'import numpy as np\n'), ((3285, 3330), 'numpy.savetxt', 'np.savetxt', (['filename', 'plotdata'], {'fmt': '"""%i %.6e"""'}), "(filename, plotdata, fmt='%i %.6e')\n", (3295, 3330), True, 'import numpy as np\n'), ((3446, 3500), 'numpy.savetxt', 'np.savetxt', (['filename', 'plotdata[mask, :]'], {'fmt': '"""%i %.6e"""'}), "(filename, plotdata[mask, :], fmt='%i %.6e')\n", (3456, 3500), True, 'import numpy as np\n'), ((1773, 1791), 'numpy.sum', 'np.sum', (['this_index'], {}), '(this_index)\n', (1779, 1791), True, 'import numpy as np\n'), ((1811, 1831), 'numpy.zeros', 'np.zeros', (['(count, 3)'], {}), '((count, 3))\n', (1819, 1831), True, 'import numpy as np\n'), ((2148, 2190), 'numpy.savetxt', 'np.savetxt', (['filename', 'plotdata'], {'fmt': '"""%.6e"""'}), "(filename, plotdata, fmt='%.6e')\n", (2158, 2190), True, 'import numpy as np\n'), ((2579, 2596), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2586, 2596), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import argparse
import numpy as np
from PIL import Image
from src.config import config
from src.eval_utils import metrics
# NOTE(review): batch_size is not referenced in this module — confirm before removing.
batch_size = 1
# Command-line interface for the offline mAP computation.
parser = argparse.ArgumentParser(description="ssd acc calculation")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--img_path", type=str, required=True, help="image file path.")
parser.add_argument("--anno_file", type=str, required=True, help="annotation file.")
parser.add_argument("--drop", action="store_true", help="drop iscrowd images or not.")
args = parser.parse_args()
def get_imgSize(file_name):
    """Return the (width, height) of an image file.

    The original left the file handle opened by PIL dangling; the context
    manager closes it as soon as the size has been read.
    """
    with Image.open(file_name) as img:
        return img.size
def get_result(result_path, img_id_file_path):
    """Assemble predictions from dumped .bin inference results and print the mAP.

    :param result_path: directory holding <img_id>_0.bin (boxes) and
        <img_id>_1.bin (scores) files produced by 310 inference
    :param img_id_file_path: directory of evaluated images; file stems are
        assumed to be the numeric COCO image ids — TODO confirm
    """
    if args.drop:
        # Build COCO lookup tables so crowd-annotated images can be skipped below.
        from pycocotools.coco import COCO
        train_cls = config.classes
        train_cls_dict = {}
        for i, cls in enumerate(train_cls):
            train_cls_dict[cls] = i
        coco = COCO(args.anno_file)
        classs_dict = {}
        cat_ids = coco.loadCats(coco.getCatIds())
        for cat in cat_ids:
            classs_dict[cat["id"]] = cat["name"]
    files = os.listdir(img_id_file_path)
    pred_data = []
    for file in files:
        # File stem -> integer image id (e.g. '397133.jpg' -> 397133)
        img_ids_name = file.split('.')[0]
        img_id = int(np.squeeze(img_ids_name))
        if args.drop:
            # Skip images that are crowd-annotated or have no usable boxes.
            anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)
            anno = coco.loadAnns(anno_ids)
            annos = []
            iscrowd = False
            for label in anno:
                bbox = label["bbox"]
                class_name = classs_dict[label["category_id"]]
                iscrowd = iscrowd or label["iscrowd"]
                if class_name in train_cls:
                    # COCO bbox is (x, y, w, h); convert to corner coordinates
                    x_min, x_max = bbox[0], bbox[0] + bbox[2]
                    y_min, y_max = bbox[1], bbox[1] + bbox[3]
                    annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])
            if iscrowd or (not annos):
                continue
        img_size = get_imgSize(os.path.join(img_id_file_path, file))
        # PIL returns (width, height); downstream wants (height, width)
        image_shape = np.array([img_size[1], img_size[0]])
        result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
        result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")
        boxes = np.fromfile(result_path_0, dtype=np.float32).reshape(config.num_ssd_boxes, 4)
        box_scores = np.fromfile(result_path_1, dtype=np.float32).reshape(config.num_ssd_boxes, config.num_classes)
        pred_data.append({
            "boxes": boxes,
            "box_scores": box_scores,
            "img_id": img_id,
            "image_shape": image_shape
        })
    mAP = metrics(pred_data, args.anno_file)
    print(f" mAP:{mAP}")
if __name__ == '__main__':
    # Entry point: compute mAP from the dumped 310 inference results.
    get_result(args.result_path, args.img_path)
| [
"numpy.fromfile",
"os.listdir",
"PIL.Image.open",
"argparse.ArgumentParser",
"src.eval_utils.metrics",
"pycocotools.coco.COCO",
"os.path.join",
"numpy.squeeze",
"numpy.array"
] | [((862, 920), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ssd acc calculation"""'}), "(description='ssd acc calculation')\n", (885, 920), False, 'import argparse\n'), ((1332, 1353), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (1342, 1353), False, 'from PIL import Image\n'), ((1850, 1878), 'os.listdir', 'os.listdir', (['img_id_file_path'], {}), '(img_id_file_path)\n', (1860, 1878), False, 'import os\n'), ((3396, 3430), 'src.eval_utils.metrics', 'metrics', (['pred_data', 'args.anno_file'], {}), '(pred_data, args.anno_file)\n', (3403, 3430), False, 'from src.eval_utils import metrics\n'), ((1664, 1684), 'pycocotools.coco.COCO', 'COCO', (['args.anno_file'], {}), '(args.anno_file)\n', (1668, 1684), False, 'from pycocotools.coco import COCO\n'), ((2815, 2851), 'numpy.array', 'np.array', (['[img_size[1], img_size[0]]'], {}), '([img_size[1], img_size[0]])\n', (2823, 2851), True, 'import numpy as np\n'), ((2876, 2926), 'os.path.join', 'os.path.join', (['result_path', "(img_ids_name + '_0.bin')"], {}), "(result_path, img_ids_name + '_0.bin')\n", (2888, 2926), False, 'import os\n'), ((2951, 3001), 'os.path.join', 'os.path.join', (['result_path', "(img_ids_name + '_1.bin')"], {}), "(result_path, img_ids_name + '_1.bin')\n", (2963, 3001), False, 'import os\n'), ((1985, 2009), 'numpy.squeeze', 'np.squeeze', (['img_ids_name'], {}), '(img_ids_name)\n', (1995, 2009), True, 'import numpy as np\n'), ((2755, 2791), 'os.path.join', 'os.path.join', (['img_id_file_path', 'file'], {}), '(img_id_file_path, file)\n', (2767, 2791), False, 'import os\n'), ((3018, 3062), 'numpy.fromfile', 'np.fromfile', (['result_path_0'], {'dtype': 'np.float32'}), '(result_path_0, dtype=np.float32)\n', (3029, 3062), True, 'import numpy as np\n'), ((3117, 3161), 'numpy.fromfile', 'np.fromfile', (['result_path_1'], {'dtype': 'np.float32'}), '(result_path_1, dtype=np.float32)\n', (3128, 3161), True, 'import numpy as np\n')] |
from datetime import datetime,timedelta
import numpy as np
import json
from urllib.request import urlopen
from html.parser import HTMLParser
import os
# -----------------------------------------------
# General
# -----------------------------------------------
def get_dir(dirname, json_file='input/dirs.json'):
    """Look up the configured path for `dirname` in a JSON directory map."""
    with open(json_file, 'r') as handle:
        return json.load(handle)[dirname]
def get_variable_name(model, variable, json_file='input/variables.json'):
    """Translate a generic variable name into the name used by `model`."""
    with open(json_file, 'r') as handle:
        variable_maps = json.load(handle)
    return variable_maps[model][variable]
def get_ecmwf_variable_code(variable, json_file='input/ecmwf_codes.json'):
    """Return the ECMWF parameter code registered for `variable`."""
    with open(json_file, 'r') as handle:
        return json.load(handle)[variable]
def get_variable_name_reverse(model, variable, json_file='input/variables.json'):
    """Inverse lookup: map a model-specific variable name back to the
    generic name (first match in file order wins)."""
    with open(json_file, 'r') as handle:
        variable_map = json.load(handle)[model]
    position = list(variable_map.values()).index(variable)
    return list(variable_map.keys())[position]
def get_urls(model, json_file='input/urls.json'):
    """Return the URL configuration entry for the given model."""
    with open(json_file, 'r') as handle:
        return json.load(handle)[model]
def get_logins(model, json_file='input/logins.json'):
    """Return the login credentials entry for the given model."""
    with open(json_file, 'r') as handle:
        return json.load(handle)[model]
def get_ncfiles_in_dir(input_dir):
    """Return the names of all '.nc' files in `input_dir` (listdir order)."""
    return [entry for entry in os.listdir(input_dir) if entry.endswith('.nc')]
def get_ncfiles_in_time_range(input_dir, start_date, end_date, including_end=1, timeformat='%Y%m%d'):
    """Return nc filenames in `input_dir` whose names start with a date
    between start_date and end_date (end inclusive when including_end=1).

    Files are returned grouped per day, in directory-listing order within a day.
    """
    available = get_ncfiles_in_dir(input_dir)
    total_days = (end_date - start_date).days + including_end
    selected = []
    for offset in range(total_days):
        prefix = (start_date + timedelta(days=offset)).strftime(timeformat)
        selected.extend(name for name in available if name.startswith(prefix))
    return selected
def get_start_and_end_indices(array, start_value, end_value):
    """Return (first, last) index of the entries of `array` lying inside
    the closed interval [start_value, end_value]."""
    in_range = np.where(np.logical_and(array >= start_value, array <= end_value))[0]
    return (in_range[0], in_range[-1])
def get_closest_index(A,target):
    """Return the index (or indices) in sorted array A closest to target."""
    # A must be sorted!
    # searchsorted gives the insertion point; clip so idx-1 and idx are
    # both valid positions to compare against.
    idx = A.searchsorted(target)
    idx = np.clip(idx,1,len(A)-1)
    left = A[idx-1]
    right = A[idx]
    # Boolean arithmetic: step back one position wherever the left
    # neighbour is strictly closer to the target than the right one.
    idx -= target-left < right-target
    return idx
def rename_ncfiles_in_dir(input_dir: str, filename_indices: list, filename_format: str, new_filename_format: str):
    """Rename every nc file in `input_dir`: parse the date substring at
    filename_indices with filename_format and reformat it with
    new_filename_format (adding the '.nc' extension)."""
    start, stop = filename_indices[0], filename_indices[1]
    for ncfile in get_ncfiles_in_dir(input_dir):
        file_date = datetime.strptime(ncfile[start:stop], filename_format)
        os.rename(input_dir + ncfile,
                  input_dir + file_date.strftime(new_filename_format) + '.nc')
# -----------------------------------------------
# Timeseries
# -----------------------------------------------
def get_time_index(time_array,time):
    """Return the exact index of `time` in `time_array`; raise ValueError
    when the time is missing or occurs more than once."""
    matches = np.where(time_array == time)[0]
    if len(matches) == 0:
        raise ValueError('Requested time not found in time array.')
    if len(matches) > 1:
        raise ValueError('Multiple times found in time array that equal requested time.')
    return matches[0]
def get_time_indices(timeseries,time):
    """Return every index in `timeseries` whose calendar date equals the
    date of `time`; raise ValueError when none match."""
    target_date = time.date()
    matches = [i for i, t in enumerate(timeseries) if t.date() == target_date]
    if not matches:
        raise ValueError(f'Time {time.strftime("%d-%m-%Y")} not found in timeseries.')
    return matches
def get_closest_time_index(time_array,time):
    """Return the index of the entry of `time_array` closest to `time`
    (the exact index when an exact match exists)."""
    deltas = abs(time_array - time)
    return np.where(deltas == deltas.min())[0][0]
def get_l_time_range(time, start_time, end_time):
    """Boolean mask of the entries of `time` within [start_time, end_time].

    Plain ``datetime.date`` bounds are promoted to midnight datetimes so the
    comparison against a datetime array is well-defined.
    """
    from datetime import date
    # Bug fix: the original tested `type(x) is datetime.date`, but `datetime`
    # here is the class from `from datetime import datetime`, so that
    # attribute is the datetime.date() *method* — the test was always False
    # and date bounds were never converted (and `datetime.datetime(...)`
    # would have raised AttributeError had the branch ever run).
    if isinstance(start_time, date) and not isinstance(start_time, datetime):
        start_time = datetime(start_time.year, start_time.month, start_time.day)
    if isinstance(end_time, date) and not isinstance(end_time, datetime):
        end_time = datetime(end_time.year, end_time.month, end_time.day)
    l_start = time >= start_time
    l_end = time <= end_time
    return l_start & l_end
def get_n_months(start_date,end_date):
    """Number of whole calendar months from start_date to end_date."""
    year_diff = end_date.year - start_date.year
    return year_diff * 12 + (end_date.month - start_date.month)
def add_month_to_timestamp(timestamp,n_month):
    """Shift `timestamp` forward by n_month calendar months, keeping the
    day of month (raises for days missing in the target month)."""
    total_months = timestamp.year * 12 + (timestamp.month - 1) + n_month
    new_year, month_index = divmod(total_months, 12)
    return datetime(new_year, month_index + 1, timestamp.day)
def convert_time_to_datetime(time_org, time_units):
    """Convert numeric model times to datetimes.

    :param time_org: numpy scalar or array of offsets from the base time;
        NaN entries are preserved as NaN
    :param time_units: CF-style string such as 'seconds since 1995-01-01'
        or 'days since 1995-01-01T00:00' (the latter used by Parcels)
    :return: a single datetime for 0-d input, else np.array of datetimes
    """
    i_start_time = time_units.index('since') + len('since') + 1
    if 'T' in time_units:  # 'YYYY-mm-ddTHH:MM' format used by Parcels
        i_end_time = i_start_time + len('YYYY-mm-ddTHH:MM')
        base_time = datetime.strptime(time_units[i_start_time:i_end_time], '%Y-%m-%dT%H:%M')
    else:  # 'YYYY-mm-dd' format used by multiple numerical models
        i_end_time = i_start_time + len('YYYY-mm-dd')
        base_time = datetime.strptime(time_units[i_start_time:i_end_time], '%Y-%m-%d')
    # Single code path for all three units (the original triplicated the
    # conversion loop for seconds/hours/days).
    for unit in ('seconds', 'hours', 'days'):
        if time_units.startswith(unit):
            break
    else:
        raise ValueError('Unknown time units for time conversion to datetime.')
    if time_org.shape == ():  # numpy 0-d scalar
        return base_time + timedelta(**{unit: float(time_org)})
    time = []
    for t in time_org:
        if np.isnan(t):
            time.append(np.nan)
        else:
            time.append(base_time + timedelta(**{unit: float(t)}))
    return np.array(time)
def convert_datetime_to_time(time_org,time_units='seconds',time_origin=datetime(1995,1,1)):
    """Convert datetimes to numeric offsets from `time_origin`.

    :return: (np.array of offsets, CF-style units string)
    """
    if time_units == 'seconds':
        divisor = 1
    elif time_units == 'hours':
        divisor = 60 * 60
    elif time_units == 'days':
        divisor = 24 * 60 * 60
    else:
        raise ValueError('Unknown time units requested fro time conversion from datetime.')
    offsets = [(t - time_origin).total_seconds() / divisor for t in time_org]
    units = f'{time_units} since {time_origin.strftime("%Y-%m-%d %H:%M")}'
    return np.array(offsets), units
# -----------------------------------------------
# Coordinates
# -----------------------------------------------
def convert_lon_360_to_180(lon):
    """Map longitudes from [0, 360) onto (-180, 180] and sort ascending.

    :return: (sorted shifted longitudes, index array that performs the sort)
    """
    shifted = np.where(lon > 180, lon - 360, lon)
    order = np.argsort(shifted)
    return shifted[order], order
# -----------------------------------------------
# OpenDAP
# -----------------------------------------------
def get_ncfiles_from_opendap_catalog(catalog_url):
    """Download an OPeNDAP catalog page and return the nc filenames it lists."""
    entries = parse_html(download_html(catalog_url))
    return [entry for entry in entries if entry.endswith('.nc')]
def download_html(url):
    """Fetch `url` and return the response body decoded as UTF-8 text."""
    catalog_response = urlopen(url)
    return catalog_response.read().decode('UTF-8')
def parse_html(html_text):
    """Feed `html_text` through OpendapHtmlParser and return the collected
    text fragments."""
    html_parser = OpendapHtmlParser()
    html_parser.feed(html_text)
    return html_parser.data
class OpendapHtmlParser(HTMLParser):
    """HTML parser that accumulates every text node it sees into self.data."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.data = []
    def handle_data(self,data):
        self.data.append(data) | [
"datetime.datetime",
"numpy.copy",
"html.parser.HTMLParser.__init__",
"os.listdir",
"numpy.logical_and",
"numpy.where",
"datetime.datetime.strptime",
"os.rename",
"datetime.datetime.datetime",
"numpy.argsort",
"numpy.array",
"numpy.isnan",
"json.load",
"datetime.timedelta",
"urllib.reque... | [((1518, 1539), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (1528, 1539), False, 'import os\n'), ((2193, 2249), 'numpy.logical_and', 'np.logical_and', (['(array >= start_value)', '(array <= end_value)'], {}), '(array >= start_value, array <= end_value)\n', (2207, 2249), True, 'import numpy as np\n'), ((4909, 4945), 'datetime.datetime', 'datetime', (['year', 'month', 'timestamp.day'], {}), '(year, month, timestamp.day)\n', (4917, 4945), False, 'from datetime import datetime, timedelta\n'), ((6757, 6777), 'datetime.datetime', 'datetime', (['(1995)', '(1)', '(1)'], {}), '(1995, 1, 1)\n', (6765, 6777), False, 'from datetime import datetime, timedelta\n'), ((7432, 7444), 'numpy.copy', 'np.copy', (['lon'], {}), '(lon)\n', (7439, 7444), True, 'import numpy as np\n'), ((7505, 7523), 'numpy.argsort', 'np.argsort', (['lon180'], {}), '(lon180)\n', (7515, 7523), True, 'import numpy as np\n'), ((8011, 8023), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (8018, 8023), False, 'from urllib.request import urlopen\n'), ((366, 378), 'json.load', 'json.load', (['f'], {}), '(f)\n', (375, 378), False, 'import json\n'), ((537, 549), 'json.load', 'json.load', (['f'], {}), '(f)\n', (546, 549), False, 'import json\n'), ((741, 753), 'json.load', 'json.load', (['f'], {}), '(f)\n', (750, 753), False, 'import json\n'), ((922, 934), 'json.load', 'json.load', (['f'], {}), '(f)\n', (931, 934), False, 'import json\n'), ((1253, 1265), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1262, 1265), False, 'import json\n'), ((1403, 1415), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1412, 1415), False, 'import json\n'), ((2259, 2278), 'numpy.where', 'np.where', (['l_indices'], {}), '(l_indices)\n', (2267, 2278), True, 'import numpy as np\n'), ((2838, 2925), 'datetime.datetime.strptime', 'datetime.strptime', (['ncfile[filename_indices[0]:filename_indices[1]]', 'filename_format'], {}), '(ncfile[filename_indices[0]:filename_indices[1]],\n 
filename_format)\n', (2855, 2925), False, 'from datetime import datetime, timedelta\n'), ((3009, 3043), 'os.rename', 'os.rename', (['input_path', 'output_path'], {}), '(input_path, output_path)\n', (3018, 3043), False, 'import os\n'), ((3302, 3330), 'numpy.where', 'np.where', (['(time_array == time)'], {}), '(time_array == time)\n', (3310, 3330), True, 'import numpy as np\n'), ((4227, 4295), 'datetime.datetime.datetime', 'datetime.datetime', (['start_time.year', 'start_time.month', 'start_time.day'], {}), '(start_time.year, start_time.month, start_time.day)\n', (4244, 4295), False, 'from datetime import datetime, timedelta\n'), ((4353, 4415), 'datetime.datetime.datetime', 'datetime.datetime', (['end_time.year', 'end_time.month', 'end_time.day'], {}), '(end_time.year, end_time.month, end_time.day)\n', (4370, 4415), False, 'from datetime import datetime, timedelta\n'), ((5216, 5288), 'datetime.datetime.strptime', 'datetime.strptime', (['time_units[i_start_time:i_end_time]', '"""%Y-%m-%dT%H:%M"""'], {}), "(time_units[i_start_time:i_end_time], '%Y-%m-%dT%H:%M')\n", (5233, 5288), False, 'from datetime import datetime, timedelta\n'), ((5424, 5490), 'datetime.datetime.strptime', 'datetime.strptime', (['time_units[i_start_time:i_end_time]', '"""%Y-%m-%d"""'], {}), "(time_units[i_start_time:i_end_time], '%Y-%m-%d')\n", (5441, 5490), False, 'from datetime import datetime, timedelta\n'), ((5847, 5861), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (5855, 5861), True, 'import numpy as np\n'), ((7250, 7264), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (7258, 7264), True, 'import numpy as np\n'), ((8280, 8305), 'html.parser.HTMLParser.__init__', 'HTMLParser.__init__', (['self'], {}), '(self)\n', (8299, 8305), False, 'from html.parser import HTMLParser\n'), ((1924, 1941), 'datetime.timedelta', 'timedelta', ([], {'days': 'n'}), '(days=n)\n', (1933, 1941), False, 'from datetime import datetime, timedelta\n'), ((6215, 6229), 'numpy.array', 'np.array', (['time'], 
{}), '(time)\n', (6223, 6229), True, 'import numpy as np\n'), ((5698, 5709), 'numpy.isnan', 'np.isnan', (['t'], {}), '(t)\n', (5706, 5709), True, 'import numpy as np\n'), ((6580, 6594), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (6588, 6594), True, 'import numpy as np\n'), ((6068, 6079), 'numpy.isnan', 'np.isnan', (['t'], {}), '(t)\n', (6076, 6079), True, 'import numpy as np\n'), ((6434, 6445), 'numpy.isnan', 'np.isnan', (['t'], {}), '(t)\n', (6442, 6445), True, 'import numpy as np\n')] |
import os
import argparse
import subprocess
import numpy as np
import pandas as pd
def training_model(filename, technique, pruning_rate, layer):
    """ Fine-tune (or train from scratch) a pruned darknet model.

    :param filename: base name shared by the .data/.cfg/weight files
    :param technique: pruning method; 'from-scratch' trains without pre-weights
    :param pruning_rate: pruning percentage, used to pick the weight file
    :param layer: layer up to which pre-trained weights were frozen
    """
    # Training with pre-trained weights
    if technique.upper() != 'FROM-SCRATCH':
        weights = filename + str(pruning_rate) + '.conv.' + str(layer)
        command = './darknet detector train ' + filename + '.data ' + filename + '.cfg ' + weights + ' -dont_show -map'
    # Training from scratch
    else:
        command = './darknet detector train ' + filename + '.data ' + filename + '.cfg -dont_show -map'
    # Context manager guarantees the log file is closed even if the
    # subprocess call raises (the original close could be skipped).
    with open('../eval.txt', 'a+') as f:
        subprocess.call(command, shell = True, stdout = f)
    print('\n[DONE] Fine-tuning of the model pruned by ' + str(pruning_rate) + '%\n')
def pre_weights(filename, pruning_rate, layer):
    """ Generate partial pre-weights from a trained weight file by freezing
    layers up to `layer` with darknet's 'partial' command.

    :param filename: base name shared by the .cfg/.weights files
    :param pruning_rate: pruning percentage, encoded into the output name
    :param layer: last layer whose weights are kept
    """
    weights = filename + str(pruning_rate) + '.conv.' + str(layer)
    command = './darknet partial ' + filename + '.cfg ' + filename + '.weights ' + weights + ' ' + str(layer)
    # Context manager guarantees the log file is closed even on failure.
    with open('../eval.txt', 'a+') as f:
        subprocess.call(command, shell = True, stdout = f)
    print('\n[DONE] Pre-weights of the model pruned by ' + str(pruning_rate) + '%\n')
def valid_set(filename, set):
    """ Point the 'valid' entry of `filename`.data at the given split.

    :param filename: dataset base name (without the .data extension)
    :param set: split name, e.g. 'valid' or 'test'
    """
    # Read, modify, then rewrite with context managers — the original
    # re-opened the file for writing without closing the read handle first.
    with open(filename + '.data', 'r') as f:
        lines = f.readlines()
    # Loop over the lines
    for i, line in enumerate(lines):
        if 'valid' in line:
            lines[i] = 'valid = data/' + filename + '_' + set + '.txt\n'
    with open(filename + '.data', 'w') as f:
        f.writelines(lines)
def hyperparams(filename, img_size, iter, lr, steps):
    """ Rewrite training hyperparameters inside `filename`.cfg.

    :param filename: config base name (without the .cfg extension)
    :param img_size: network input width/height in pixels
    :param iter: max_batches (total training iterations)
    :param lr: learning rate
    :param steps: comma-separated iterations at which the LR is scaled
    """
    # Read, modify, then rewrite with context managers — the original
    # re-opened the file for writing without closing the read handle first.
    with open(filename + '.cfg', 'r') as f:
        lines = f.readlines()
    # Loop over the lines
    for i, line in enumerate(lines):
        if 'width' in line:
            lines[i] = 'width = ' + str(img_size) + '\n'
        elif 'height' in line:
            lines[i] = 'height = ' + str(img_size) + '\n'
        elif 'steps = ' in line:
            lines[i] = 'steps = ' + steps + '\n'
        elif 'learning_rate' in line:
            lines[i] = 'learning_rate = ' + str(lr) + '\n'
        elif 'max_batches' in line:
            lines[i] = 'max_batches = ' + str(iter) + '\n'
        # NOTE(review): the literal '^' below only matches lines actually
        # containing a caret — confirm that is intended rather than a
        # leftover regex anchor.
        elif '^batch =' in line:
            lines[i] = 'batch = 64\n'
        elif '^subdivisions' in line:
            lines[i] = 'subdivisions = 16\n'
    with open(filename + '.cfg', 'w') as f:
        f.writelines(lines)
if __name__ == '__main__':
    # CLI: fine-tune pruned darknet models at every pruning rate 10%..90%.
    parser = argparse.ArgumentParser()
    parser.add_argument('--method', type = str, help = 'Pruning method')
    parser.add_argument('--imgsize', type = int, default = 416, help = 'Image size')
    parser.add_argument('--dataset', type = str, default = 'dfire', help = 'Dataset name')
    parser.add_argument('--lr', type = float, default = 0.01, help = 'Learning rate')
    parser.add_argument('--tuning-iter', type = int, default = 30000, help = 'Number of fine-tuning iterations')
    parser.add_argument('--layer', type = int, default = 161, help = 'Weights frozen up to this layer for fine-tuning')
    parser.add_argument('--steps', type = str, default = '24000,27000', help = 'At these iterations the learning rate will be multiplied by scales factor (0.1 by default)')
    opt = parser.parse_args()
    # Open root folder
    root = 'Fine-Tuning/' + opt.method + os.sep
    os.chdir(root)
    # Pruned models with pruning rate from 10% to 90%
    folders = np.arange(start = 10, stop = 100, step = 10)
    # Partial weights
    weights = np.arange(start = 1000, stop = opt.tuning_iter + 1000, step = 1000)
    try:
        # Remove current eval.txt file
        # NOTE(review): bare except silently ignores any failure, not just a
        # missing file — consider narrowing to FileNotFoundError.
        os.remove('eval.txt')
    except:
        pass
    for folder in folders:
        # Open current folder
        subdir = str(folder) + os.sep
        os.chdir(subdir)
        # Change hyperparameters
        hyperparams(opt.dataset, opt.imgsize, opt.tuning_iter, opt.lr, opt.steps)
        # Change validation set
        valid_set(opt.dataset, 'valid')
        # Pre-trained weights
        if opt.method.upper() != 'FROM-SCRATCH':
            # Freezing layers to generate pre-weights
            pre_weights(opt.dataset, folder, opt.layer)
        # Training pruned model
        training_model(opt.dataset, opt.method, folder, opt.layer)
        # Remove partial weights
        os.chdir('weights/')
        for w in weights:
            try:
                os.remove(opt.dataset + '_' + str(w) + '.weights')
            except:
                pass
        os.remove(opt.dataset + '_last.weights')
        os.remove(opt.dataset + '_final.weights')
        # Returns to root folder
        os.chdir('../../') | [
"argparse.ArgumentParser",
"os.chdir",
"subprocess.call",
"numpy.arange",
"os.remove"
] | [((744, 790), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)', 'stdout': 'f'}), '(command, shell=True, stdout=f)\n', (759, 790), False, 'import subprocess\n'), ((1341, 1387), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)', 'stdout': 'f'}), '(command, shell=True, stdout=f)\n', (1356, 1387), False, 'import subprocess\n'), ((3208, 3233), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3231, 3233), False, 'import argparse\n'), ((4082, 4096), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (4090, 4096), False, 'import os\n'), ((4166, 4204), 'numpy.arange', 'np.arange', ([], {'start': '(10)', 'stop': '(100)', 'step': '(10)'}), '(start=10, stop=100, step=10)\n', (4175, 4204), True, 'import numpy as np\n'), ((4247, 4308), 'numpy.arange', 'np.arange', ([], {'start': '(1000)', 'stop': '(opt.tuning_iter + 1000)', 'step': '(1000)'}), '(start=1000, stop=opt.tuning_iter + 1000, step=1000)\n', (4256, 4308), True, 'import numpy as np\n'), ((4372, 4393), 'os.remove', 'os.remove', (['"""eval.txt"""'], {}), "('eval.txt')\n", (4381, 4393), False, 'import os\n'), ((4524, 4540), 'os.chdir', 'os.chdir', (['subdir'], {}), '(subdir)\n', (4532, 4540), False, 'import os\n'), ((5060, 5080), 'os.chdir', 'os.chdir', (['"""weights/"""'], {}), "('weights/')\n", (5068, 5080), False, 'import os\n'), ((5240, 5280), 'os.remove', 'os.remove', (["(opt.dataset + '_last.weights')"], {}), "(opt.dataset + '_last.weights')\n", (5249, 5280), False, 'import os\n'), ((5289, 5330), 'os.remove', 'os.remove', (["(opt.dataset + '_final.weights')"], {}), "(opt.dataset + '_final.weights')\n", (5298, 5330), False, 'import os\n'), ((5373, 5391), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (5381, 5391), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 13:45:30 2019
@author: LOVESA
"""
#importing relevant packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import os
import cv2
import math
from os.path import isfile, join
import Helper as Functions
#Parameters definition
# Canny edge-detector thresholds
low_threshold = 50
high_threshold = 150
# Gaussian blur kernel size (passed to Functions.gaussian_blur)
kernel_size = 7
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 2 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 160 # minimum number of votes (intersections in Hough grid cell)
min_line_len = 10 #minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
# image processing pipeline function
def pipeline(image):
    """Lane-detection pipeline: grayscale, blur, Canny edges, dilation,
    region-of-interest mask, Hough transform, then overlay the detected
    lines on the input frame.

    :param image: road image as a numpy array
    :return: the input image blended with the detected lane lines in red
    """
    plt.imshow(image) #plot original image
    # NOTE(review): initial_img is never used below — candidate for removal.
    initial_img = np.copy(image)*0 # creating a blank to draw lines on
    #from PipelineFunctions import grayscale, gaussian blur and canny functions on the whole image
    gray = Functions.grayscale(image)
    plt.imshow(gray, cmap='gray') #grayscale image
    gaussian_blur = Functions.gaussian_blur(gray, kernel_size)
    plt.imshow(gaussian_blur)
    edges = Functions.canny(gaussian_blur, low_threshold, high_threshold)
    plt.imshow(edges) #plots binary image
    # dilate()ing edge image.
    #This will make the lines thicker which will help fit the Hough lines better
    dilated = cv2.dilate(edges, np.ones((3,3), dtype=np.uint8))
    plt.imshow(dilated)
    #Cropping the image to within the region of interest
    #imshape = image.shape
    #vertices = np.array([[(140,imshape[0]),(440, 325), (520, 325), (960,imshape[0])]], dtype=np.int32)
    # assumes a ~960x540 frame with the lane inside this trapezoid — TODO confirm
    vertices = np.array([[(140,550),(440, 325), (550, 325), (920,550)]], dtype=np.int32)
    #the co-ordinate origin in the image is top-left corner of image
    # create a masked edges image
    masked_image = Functions.region_of_interest(dilated, vertices)
    plt.imshow(masked_image) #plots binary image with lines within boundary
    #Shows the region
    #x, y = vertices.T
    #plt.plot(x, y, 'b--', lw=4)
    #Detecting shape edges in the remaining (cropped) image data
    #Applying the hough transform
    hough_img = Functions.hough_lines(masked_image, rho, theta, threshold, min_line_len, max_line_gap)
    #Testing output of hough_img
    #print(hough_img.shape)
    #print(hough_img)
    #for i in range(hough_img.shape[0]):
    #    for j in range(hough_img.shape[1]):
    #        for k in range(3):
    #            if hough_img[i,j,k] > 1e-4:
    #                print(i,j,k,hough_img[i,j,k])
    plt.imshow(hough_img) #Prints full binary image with red lines only
    weighted_img = Functions.weighted_img(hough_img,image, α=0.8, β=1.,γ=0.)
    plt.imshow(weighted_img) #Prints original image with lines highlighted red
    return weighted_img | [
"matplotlib.pyplot.imshow",
"Helper.region_of_interest",
"Helper.canny",
"numpy.copy",
"Helper.hough_lines",
"numpy.ones",
"Helper.weighted_img",
"numpy.array",
"Helper.grayscale",
"Helper.gaussian_blur"
] | [((910, 927), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (920, 927), True, 'import matplotlib.pyplot as plt\n'), ((1136, 1162), 'Helper.grayscale', 'Functions.grayscale', (['image'], {}), '(image)\n', (1155, 1162), True, 'import Helper as Functions\n'), ((1168, 1197), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gray'], {'cmap': '"""gray"""'}), "(gray, cmap='gray')\n", (1178, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1279), 'Helper.gaussian_blur', 'Functions.gaussian_blur', (['gray', 'kernel_size'], {}), '(gray, kernel_size)\n', (1260, 1279), True, 'import Helper as Functions\n'), ((1285, 1310), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gaussian_blur'], {}), '(gaussian_blur)\n', (1295, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1385), 'Helper.canny', 'Functions.canny', (['gaussian_blur', 'low_threshold', 'high_threshold'], {}), '(gaussian_blur, low_threshold, high_threshold)\n', (1339, 1385), True, 'import Helper as Functions\n'), ((1391, 1408), 'matplotlib.pyplot.imshow', 'plt.imshow', (['edges'], {}), '(edges)\n', (1401, 1408), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1637), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dilated'], {}), '(dilated)\n', (1628, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1923), 'numpy.array', 'np.array', (['[[(140, 550), (440, 325), (550, 325), (920, 550)]]'], {'dtype': 'np.int32'}), '([[(140, 550), (440, 325), (550, 325), (920, 550)]], dtype=np.int32)\n', (1855, 1923), True, 'import numpy as np\n'), ((2052, 2099), 'Helper.region_of_interest', 'Functions.region_of_interest', (['dilated', 'vertices'], {}), '(dilated, vertices)\n', (2080, 2099), True, 'import Helper as Functions\n'), ((2105, 2129), 'matplotlib.pyplot.imshow', 'plt.imshow', (['masked_image'], {}), '(masked_image)\n', (2115, 2129), True, 'import matplotlib.pyplot as plt\n'), ((2389, 2479), 'Helper.hough_lines', 'Functions.hough_lines', (['masked_image', 'rho', 'theta', 
'threshold', 'min_line_len', 'max_line_gap'], {}), '(masked_image, rho, theta, threshold, min_line_len,\n max_line_gap)\n', (2410, 2479), True, 'import Helper as Functions\n'), ((2794, 2815), 'matplotlib.pyplot.imshow', 'plt.imshow', (['hough_img'], {}), '(hough_img)\n', (2804, 2815), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2949), 'Helper.weighted_img', 'Functions.weighted_img', (['hough_img', 'image'], {'α': '(0.8)', 'β': '(1.0)', 'γ': '(0.0)'}), '(hough_img, image, α=0.8, β=1.0, γ=0.0)\n', (2910, 2949), True, 'import Helper as Functions\n'), ((2951, 2975), 'matplotlib.pyplot.imshow', 'plt.imshow', (['weighted_img'], {}), '(weighted_img)\n', (2961, 2975), True, 'import matplotlib.pyplot as plt\n'), ((969, 983), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (976, 983), True, 'import numpy as np\n'), ((1581, 1612), 'numpy.ones', 'np.ones', (['(3, 3)'], {'dtype': 'np.uint8'}), '((3, 3), dtype=np.uint8)\n', (1588, 1612), True, 'import numpy as np\n')] |
import csv # csv libary
import cv2
from math import ceil
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from scipy import ndimage
# Global Parameters
epochs = 5
batch_size = 32
validation_split = 0.2
correction = 0.2
# Read in each row/line from driving_log.csv
lines = [] # samples
with open('training_data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
'''
Generators can be a great way to work with large amounts of data. Instead of storing the preprocessed data in memory all at once, using a generator you can pull pieces
of the data and process them on the fly only when you need them, which is much more memory-efficient.
A generator is like a coroutine, a process that can run separately from another main routine, which makes it a useful Python function. Instead of using return, the
generator uses yield, which still returns the desired output values but saves the current values of all the generator's variables. When the generator is called a second
time it re-starts right after the yield statement, with all its variables set to the same values as before.
'''
def generator(samples, batch_size):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
for batch_sample in batch_samples:
for i in range(3):
source_path = batch_sample[i]
filename = source_path.split('/')[-1]
current_path ='training_data/IMG/' + filename
image = ndimage.imread(current_path)
images.append(image)
# create adjusted steering measurements for the side camera images
steering_center = float(batch_sample[3])
steering_left = steering_center + correction
steering_right = steering_center - correction
# add angles to data set
measurements.extend([steering_center])
measurements.extend([steering_left])
measurements.extend([steering_right])
# Data augmentation
augmented_images, augmented_measurements = [], []
for image, measurement in zip(images, measurements):
augmented_images.append(image)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement)
augmented_measurements.append(measurement*-1.0)
# Keras requires arrays. Convert images and steering measurements to numpy arrays
X_train = np.array(augmented_images) # features from images
y_train = np.array(augmented_measurements) # ground truth measurments
# shuffle the data
yield shuffle(X_train, y_train)
# Utilize Generators
train_samples, validation_samples = train_test_split(lines, test_size=validation_split)
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
# Setup Keras
from keras.models import Sequential # The keras.models.Sequential class is a wrapper for the neural network model. It provides common functions like fit(), evaluate(), and compile().
from keras.models import Model # Create the Sequential model
from keras.layers import Lambda
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Conv2D, Cropping2D #
from keras.layers.pooling import MaxPooling2D
# Build the Neural Network Architecture
model = Sequential()
model.add(Lambda(lambda x: x /255.0 - 0.5, input_shape=(160,320,3))) # normalize the image by 255 the maximum for an image pixel between 0 and 1, then mean-center the image by subtracting -0.5 from each element, and give a image pixel of -0.5 to 0.5
model.add(Cropping2D(cropping=((75,25),(0,0)))) # cropping image 75 pixels from the top and 25 from the bottom, from "Even more powerful network video"
#NVIDIA END TO END NETWORK WITH MAX POOLING AND DROPOUTS ADDED AS DENOTED
model.add(Conv2D(24, (5,5), padding='valid', activation='relu')) # 24 filters 5x5 kernal
model.add(MaxPooling2D()) #ADDED
model.add(Dropout(0.5)) # ADDED dropout rate set to 0.5 for training/validation
model.add(Conv2D(36, (5,5), padding='valid', activation='relu'))
model.add(MaxPooling2D()) #ADDED
model.add(Conv2D(48, (5,5), padding='valid', activation='relu'))
model.add(MaxPooling2D()) #ADDED
model.add(Conv2D(64, (3,3), padding='valid', activation='relu'))
model.add(Conv2D(64, (1,1), padding='valid', activation='relu'))
model.add(MaxPooling2D()) #ADDED
model.add(Dropout(0.5)) # dropout rate set to 0.5 for training/validation
model.add(Flatten())
# Next, four fully connected layers
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1)) # single output node = predicted steering angle
model.summary() # print a model summary
# To compile the keras model use a "mean squared error" loss function, good for regression and "Adam" optimizer
model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator, steps_per_epoch = ceil(len(train_samples)/batch_size), validation_data = validation_generator, validation_steps = ceil(len(validation_samples)/batch_size), epochs=epochs, verbose=1)
# save trained model architecture
model.save('model.h5')
### print the keys contained in the history object
print(history_object.history.keys())
### Keras outputs a history object that contains the training and validation loss for each epoch.
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
# save the file (see next line)
plt.savefig('examples/mean_squared_error_loss.png')
| [
"keras.layers.core.Flatten",
"matplotlib.pyplot.ylabel",
"scipy.ndimage.imread",
"numpy.array",
"keras.layers.pooling.MaxPooling2D",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"csv.reader",
"matplotlib.pyplot.savefig",
"keras.layers.convolutional.Cropping2D",
"sklearn.model_selection.... | [((3208, 3259), 'sklearn.model_selection.train_test_split', 'train_test_split', (['lines'], {'test_size': 'validation_split'}), '(lines, test_size=validation_split)\n', (3224, 3259), False, 'from sklearn.model_selection import train_test_split\n'), ((3916, 3928), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3926, 3928), False, 'from keras.models import Sequential\n'), ((5913, 5953), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (5921, 5953), True, 'import matplotlib.pyplot as plt\n'), ((5954, 5998), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (5962, 5998), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6041), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (6008, 6041), True, 'import matplotlib.pyplot as plt\n'), ((6042, 6079), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (6052, 6079), True, 'import matplotlib.pyplot as plt\n'), ((6080, 6099), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6090, 6099), True, 'import matplotlib.pyplot as plt\n'), ((6100, 6165), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (6110, 6165), True, 'import matplotlib.pyplot as plt\n'), ((6199, 6250), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""examples/mean_squared_error_loss.png"""'], {}), "('examples/mean_squared_error_loss.png')\n", (6210, 6250), True, 'import matplotlib.pyplot as plt\n'), ((461, 480), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (471, 480), False, 'import csv\n'), ((3939, 3999), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 
0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (3945, 3999), False, 'from keras.layers import Lambda\n'), ((4190, 4229), 'keras.layers.convolutional.Cropping2D', 'Cropping2D', ([], {'cropping': '((75, 25), (0, 0))'}), '(cropping=((75, 25), (0, 0)))\n', (4200, 4229), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4417, 4471), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(24, (5, 5), padding='valid', activation='relu')\n", (4423, 4471), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4506, 4520), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (4518, 4520), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((4539, 4551), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4546, 4551), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((4619, 4673), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(36, (5, 5), padding='valid', activation='relu')\n", (4625, 4673), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4684, 4698), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (4696, 4698), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((4717, 4771), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(48, (5, 5), padding='valid', activation='relu')\n", (4723, 4771), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4782, 4796), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (4794, 4796), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((4815, 4869), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 
'activation': '"""relu"""'}), "(64, (3, 3), padding='valid', activation='relu')\n", (4821, 4869), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4880, 4934), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(64, (1, 1), padding='valid', activation='relu')\n", (4886, 4934), False, 'from keras.layers.convolutional import Conv2D, Cropping2D\n'), ((4945, 4959), 'keras.layers.pooling.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (4957, 4959), False, 'from keras.layers.pooling import MaxPooling2D\n'), ((4978, 4990), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4985, 4990), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((5052, 5061), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (5059, 5061), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((5110, 5120), 'keras.layers.core.Dense', 'Dense', (['(100)'], {}), '(100)\n', (5115, 5120), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((5132, 5141), 'keras.layers.core.Dense', 'Dense', (['(50)'], {}), '(50)\n', (5137, 5141), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((5153, 5162), 'keras.layers.core.Dense', 'Dense', (['(10)'], {}), '(10)\n', (5158, 5162), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((5174, 5182), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (5179, 5182), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((1398, 1414), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (1405, 1414), False, 'from sklearn.utils import shuffle\n'), ((2929, 2955), 'numpy.array', 'np.array', (['augmented_images'], {}), '(augmented_images)\n', (2937, 2955), True, 'import numpy as np\n'), ((3001, 3033), 'numpy.array', 'np.array', (['augmented_measurements'], {}), 
'(augmented_measurements)\n', (3009, 3033), True, 'import numpy as np\n'), ((3123, 3148), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (3130, 3148), False, 'from sklearn.utils import shuffle\n'), ((1873, 1901), 'scipy.ndimage.imread', 'ndimage.imread', (['current_path'], {}), '(current_path)\n', (1887, 1901), False, 'from scipy import ndimage\n'), ((2670, 2688), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2678, 2688), False, 'import cv2\n')] |
import scipy.io as io
import numpy as np
import os
from dataset.data_util import pil_load_img
from dataset.dataload import TextDataset, TextInstance
class TotalText(TextDataset):
def __init__(self, data_root, ignore_list=None, is_training=True, transform=None):
super().__init__(transform)
self.data_root = data_root
self.is_training = is_training
if ignore_list:
with open(ignore_list) as f:
ignore_list = f.readlines()
ignore_list = [line.strip() for line in ignore_list]
else:
ignore_list = []
self.image_root = os.path.join(data_root, 'Images', 'Train' if is_training else 'Test')
self.annotation_root = os.path.join(data_root, 'gt', 'Train' if is_training else 'Test')
self.image_list = os.listdir(self.image_root)
self.image_list = list(filter(lambda img: img.replace('.jpg', '') not in ignore_list, self.image_list))
self.annotation_list = ['poly_gt_{}.mat'.format(img_name.replace('.jpg', '')) for img_name in self.image_list]
def parse_mat(self, mat_path):
"""
.mat file parser
:param mat_path: (str), mat file path
:return: (list), TextInstance
"""
annot = io.loadmat(mat_path)
polygon = []
for cell in annot['polygt']:
x = cell[1][0]
y = cell[3][0]
text = cell[4][0]
if len(x) < 4: # too few points
continue
try:
ori = cell[5][0]
except:
ori = 'c'
pts = np.stack([x, y]).T.astype(np.int32)
polygon.append(TextInstance(pts, ori, text))
return polygon
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self.image_root, image_id)
# Read image data
image = pil_load_img(image_path)
# Read annotation
annotation_id = self.annotation_list[item]
annotation_path = os.path.join(self.annotation_root, annotation_id)
polygons = self.parse_mat(annotation_path)
for i, polygon in enumerate(polygons):
if polygon.text != '#':
polygon.find_bottom_and_sideline()
return self.get_training_data(image, polygons, image_id=image_id, image_path=image_path)
def __len__(self):
return len(self.image_list)
if __name__ == '__main__':
import os
from util.augmentation import BaseTransform, Augmentation
means = (0.485, 0.456, 0.406)
stds = (0.229, 0.224, 0.225)
transform = Augmentation(
size=512, mean=means, std=stds
)
trainset = TotalText(
data_root='data/total-text',
ignore_list='./ignore_list.txt',
is_training=True,
transform=transform
)
for idx in range(len(trainset)):
img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, meta = trainset[idx]
print(idx, img.shape) | [
"os.listdir",
"scipy.io.loadmat",
"os.path.join",
"util.augmentation.Augmentation",
"numpy.stack",
"dataset.data_util.pil_load_img",
"dataset.dataload.TextInstance"
] | [((2619, 2663), 'util.augmentation.Augmentation', 'Augmentation', ([], {'size': '(512)', 'mean': 'means', 'std': 'stds'}), '(size=512, mean=means, std=stds)\n', (2631, 2663), False, 'from util.augmentation import BaseTransform, Augmentation\n'), ((628, 697), 'os.path.join', 'os.path.join', (['data_root', '"""Images"""', "('Train' if is_training else 'Test')"], {}), "(data_root, 'Images', 'Train' if is_training else 'Test')\n", (640, 697), False, 'import os\n'), ((729, 794), 'os.path.join', 'os.path.join', (['data_root', '"""gt"""', "('Train' if is_training else 'Test')"], {}), "(data_root, 'gt', 'Train' if is_training else 'Test')\n", (741, 794), False, 'import os\n'), ((821, 848), 'os.listdir', 'os.listdir', (['self.image_root'], {}), '(self.image_root)\n', (831, 848), False, 'import os\n'), ((1265, 1285), 'scipy.io.loadmat', 'io.loadmat', (['mat_path'], {}), '(mat_path)\n', (1275, 1285), True, 'import scipy.io as io\n'), ((1824, 1863), 'os.path.join', 'os.path.join', (['self.image_root', 'image_id'], {}), '(self.image_root, image_id)\n', (1836, 1863), False, 'import os\n'), ((1907, 1931), 'dataset.data_util.pil_load_img', 'pil_load_img', (['image_path'], {}), '(image_path)\n', (1919, 1931), False, 'from dataset.data_util import pil_load_img\n'), ((2036, 2085), 'os.path.join', 'os.path.join', (['self.annotation_root', 'annotation_id'], {}), '(self.annotation_root, annotation_id)\n', (2048, 2085), False, 'import os\n'), ((1674, 1702), 'dataset.dataload.TextInstance', 'TextInstance', (['pts', 'ori', 'text'], {}), '(pts, ori, text)\n', (1686, 1702), False, 'from dataset.dataload import TextDataset, TextInstance\n'), ((1611, 1627), 'numpy.stack', 'np.stack', (['[x, y]'], {}), '([x, y])\n', (1619, 1627), True, 'import numpy as np\n')] |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ..proto import onnx_proto
from ..common._apply_operation import apply_reshape
from ..common._registration import register_converter
import numpy as np
def convert_sklearn_label_binariser(scope, operator, container):
op = operator.raw_operator
classes = op.classes_
zeros_tensor = np.zeros((len(classes))).astype(np.int)
unit_tensor = np.ones((len(classes))).astype(np.int)
reshaped_input_name = scope.get_unique_variable_name('reshaped_input')
shape_result_name = scope.get_unique_variable_name('shape_result')
zeros_matrix_name = scope.get_unique_variable_name('zeros_matrix')
unit_matrix_name = scope.get_unique_variable_name('unit_matrix')
classes_tensor_name = scope.get_unique_variable_name('classes_tensor')
equal_condition_tensor_name = scope.get_unique_variable_name('equal_condition_tensor')
zeros_tensor_name = scope.get_unique_variable_name('zero_tensor')
unit_tensor_name = scope.get_unique_variable_name('unit_tensor')
class_dtype = onnx_proto.TensorProto.STRING
if np.issubdtype(op.classes_.dtype, np.signedinteger):
class_dtype = onnx_proto.TensorProto.INT64
else:
classes = np.array([s.encode('utf-8') for s in classes])
container.add_initializer(classes_tensor_name, class_dtype,
[len(classes)], classes)
container.add_initializer(zeros_tensor_name, onnx_proto.TensorProto.INT64,
zeros_tensor.shape, zeros_tensor)
container.add_initializer(unit_tensor_name, onnx_proto.TensorProto.INT64,
unit_tensor.shape, unit_tensor)
apply_reshape(scope, operator.inputs[0].full_name, reshaped_input_name, container, desired_shape=[-1, 1])
container.add_node('Shape', reshaped_input_name, shape_result_name,
name=scope.get_unique_operator_name('shape'))
container.add_node('Tile', [zeros_tensor_name, shape_result_name], zeros_matrix_name,
name=scope.get_unique_operator_name('tile'), op_version=6)
container.add_node('Tile', [unit_tensor_name, shape_result_name], unit_matrix_name,
name=scope.get_unique_operator_name('tile'), op_version=6)
container.add_node('Equal', [classes_tensor_name, reshaped_input_name], equal_condition_tensor_name,
name=scope.get_unique_operator_name('equal'))
container.add_node('Where', [equal_condition_tensor_name, unit_matrix_name, zeros_matrix_name],
operator.output_full_names, name=scope.get_unique_operator_name('where'), op_version=9)
register_converter('SklearnLabelBinarizer', convert_sklearn_label_binariser)
| [
"numpy.issubdtype"
] | [((1355, 1405), 'numpy.issubdtype', 'np.issubdtype', (['op.classes_.dtype', 'np.signedinteger'], {}), '(op.classes_.dtype, np.signedinteger)\n', (1368, 1405), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import pytest
from nltk.metrics.distance import masi_distance
from pandas.testing import assert_series_equal
from crowdkit.aggregation.utils import get_accuracy
from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty
from crowdkit.metrics.performers import accuracy_on_aggregates
def test_consistency(toy_answers_df):
assert consistency(toy_answers_df) == 0.9384615384615385
class TestUncertaintyMetric:
def test_uncertainty_mean_per_task_skills(self, toy_answers_df):
performers_skills = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
assert uncertainty(toy_answers_df, performers_skills) == 0.6308666201949331
def test_uncertainty_raises_wrong_compte_by(self, toy_answers_df):
performers_skills = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
with pytest.raises(KeyError):
uncertainty(toy_answers_df, performers_skills, compute_by='invalid')
def test_uncertainty_docstring_examples(self):
assert uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
]
)
) == 0.0
assert uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'X', 'performer': 'C', 'label': 'Maybe'},
]
)
) == 1.0986122886681096
np.testing.assert_allclose(
uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'Y', 'performer': 'A', 'label': 'Yes'},
{'task': 'Y', 'performer': 'B', 'label': 'Yes'},
]
),
compute_by="task",
aggregate=False
), pd.Series([0.693147, 0.0], index=['X', 'Y'], name='task'), atol=1e-3
)
np.testing.assert_allclose(
uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'Y', 'performer': 'A', 'label': 'Yes'},
{'task': 'Y', 'performer': 'B', 'label': 'Yes'},
]
),
compute_by="performer",
aggregate=False
), pd.Series([0.0, 0.693147], index=['A', 'B'], name='performer'), atol=1e-3
)
def test_uncertainty_raises_skills_not_found(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
]
)
performers_skills = pd.Series(
[1, 1],
index=pd.Index(['A', 'B'], name='performer'),
)
with pytest.raises(AssertionError):
uncertainty(answers, performers_skills)
def test_uncertainty_per_performer(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '3', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'C', 'label': frozenset(['dog'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'C', 'label': frozenset(['cat'])},
]
)
performers_skills = pd.Series(
[1, 1, 1],
index=pd.Index(['A', 'B', 'C'], name='performer'),
)
entropies = uncertainty(
answers,
performers_skills,
compute_by='performer',
aggregate=False
)
assert isinstance(entropies, pd.Series)
assert sorted(np.unique(entropies.index).tolist()) == ['A', 'B', 'C']
# B always answers the same, entropy = 0
np.testing.assert_allclose(entropies['B'], 0, atol=1e-6)
# A answers uniformly, entropy = max possible
np.testing.assert_allclose(entropies['A'], 0.693147, atol=1e-6)
# C answers non-uniformly, entropy = between B and A
assert entropies['A'] > entropies['C'] > entropies['B']
assert entropies.mean() == uncertainty(
answers,
performers_skills,
compute_by='performer',
aggregate=True
)
def test_uncertainty_per_task(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '3', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'C', 'label': frozenset(['dog'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '5', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '5', 'performer': 'B', 'label': frozenset(['dog'])},
]
)
performers_skills = pd.Series(
[1, 1, 1],
index=pd.Index(['A', 'B', 'C'], name='performer'),
)
entropies = uncertainty(answers,
performers_skills,
compute_by='task',
aggregate=False)
assert isinstance(entropies, pd.Series)
assert sorted(np.unique(entropies.index).tolist()) == ['1', '2', '3', '4', '5']
# Everybody answered same on tasks 2 and 4
np.testing.assert_allclose(entropies['2'], 0, atol=1e-6)
np.testing.assert_allclose(entropies['4'], 0, atol=1e-6)
# On tasks 1 and 3, 2 performers agreed and one answered differently
assert entropies['1'] > 0
np.testing.assert_allclose(entropies['1'], entropies['3'], atol=1e-6)
# Complete disagreement on task 5, max possible entropy
np.testing.assert_allclose(entropies['5'], 0.693147, atol=1e-6)
assert entropies.mean() == uncertainty(
answers,
performers_skills,
compute_by='task',
aggregate=True
)
def test_golden_set_accuracy(toy_answers_df, toy_gold_df):
assert get_accuracy(toy_answers_df, toy_gold_df) == 5 / 9
assert get_accuracy(toy_answers_df, toy_gold_df, by='performer').equals(pd.Series(
[0.5, 1.0, 1.0, 0.5, 0.0],
index=['w1', 'w2', 'w3', 'w4', 'w5'],
))
def test_accuracy_on_aggregates(toy_answers_df):
expected_performers_accuracy = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
assert_series_equal(accuracy_on_aggregates(toy_answers_df, by='performer'), expected_performers_accuracy)
assert accuracy_on_aggregates(toy_answers_df) == 0.7083333333333334
def test_alpha_krippendorff(toy_answers_df):
assert alpha_krippendorff(pd.DataFrame.from_records([
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
{'task': 'Y', 'performer': 'A', 'label': 'No'},
{'task': 'Y', 'performer': 'B', 'label': 'No'},
])) == 1.0
assert alpha_krippendorff(pd.DataFrame.from_records([
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
{'task': 'Y', 'performer': 'A', 'label': 'No'},
{'task': 'Y', 'performer': 'B', 'label': 'No'},
{'task': 'Z', 'performer': 'A', 'label': 'Yes'},
{'task': 'Z', 'performer': 'B', 'label': 'No'},
])) == 0.4444444444444444
assert alpha_krippendorff(toy_answers_df) == 0.14219114219114215
def test_alpha_krippendorff_with_distance():
    """Alpha with set-valued labels, with and without the MASI distance."""
    observations = [
        ('X', 'A', ['dog']),
        ('X', 'B', ['dog']),
        ('Y', 'A', ['cat']),
        ('Y', 'B', ['cat']),
        ('Z', 'A', ['cat']),
        ('Z', 'B', ['cat', 'mouse']),
    ]
    whos_on_the_picture = pd.DataFrame.from_records([
        {'task': task, 'performer': performer, 'label': frozenset(labels)}
        for task, performer, labels in observations
    ])
    # The default binary distance treats {'cat'} vs {'cat', 'mouse'} as a
    # complete mismatch.
    assert alpha_krippendorff(whos_on_the_picture) == 0.5454545454545454
    # MASI distance gives partial credit for the overlapping sets.
    assert alpha_krippendorff(whos_on_the_picture, masi_distance) == 0.6673336668334168
| [
"pandas.Series",
"pandas.DataFrame.from_records",
"numpy.unique",
"crowdkit.aggregation.utils.get_accuracy",
"numpy.testing.assert_allclose",
"crowdkit.metrics.data.alpha_krippendorff",
"crowdkit.metrics.performers.accuracy_on_aggregates",
"pandas.Index",
"crowdkit.metrics.data.consistency",
"pyte... | [((394, 421), 'crowdkit.metrics.data.consistency', 'consistency', (['toy_answers_df'], {}), '(toy_answers_df)\n', (405, 421), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((4908, 4993), 'crowdkit.metrics.data.uncertainty', 'uncertainty', (['answers', 'performers_skills'], {'compute_by': '"""performer"""', 'aggregate': '(False)'}), "(answers, performers_skills, compute_by='performer', aggregate=False\n )\n", (4919, 4993), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((5232, 5289), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["entropies['B']", '(0)'], {'atol': '(1e-06)'}), "(entropies['B'], 0, atol=1e-06)\n", (5258, 5289), True, 'import numpy as np\n'), ((5352, 5416), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["entropies['A']", '(0.693147)'], {'atol': '(1e-06)'}), "(entropies['A'], 0.693147, atol=1e-06)\n", (5378, 5416), True, 'import numpy as np\n'), ((7168, 7243), 'crowdkit.metrics.data.uncertainty', 'uncertainty', (['answers', 'performers_skills'], {'compute_by': '"""task"""', 'aggregate': '(False)'}), "(answers, performers_skills, compute_by='task', aggregate=False)\n", (7179, 7243), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((7537, 7594), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["entropies['2']", '(0)'], {'atol': '(1e-06)'}), "(entropies['2'], 0, atol=1e-06)\n", (7563, 7594), True, 'import numpy as np\n'), ((7602, 7659), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["entropies['4']", '(0)'], {'atol': '(1e-06)'}), "(entropies['4'], 0, atol=1e-06)\n", (7628, 7659), True, 'import numpy as np\n'), ((7779, 7849), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["entropies['1']", "entropies['3']"], {'atol': '(1e-06)'}), "(entropies['1'], entropies['3'], atol=1e-06)\n", (7805, 7849), True, 'import numpy as np\n'), 
((7922, 7986), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["entropies['5']", '(0.693147)'], {'atol': '(1e-06)'}), "(entropies['5'], 0.693147, atol=1e-06)\n", (7948, 7986), True, 'import numpy as np\n'), ((8227, 8268), 'crowdkit.aggregation.utils.get_accuracy', 'get_accuracy', (['toy_answers_df', 'toy_gold_df'], {}), '(toy_answers_df, toy_gold_df)\n', (8239, 8268), False, 'from crowdkit.aggregation.utils import get_accuracy\n'), ((8354, 8428), 'pandas.Series', 'pd.Series', (['[0.5, 1.0, 1.0, 0.5, 0.0]'], {'index': "['w1', 'w2', 'w3', 'w4', 'w5']"}), "([0.5, 1.0, 1.0, 0.5, 0.0], index=['w1', 'w2', 'w3', 'w4', 'w5'])\n", (8363, 8428), True, 'import pandas as pd\n'), ((8690, 8744), 'crowdkit.metrics.performers.accuracy_on_aggregates', 'accuracy_on_aggregates', (['toy_answers_df'], {'by': '"""performer"""'}), "(toy_answers_df, by='performer')\n", (8712, 8744), False, 'from crowdkit.metrics.performers import accuracy_on_aggregates\n'), ((8787, 8825), 'crowdkit.metrics.performers.accuracy_on_aggregates', 'accuracy_on_aggregates', (['toy_answers_df'], {}), '(toy_answers_df)\n', (8809, 8825), False, 'from crowdkit.metrics.performers import accuracy_on_aggregates\n'), ((9634, 9668), 'crowdkit.metrics.data.alpha_krippendorff', 'alpha_krippendorff', (['toy_answers_df'], {}), '(toy_answers_df)\n', (9652, 9668), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((10241, 10280), 'crowdkit.metrics.data.alpha_krippendorff', 'alpha_krippendorff', (['whos_on_the_picture'], {}), '(whos_on_the_picture)\n', (10259, 10280), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((10314, 10368), 'crowdkit.metrics.data.alpha_krippendorff', 'alpha_krippendorff', (['whos_on_the_picture', 'masi_distance'], {}), '(whos_on_the_picture, masi_distance)\n', (10332, 10368), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((727, 773), 
'crowdkit.metrics.data.uncertainty', 'uncertainty', (['toy_answers_df', 'performers_skills'], {}), '(toy_answers_df, performers_skills)\n', (738, 773), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((1048, 1071), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (1061, 1071), False, 'import pytest\n'), ((1085, 1153), 'crowdkit.metrics.data.uncertainty', 'uncertainty', (['toy_answers_df', 'performers_skills'], {'compute_by': '"""invalid"""'}), "(toy_answers_df, performers_skills, compute_by='invalid')\n", (1096, 1153), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((2377, 2434), 'pandas.Series', 'pd.Series', (['[0.693147, 0.0]'], {'index': "['X', 'Y']", 'name': '"""task"""'}), "([0.693147, 0.0], index=['X', 'Y'], name='task')\n", (2386, 2434), True, 'import pandas as pd\n'), ((3002, 3064), 'pandas.Series', 'pd.Series', (['[0.0, 0.693147]'], {'index': "['A', 'B']", 'name': '"""performer"""'}), "([0.0, 0.693147], index=['A', 'B'], name='performer')\n", (3011, 3064), True, 'import pandas as pd\n'), ((3602, 3631), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3615, 3631), False, 'import pytest\n'), ((3645, 3684), 'crowdkit.metrics.data.uncertainty', 'uncertainty', (['answers', 'performers_skills'], {}), '(answers, performers_skills)\n', (3656, 3684), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((5578, 5657), 'crowdkit.metrics.data.uncertainty', 'uncertainty', (['answers', 'performers_skills'], {'compute_by': '"""performer"""', 'aggregate': '(True)'}), "(answers, performers_skills, compute_by='performer', aggregate=True)\n", (5589, 5657), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((8022, 8096), 'crowdkit.metrics.data.uncertainty', 'uncertainty', (['answers', 'performers_skills'], {'compute_by': '"""task"""', 'aggregate': 
'(True)'}), "(answers, performers_skills, compute_by='task', aggregate=True)\n", (8033, 8096), False, 'from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty\n'), ((8289, 8346), 'crowdkit.aggregation.utils.get_accuracy', 'get_accuracy', (['toy_answers_df', 'toy_gold_df'], {'by': '"""performer"""'}), "(toy_answers_df, toy_gold_df, by='performer')\n", (8301, 8346), False, 'from crowdkit.aggregation.utils import get_accuracy\n'), ((8600, 8658), 'pandas.Index', 'pd.Index', (["['w1', 'w2', 'w3', 'w4', 'w5']"], {'name': '"""performer"""'}), "(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer')\n", (8608, 8658), True, 'import pandas as pd\n'), ((8925, 9158), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'task': 'X', 'performer': 'A', 'label': 'Yes'}, {'task': 'X', 'performer':\n 'B', 'label': 'Yes'}, {'task': 'Y', 'performer': 'A', 'label': 'No'}, {\n 'task': 'Y', 'performer': 'B', 'label': 'No'}]"], {}), "([{'task': 'X', 'performer': 'A', 'label': 'Yes'},\n {'task': 'X', 'performer': 'B', 'label': 'Yes'}, {'task': 'Y',\n 'performer': 'A', 'label': 'No'}, {'task': 'Y', 'performer': 'B',\n 'label': 'No'}])\n", (8950, 9158), True, 'import pandas as pd\n'), ((9225, 9560), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'task': 'X', 'performer': 'A', 'label': 'Yes'}, {'task': 'X', 'performer':\n 'B', 'label': 'Yes'}, {'task': 'Y', 'performer': 'A', 'label': 'No'}, {\n 'task': 'Y', 'performer': 'B', 'label': 'No'}, {'task': 'Z',\n 'performer': 'A', 'label': 'Yes'}, {'task': 'Z', 'performer': 'B',\n 'label': 'No'}]"], {}), "([{'task': 'X', 'performer': 'A', 'label': 'Yes'},\n {'task': 'X', 'performer': 'B', 'label': 'Yes'}, {'task': 'Y',\n 'performer': 'A', 'label': 'No'}, {'task': 'Y', 'performer': 'B',\n 'label': 'No'}, {'task': 'Z', 'performer': 'A', 'label': 'Yes'}, {\n 'task': 'Z', 'performer': 'B', 'label': 'No'}])\n", (9250, 9560), True, 'import pandas as pd\n'), ((641, 699), 'pandas.Index', 'pd.Index', 
(["['w1', 'w2', 'w3', 'w4', 'w5']"], {'name': '"""performer"""'}), "(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer')\n", (649, 699), True, 'import pandas as pd\n'), ((965, 1023), 'pandas.Index', 'pd.Index', (["['w1', 'w2', 'w3', 'w4', 'w5']"], {'name': '"""performer"""'}), "(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer')\n", (973, 1023), True, 'import pandas as pd\n'), ((1246, 1375), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'task': 'X', 'performer': 'A', 'label': 'Yes'}, {'task': 'X', 'performer':\n 'B', 'label': 'Yes'}]"], {}), "([{'task': 'X', 'performer': 'A', 'label': 'Yes'},\n {'task': 'X', 'performer': 'B', 'label': 'Yes'}])\n", (1271, 1375), True, 'import pandas as pd\n'), ((1519, 1702), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'task': 'X', 'performer': 'A', 'label': 'Yes'}, {'task': 'X', 'performer':\n 'B', 'label': 'No'}, {'task': 'X', 'performer': 'C', 'label': 'Maybe'}]"], {}), "([{'task': 'X', 'performer': 'A', 'label': 'Yes'},\n {'task': 'X', 'performer': 'B', 'label': 'No'}, {'task': 'X',\n 'performer': 'C', 'label': 'Maybe'}])\n", (1544, 1702), True, 'import pandas as pd\n'), ((1914, 2148), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'task': 'X', 'performer': 'A', 'label': 'Yes'}, {'task': 'X', 'performer':\n 'B', 'label': 'No'}, {'task': 'Y', 'performer': 'A', 'label': 'Yes'}, {\n 'task': 'Y', 'performer': 'B', 'label': 'Yes'}]"], {}), "([{'task': 'X', 'performer': 'A', 'label': 'Yes'},\n {'task': 'X', 'performer': 'B', 'label': 'No'}, {'task': 'Y',\n 'performer': 'A', 'label': 'Yes'}, {'task': 'Y', 'performer': 'B',\n 'label': 'Yes'}])\n", (1939, 2148), True, 'import pandas as pd\n'), ((2534, 2768), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'task': 'X', 'performer': 'A', 'label': 'Yes'}, {'task': 'X', 'performer':\n 'B', 'label': 'No'}, {'task': 'Y', 'performer': 'A', 'label': 'Yes'}, {\n 'task': 'Y', 'performer': 'B', 'label': 'Yes'}]"], {}), 
"([{'task': 'X', 'performer': 'A', 'label': 'Yes'},\n {'task': 'X', 'performer': 'B', 'label': 'No'}, {'task': 'Y',\n 'performer': 'A', 'label': 'Yes'}, {'task': 'Y', 'performer': 'B',\n 'label': 'Yes'}])\n", (2559, 2768), True, 'import pandas as pd\n'), ((3538, 3576), 'pandas.Index', 'pd.Index', (["['A', 'B']"], {'name': '"""performer"""'}), "(['A', 'B'], name='performer')\n", (3546, 3576), True, 'import pandas as pd\n'), ((4832, 4875), 'pandas.Index', 'pd.Index', (["['A', 'B', 'C']"], {'name': '"""performer"""'}), "(['A', 'B', 'C'], name='performer')\n", (4840, 4875), True, 'import pandas as pd\n'), ((7092, 7135), 'pandas.Index', 'pd.Index', (["['A', 'B', 'C']"], {'name': '"""performer"""'}), "(['A', 'B', 'C'], name='performer')\n", (7100, 7135), True, 'import pandas as pd\n'), ((5118, 5144), 'numpy.unique', 'np.unique', (['entropies.index'], {}), '(entropies.index)\n', (5127, 5144), True, 'import numpy as np\n'), ((7411, 7437), 'numpy.unique', 'np.unique', (['entropies.index'], {}), '(entropies.index)\n', (7420, 7437), True, 'import numpy as np\n')] |
import albumentations
from albumentations.pytorch import ToTensorV2
import cv2
import numpy as np
def crop_image_from_gray(img, tol=7):
    """Crop the dark (near-black) border away from an image.

    Args:
        img: a grayscale ``(H, W)`` or color ``(H, W, 3)`` array.
        tol: intensity threshold; pixels with value <= ``tol`` are treated
            as background.

    Returns:
        The cropped array. A color image whose foreground mask is empty is
        returned unchanged (cropping would yield an empty array); arrays of
        any other rank are also passed through unchanged.
    """
    if img.ndim == 2:
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    if img.ndim == 3:
        # Threshold on a grayscale copy so all channels get the same crop.
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        mask = gray_img > tol
        rows = mask.any(1)
        cols = mask.any(0)
        if not rows.any():
            # Whole image is background; keep the original instead of
            # returning an empty crop.
            return img
        # Build the open-mesh index grid once instead of once per channel.
        crop = np.ix_(rows, cols)
        return np.stack([img[:, :, c][crop] for c in range(3)], axis=-1)
    # Previously fell through returning None for other ranks; pass through.
    return img
def crop_maskImg(image, sigmaX=10):
    """Crop the dark border from `image`.

    `sigmaX` is currently unused (kept for API compatibility); a
    Gaussian-blur weighting step was disabled upstream, so only the
    crop is applied.
    """
    return crop_image_from_gray(image)
def get_riadd_train_transforms(args):
    """Build the RIADD training augmentation pipeline from `args.img_size`."""
    size = args.img_size
    steps = [
        albumentations.Resize(size, size),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.VerticalFlip(p=0.5),
        albumentations.MedianBlur(blur_limit=7, p=0.3),
        albumentations.IAAAdditiveGaussianNoise(scale=(0, 0.15 * 255), p=0.5),
        albumentations.HueSaturationValue(
            hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.3),
        albumentations.RandomBrightnessContrast(
            brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2), p=0.3),
        albumentations.Cutout(max_h_size=20, max_w_size=20, num_holes=5, p=0.5),
        # ImageNet statistics, matching the pretrained backbone.
        albumentations.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ]
    return albumentations.Compose(steps)
def get_riadd_valid_transforms(args):
    """Build the deterministic RIADD validation pipeline from `args.img_size`."""
    size = args.img_size
    return albumentations.Compose([
        albumentations.Resize(size, size),
        # ImageNet statistics, matching the pretrained backbone.
        albumentations.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ])
def get_riadd_test_transforms(args):
    """Build the RIADD test-time augmentation pipeline.

    Note: unlike the train/valid builders, `args` is a dict here and the
    image size is read as `args['img_size']`.
    """
    size = args['img_size']
    steps = [
        albumentations.Resize(size, size),
        albumentations.HorizontalFlip(p=0.5),
        albumentations.HueSaturationValue(
            hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5),
        albumentations.RandomBrightnessContrast(
            brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2), p=0.5),
        # ImageNet statistics, matching the pretrained backbone.
        albumentations.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ]
    return albumentations.Compose(steps)
# if __name__ == '__main__':
# img = cv2.imread('/media/ExtDiskB/Hanson/datasets/wheat/RIADD/valid/1.png')
# img1 = preprocessing(img)
# # result=color_seperate(hsv_img, thresh_image)
# cv2.imwrite('1222.png',img1) | [
"albumentations.pytorch.ToTensorV2",
"albumentations.MedianBlur",
"albumentations.RandomBrightnessContrast",
"albumentations.Cutout",
"albumentations.VerticalFlip",
"albumentations.IAAAdditiveGaussianNoise",
"albumentations.HueSaturationValue",
"numpy.stack",
"albumentations.Normalize",
"albumenta... | [((280, 317), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (292, 317), False, 'import cv2\n'), ((1166, 1211), 'albumentations.Resize', 'albumentations.Resize', (['image_size', 'image_size'], {}), '(image_size, image_size)\n', (1187, 1211), False, 'import albumentations\n'), ((1222, 1258), 'albumentations.HorizontalFlip', 'albumentations.HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1251, 1258), False, 'import albumentations\n'), ((1268, 1302), 'albumentations.VerticalFlip', 'albumentations.VerticalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1295, 1302), False, 'import albumentations\n'), ((1312, 1358), 'albumentations.MedianBlur', 'albumentations.MedianBlur', ([], {'blur_limit': '(7)', 'p': '(0.3)'}), '(blur_limit=7, p=0.3)\n', (1337, 1358), False, 'import albumentations\n'), ((1370, 1439), 'albumentations.IAAAdditiveGaussianNoise', 'albumentations.IAAAdditiveGaussianNoise', ([], {'scale': '(0, 0.15 * 255)', 'p': '(0.5)'}), '(scale=(0, 0.15 * 255), p=0.5)\n', (1409, 1439), False, 'import albumentations\n'), ((1450, 1554), 'albumentations.HueSaturationValue', 'albumentations.HueSaturationValue', ([], {'hue_shift_limit': '(10)', 'sat_shift_limit': '(10)', 'val_shift_limit': '(10)', 'p': '(0.3)'}), '(hue_shift_limit=10, sat_shift_limit=10,\n val_shift_limit=10, p=0.3)\n', (1483, 1554), False, 'import albumentations\n'), ((1560, 1668), 'albumentations.RandomBrightnessContrast', 'albumentations.RandomBrightnessContrast', ([], {'brightness_limit': '(-0.2, 0.2)', 'contrast_limit': '(-0.2, 0.2)', 'p': '(0.3)'}), '(brightness_limit=(-0.2, 0.2),\n contrast_limit=(-0.2, 0.2), p=0.3)\n', (1599, 1668), False, 'import albumentations\n'), ((1673, 1744), 'albumentations.Cutout', 'albumentations.Cutout', ([], {'max_h_size': '(20)', 'max_w_size': '(20)', 'num_holes': '(5)', 'p': '(0.5)'}), '(max_h_size=20, max_w_size=20, num_holes=5, p=0.5)\n', (1694, 1744), False, 'import albumentations\n'), ((1754, 1833), 
'albumentations.Normalize', 'albumentations.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1778, 1833), False, 'import albumentations\n'), ((1878, 1890), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (1888, 1890), False, 'from albumentations.pytorch import ToTensorV2\n'), ((2054, 2099), 'albumentations.Resize', 'albumentations.Resize', (['image_size', 'image_size'], {}), '(image_size, image_size)\n', (2075, 2099), False, 'import albumentations\n'), ((2109, 2188), 'albumentations.Normalize', 'albumentations.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2133, 2188), False, 'import albumentations\n'), ((2233, 2245), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (2243, 2245), False, 'from albumentations.pytorch import ToTensorV2\n'), ((2410, 2455), 'albumentations.Resize', 'albumentations.Resize', (['image_size', 'image_size'], {}), '(image_size, image_size)\n', (2431, 2455), False, 'import albumentations\n'), ((2465, 2501), 'albumentations.HorizontalFlip', 'albumentations.HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2494, 2501), False, 'import albumentations\n'), ((2511, 2615), 'albumentations.HueSaturationValue', 'albumentations.HueSaturationValue', ([], {'hue_shift_limit': '(10)', 'sat_shift_limit': '(10)', 'val_shift_limit': '(10)', 'p': '(0.5)'}), '(hue_shift_limit=10, sat_shift_limit=10,\n val_shift_limit=10, p=0.5)\n', (2544, 2615), False, 'import albumentations\n'), ((2621, 2729), 'albumentations.RandomBrightnessContrast', 'albumentations.RandomBrightnessContrast', ([], {'brightness_limit': '(-0.2, 0.2)', 'contrast_limit': '(-0.2, 0.2)', 'p': '(0.5)'}), '(brightness_limit=(-0.2, 0.2),\n contrast_limit=(-0.2, 0.2), p=0.5)\n', (2660, 2729), False, 'import albumentations\n'), ((2734, 2813), 
'albumentations.Normalize', 'albumentations.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2758, 2813), False, 'import albumentations\n'), ((2858, 2870), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (2868, 2870), False, 'from albumentations.pytorch import ToTensorV2\n'), ((710, 747), 'numpy.stack', 'np.stack', (['[img1, img2, img3]'], {'axis': '(-1)'}), '([img1, img2, img3], axis=-1)\n', (718, 747), True, 'import numpy as np\n')] |
"""
Short corridor with switched actions (Example 13.1) of Sutton and Barto's
"Reinforcement learning"
"""
import numpy as np
class ShortCorridor():
    """Short corridor with switched actions (Sutton & Barto, Example 13.1).

    Four states in a row; in the switch state the effect of the chosen
    action is inverted. Every step yields a reward of -1, so returns
    measure (negative) episode length.
    """

    def __init__(self):
        self.num_states = 4
        self.states = self.state_space()
        self.actions = self.action_space()
        self.initial_state = self.states[0]
        self.terminal_state = self.states[-1]
        self.switch_state = self.states[1]

    def reset(self):
        """Return the initial state of a new episode."""
        return self.initial_state

    def step(self, state, action):
        """Take `action` (-1: left, 1: right) in `state`.

        Returns
        -------
        (next_state, reward, done) : reward is always -1; done is True
        once the terminal state is reached.
        """
        done = False
        if state == self.switch_state:
            # In the switch state the action moves the agent the other way.
            action = -action
        next_state = state + action
        if next_state < self.initial_state:
            # Bumping into the left wall keeps the agent in place.
            next_state = self.initial_state
        if next_state == self.terminal_state:
            done = True
        reward = -1
        return next_state, reward, done

    def state_space(self):
        """Return the array of states [0, 1, ..., num_states - 1]."""
        # np.arange replaces the hand-rolled list-comprehension array.
        return np.arange(self.num_states)

    def action_space(self):
        """
        Define action space

        Returns
        -------
        actions : ndarray
            actions in short corridor: -1: go left; 1: go right.
        """
        return np.array([-1, 1])
#%%
class FeatureVectors(ShortCorridor):
    """Feature vectors for the short corridor with switched actions."""
    def __init__(self):
        super().__init__()
        self.states = self.state_space()
        self.actions = self.action_space()
        self.x_vec = self.feat_vec()
    def feat_vec(self):
        """Map every (state, action) pair to its feature vector.

        The same pair of vectors is used in every state on purpose: the
        agent cannot distinguish the states from its observations.
        """
        left_action, right_action = self.actions
        vectors = {}
        for state in self.states:
            vectors[(state, left_action)] = np.array([0, 1])
            vectors[(state, right_action)] = np.array([1, 0])
        return vectors
#%%
# Demo: run the short corridor with a fixed stochastic policy and report
# the Monte-Carlo estimate of the average episode length.
if __name__ == "__main__":
    SC = ShortCorridor()
    FV = FeatureVectors()
    print(FV.x_vec[(0,1)])
    #%% Test
    # Action distribution: left with probability eps/2, right with 1 - eps/2.
    eps= 0.82 # highest prob for eps = 0.82
    NUM_EPISODES = 1000
    avg_steps = 0
    for episode in range(NUM_EPISODES):
        steps = 0
        state = SC.reset()
        done = False
        while not done:
            steps += 1
            action = np.random.choice(SC.action_space(), p=[eps/2, 1-eps/2])
            state, reward, done = SC.step(state, action)
        #print(steps)
        # Incremental (running) mean of episode lengths.
        avg_steps = avg_steps + 1/(episode+1)*(steps - avg_steps)
    print(avg_steps)
"numpy.array"
] | [((1538, 1555), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1546, 1555), True, 'import numpy as np\n'), ((2172, 2188), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2180, 2188), True, 'import numpy as np\n'), ((2234, 2250), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2242, 2250), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metric utils."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from language.mentionmemory.utils import metric_utils
import numpy as np
_LARGE_NUMBER = 1e12
class ComputeMetricsTest(absltest.TestCase):
  """Test whether metrics computations produce expected values."""
  # Tensor shapes shared by every test in this class.
  batch_size = 32
  seq_len = 20
  vocab_size = 100
  def test_logit_values_as_expected(self):
    """Test whether metrics computations produce expected values."""
    # Random logits/targets; `weights` is a 0/1 mask over positions.
    logits = np.random.rand(self.batch_size, self.seq_len, self.vocab_size)
    targets = np.random.randint(
        self.vocab_size, size=(self.batch_size, self.seq_len))
    dense_targets = jax.nn.one_hot(targets, self.vocab_size)
    weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
    # Check loss and denominator make sense for random values
    loss, denominator = metric_utils.compute_weighted_cross_entropy(
        logits,
        targets,
        weights,
    )
    # Reference: -log softmax at the target class, masked by `weights`.
    expected_loss = -jax.nn.log_softmax(logits, axis=-1) * dense_targets
    expected_loss = (expected_loss * np.expand_dims(weights, axis=-1)).sum()
    self.assertAlmostEqual(loss, expected_loss, 1)
    self.assertAlmostEqual(denominator, weights.sum(), 1)
    # Check loss makes sense for uniform and degenerate scores
    # Uniform logits: per-token loss must equal log(vocab_size).
    logits = np.ones(shape=(self.batch_size, self.seq_len, self.vocab_size))
    loss, denominator = metric_utils.compute_weighted_cross_entropy(
        logits,
        targets,
        weights,
    )
    expected_loss = np.log(self.vocab_size)
    self.assertAlmostEqual(loss / denominator, expected_loss, 4)
    # Degenerate logits: (almost) all probability mass lands on the target
    # class, so the cross entropy should vanish.
    logits = np.zeros(shape=(self.batch_size, self.seq_len, self.vocab_size))
    logits = logits + (
        _LARGE_NUMBER * dense_targets - _LARGE_NUMBER * (1 - dense_targets))
    loss, denominator = metric_utils.compute_weighted_cross_entropy(
        logits,
        targets,
        weights,
    )
    self.assertAlmostEqual(loss / denominator, 0.0, 4)
  def test_prob_values_as_expected(self):
    """Same checks as above, but with probabilities as inputs."""
    probs = np.random.rand(self.batch_size, self.seq_len, self.vocab_size)
    targets = np.random.randint(
        self.vocab_size, size=(self.batch_size, self.seq_len))
    dense_targets = jax.nn.one_hot(targets, self.vocab_size)
    weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
    # Check loss and denominator make sense with probs as inputs
    loss, denominator = metric_utils.compute_weighted_cross_entropy(
        probs,
        targets,
        weights,
        inputs_are_prob=True,
    )
    # Reference: -log(probability of the target class), masked by `weights`.
    expected_loss = -np.log(probs) * dense_targets
    expected_loss = (expected_loss * np.expand_dims(weights, axis=-1)).sum()
    self.assertAlmostEqual(loss, expected_loss, 1)
    self.assertAlmostEqual(denominator, weights.sum(), 1)
    # Check loss makes sense for uniform and degenerate probabilities
    probs = np.ones(shape=(self.batch_size, self.seq_len, self.vocab_size))
    probs = probs / self.vocab_size
    loss, denominator = metric_utils.compute_weighted_cross_entropy(
        probs,
        targets,
        weights,
        inputs_are_prob=True,
    )
    expected_loss = np.log(self.vocab_size)
    self.assertAlmostEqual(loss / denominator, expected_loss, 4)
    # One-hot probabilities on the target class: loss should be zero.
    probs = np.zeros(shape=(self.batch_size, self.seq_len, self.vocab_size))
    probs = probs + dense_targets
    loss, denominator = metric_utils.compute_weighted_cross_entropy(
        probs,
        targets,
        weights,
        inputs_are_prob=True,
    )
    self.assertAlmostEqual(loss / denominator, 0.0, 4)
  def test_accuracy_as_expected(self):
    """Accuracy counts masked positions whose argmax logit is the target."""
    # NOTE(review): this first `logits` draw is overwritten below and only
    # advances the RNG stream -- looks like dead code; confirm before removal.
    logits = np.random.rand(self.batch_size, self.seq_len, self.vocab_size)
    targets = np.random.randint(
        self.vocab_size, size=(self.batch_size, self.seq_len))
    dense_targets = jax.nn.one_hot(targets, self.vocab_size)
    weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
    # Check accuracy and denominator make sense
    # Build logits where the target class scores 1.5 where `correct` is 1
    # and 0.5 otherwise, against a background of 1.0, so the argmax hits
    # the target exactly where `correct` is 1.
    logits = np.ones((self.batch_size, self.seq_len, self.vocab_size),
                     dtype=np.float32)
    correct = np.random.randint(2, size=(self.batch_size, self.seq_len, 1))
    logits = logits + dense_targets * (0.5 * correct - 0.5 * (1 - correct))
    acc, denominator = metric_utils.compute_weighted_accuracy(
        logits,
        targets,
        weights,
    )
    expected_accuracy = (np.squeeze(correct) * weights).sum() / weights.sum()
    self.assertAlmostEqual(acc / denominator, expected_accuracy, 1)
    self.assertAlmostEqual(denominator, weights.sum(), 1)
class ComputeCrossEntropyTest(parameterized.TestCase):
  """Test whether loss and metrics computations produce expected values."""
  # Parameters: (seed, scale, local_n_mentions, global_n_mentions,
  # max_num_positives, max_num_negatives); the last two default to None.
  @parameterized.parameters(
      (0, 1, 29, 31, 31),
      # Tests with large score values
      (1, 1000000, 29, 31),
      (2, 1000000, 29, 31),
      # Tests with large number of positive, negatives and neutral classes
      (3, 100, 29, 1001),
      (4, 100, 323, 31),
      # Tests whether lack of positives affects the numerical stability
      (5, 1, 29, 31, 1, 31),
      (6, 1, 29, 31, 0, 31),
      (7, 1, 29, 31, 31, 1),
      (8, 1, 29, 31, 31, 0),
      (9, 1, 29, 31, 1, 1),
      (10, 1, 29, 31, 0, 0),
      (11, 1000000, 29, 31, 0, 0),
      (12, 100, 29, 1001, 0, 0),
      (13, 100, 323, 31, 0, 0),
  )
  def test_loss_and_metrics_as_expected(self,
                                        seed,
                                        scale,
                                        local_n_mentions,
                                        global_n_mentions,
                                        max_num_positives=None,
                                        max_num_negatives=None):
    """Test whether loss and metrics computation produces expected values."""
    np.random.seed(seed)
    # NOTE(review): `or` maps both None and 0 to global_n_mentions, so an
    # explicit maximum of 0 is treated the same as "no limit" -- confirm
    # this is intended for the *_0_0 parameter rows above.
    max_num_negatives = max_num_negatives or global_n_mentions
    max_num_positives = max_num_positives or global_n_mentions
    shape = (local_n_mentions, global_n_mentions)
    scores = np.random.random(shape) * scale
    # Per-mention counts of positive / negative candidates; force the edge
    # cases "no positives" (first row) and "all positives" (last row).
    num_positives = np.random.randint(
        max_num_positives + 1, size=(local_n_mentions))
    num_positives[0] = 0
    num_positives[-1] = global_n_mentions
    num_negatives = np.random.randint(
        max_num_negatives + 1, size=(local_n_mentions))
    num_negatives = np.minimum(num_negatives, global_n_mentions - num_positives)
    # Sample disjoint positive / negative masks for every mention.
    positives = np.zeros(shape, dtype=np.bool_)
    negatives = np.zeros(shape, dtype=np.bool_)
    for index in range(local_n_mentions):
      ids = np.random.choice(
          global_n_mentions,
          num_positives[index] + num_negatives[index],
          replace=False)
      positives[index, ids[:num_positives[index]]] = True
      negatives[index, ids[num_positives[index]:]] = True
    self.assertEqual(np.logical_and(positives, negatives).sum(), 0)
    # A mention contributes only when it has at least one positive AND one
    # negative candidate.
    weights = np.logical_and(num_positives > 0, num_negatives > 0)
    (actual_loss, actual_metrics, (actual_acc_per_sample,
                                   actual_weights_per_sample)
    ) = metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks(
        scores, positives, negatives)
    # Recompute the expected loss / accuracy with a naive per-sample loop.
    expected_loss, expected_acc, expected_denom = 0, 0, 0
    expected_acc_per_sample = []
    # Consider every sample independently
    for i in range(local_n_mentions):
      if not weights[i]:
        expected_acc_per_sample.append(0)
        continue
      # Collect positive and negative scores
      positive_scores, negative_scores = [], []
      for j in range(global_n_mentions):
        if positives[i, j]:
          positive_scores.append(scores[i, j])
        if negatives[i, j]:
          negative_scores.append(scores[i, j])
      self.assertNotEmpty(positive_scores)
      self.assertNotEmpty(negative_scores)
      n_pos = len(positive_scores)
      max_negative_scores = max(negative_scores)
      current_loss, current_acc = 0, 0
      # Consider positive class per sample independently
      # and compute loss using a naive softmax op
      for pos_index in range(n_pos):
        current_scores = np.array([positive_scores[pos_index]] +
                                  negative_scores)
        current_scores = jax.nn.log_softmax(current_scores)
        current_loss += -current_scores[0]
        current_acc += int(positive_scores[pos_index] > max_negative_scores)
      expected_loss += current_loss / n_pos
      expected_acc += current_acc / n_pos
      expected_denom += 1
      expected_acc_per_sample.append(current_acc / n_pos)
    self.assertAlmostEqual(actual_loss, expected_loss, places=2)
    self.assertAlmostEqual(actual_metrics['loss'], expected_loss, places=2)
    self.assertAlmostEqual(actual_metrics['acc'], expected_acc, places=4)
    self.assertAlmostEqual(
        actual_metrics['denominator'], expected_denom, places=4)
    self.assertTrue(np.all(weights == actual_weights_per_sample))
    self.assertSequenceAlmostEqual(
        actual_acc_per_sample, expected_acc_per_sample, places=4)
class ComputeMetricsFromDuplicatesTest(absltest.TestCase):
  """Test whether metrics computation produces expected values."""
  # Tensor shapes shared by the test below.
  batch_size = 32
  seq_len = 20
  num_items = 100
  num_classes = 200
  def test_values_as_expected(self):
    """Test whether metrics computation produces expected values."""
    # Uniform probabilities over `num_items` candidates; initially every
    # candidate belongs to class 1.
    probs = np.ones((self.batch_size, self.seq_len, self.num_items),
                    dtype=np.float32) / self.num_items
    classes = np.ones((self.batch_size, self.seq_len, self.num_items),
                      dtype=np.int32)
    targets = np.ones((self.batch_size, self.seq_len), dtype=np.int32)
    weights = np.random.randint(2, size=(self.batch_size, self.seq_len))
    # Check case where all classes are targets
    loss, avg_prob, denominator = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
        probs,
        classes,
        targets,
        weights,
    )
    self.assertAlmostEqual(loss / denominator, 0.0, 4)
    self.assertAlmostEqual(avg_prob / denominator, 1.0, 4)
    self.assertAlmostEqual(denominator, weights.sum(), 4)
    # Check case where no classes are targets
    targets = np.zeros((self.batch_size, self.seq_len), dtype=np.int32)
    loss, avg_prob, denominator = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
        probs,
        classes,
        targets,
        weights,
    )
    self.assertAlmostEqual(avg_prob / denominator, 0.0, 4)
    # Check random cases
    classes = np.random.randint(
        self.num_classes, size=(self.batch_size, self.seq_len, self.num_items))
    targets = np.random.randint(
        self.num_classes, size=(self.batch_size, self.seq_len))
    loss, avg_prob, denominator = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
        probs,
        classes,
        targets,
        weights,
    )
    # Expected mass: sum of probs over items whose class matches the
    # target, averaged over positions with nonzero weight.
    correct_probs = (classes == np.expand_dims(targets, axis=-1)) * probs
    expected_avg_prob = (
        correct_probs * np.expand_dims(weights, axis=-1)).sum() / weights.sum()
    self.assertAlmostEqual(avg_prob / denominator, expected_avg_prob, 4)
class ProcessMetricsTest(absltest.TestCase):
  """Test metrics processing."""

  def test_values_as_expected(self):
    """Test whether processed dictionaries match expected values."""
    raw_metrics = {
        'cat1': {'key': 2.0, 'denominator': 1.0},
        'cat2': {'key': 2.0, 'denominator': 2.0},
    }
    # Without a prefix: keys are flattened to '<category>_<name>' and values
    # (except the denominator itself) are divided by the denominator.
    self.assertEqual(
        metric_utils.process_metrics(raw_metrics),
        {
            'cat1_key': 2.0,
            'cat1_denom': 1.0,
            'cat2_key': 1.0,
            'cat2_denom': 2.0,
        })
    raw_metrics = {
        'cat1': {'key': 2.0, 'denominator': 1.0},
        'cat2': {'key': 2.0, 'denominator': 2.0},
    }
    # With a prefix: flattened keys gain a '<prefix>/' namespace.
    self.assertEqual(
        metric_utils.process_metrics(raw_metrics, prefix='pref'),
        {
            'pref/cat1_key': 2.0,
            'pref/cat1_denom': 1.0,
            'pref/cat2_key': 1.0,
            'pref/cat2_denom': 2.0,
        })
class UpdateMetricsDTypeTest(absltest.TestCase):
  """Test metrics dtype normalization."""

  def test_types_as_expected(self):
    """Test whether updated metrics match expected types."""
    metric_dict = {
        'cat1': {
            'key': jnp.asarray([1], dtype=jnp.int32),
            'denominator': jnp.asarray([1], dtype=jnp.int16),
        },
        'cat2': {
            'key': 2.0,
            'denominator': jnp.asarray([1], dtype=jnp.bfloat16),
        },
    }
    updated = metric_utils.update_metrics_dtype(metric_dict)
    # Integer and bfloat16 arrays are promoted to float32; plain Python
    # floats pass through unchanged.
    self.assertEqual(updated['cat1']['key'].dtype, jnp.float32)
    self.assertEqual(updated['cat1']['denominator'].dtype, jnp.float32)
    self.assertIsInstance(updated['cat2']['key'], float)
    self.assertEqual(updated['cat2']['denominator'].dtype, jnp.float32)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
| [
"numpy.random.rand",
"jax.nn.log_softmax",
"numpy.log",
"language.mentionmemory.utils.metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks",
"numpy.array",
"numpy.random.random",
"jax.numpy.asarray",
"numpy.random.seed",
"language.mentionmemory.utils.metric_utils.compute_loss_a... | [((5319, 5683), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(0, 1, 29, 31, 31)', '(1, 1000000, 29, 31)', '(2, 1000000, 29, 31)', '(3, 100, 29, 1001)', '(4, 100, 323, 31)', '(5, 1, 29, 31, 1, 31)', '(6, 1, 29, 31, 0, 31)', '(7, 1, 29, 31, 31, 1)', '(8, 1, 29, 31, 31, 0)', '(9, 1, 29, 31, 1, 1)', '(10, 1, 29, 31, 0, 0)', '(11, 1000000, 29, 31, 0, 0)', '(12, 100, 29, 1001, 0, 0)', '(13, 100, 323, 31, 0, 0)'], {}), '((0, 1, 29, 31, 31), (1, 1000000, 29, 31), (2, \n 1000000, 29, 31), (3, 100, 29, 1001), (4, 100, 323, 31), (5, 1, 29, 31,\n 1, 31), (6, 1, 29, 31, 0, 31), (7, 1, 29, 31, 31, 1), (8, 1, 29, 31, 31,\n 0), (9, 1, 29, 31, 1, 1), (10, 1, 29, 31, 0, 0), (11, 1000000, 29, 31, \n 0, 0), (12, 100, 29, 1001, 0, 0), (13, 100, 323, 31, 0, 0))\n', (5343, 5683), False, 'from absl.testing import parameterized\n'), ((13731, 13746), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (13744, 13746), False, 'from absl.testing import absltest\n'), ((1143, 1205), 'numpy.random.rand', 'np.random.rand', (['self.batch_size', 'self.seq_len', 'self.vocab_size'], {}), '(self.batch_size, self.seq_len, self.vocab_size)\n', (1157, 1205), True, 'import numpy as np\n'), ((1220, 1292), 'numpy.random.randint', 'np.random.randint', (['self.vocab_size'], {'size': '(self.batch_size, self.seq_len)'}), '(self.vocab_size, size=(self.batch_size, self.seq_len))\n', (1237, 1292), True, 'import numpy as np\n'), ((1322, 1362), 'jax.nn.one_hot', 'jax.nn.one_hot', (['targets', 'self.vocab_size'], {}), '(targets, self.vocab_size)\n', (1336, 1362), False, 'import jax\n'), ((1377, 1435), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(self.batch_size, self.seq_len)'}), '(2, size=(self.batch_size, self.seq_len))\n', (1394, 1435), True, 'import numpy as np\n'), ((1523, 1592), 'language.mentionmemory.utils.metric_utils.compute_weighted_cross_entropy', 
'metric_utils.compute_weighted_cross_entropy', (['logits', 'targets', 'weights'], {}), '(logits, targets, weights)\n', (1566, 1592), False, 'from language.mentionmemory.utils import metric_utils\n'), ((1961, 2024), 'numpy.ones', 'np.ones', ([], {'shape': '(self.batch_size, self.seq_len, self.vocab_size)'}), '(shape=(self.batch_size, self.seq_len, self.vocab_size))\n', (1968, 2024), True, 'import numpy as np\n'), ((2049, 2118), 'language.mentionmemory.utils.metric_utils.compute_weighted_cross_entropy', 'metric_utils.compute_weighted_cross_entropy', (['logits', 'targets', 'weights'], {}), '(logits, targets, weights)\n', (2092, 2118), False, 'from language.mentionmemory.utils import metric_utils\n'), ((2170, 2193), 'numpy.log', 'np.log', (['self.vocab_size'], {}), '(self.vocab_size)\n', (2176, 2193), True, 'import numpy as np\n'), ((2273, 2337), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.batch_size, self.seq_len, self.vocab_size)'}), '(shape=(self.batch_size, self.seq_len, self.vocab_size))\n', (2281, 2337), True, 'import numpy as np\n'), ((2464, 2533), 'language.mentionmemory.utils.metric_utils.compute_weighted_cross_entropy', 'metric_utils.compute_weighted_cross_entropy', (['logits', 'targets', 'weights'], {}), '(logits, targets, weights)\n', (2507, 2533), False, 'from language.mentionmemory.utils import metric_utils\n'), ((2676, 2738), 'numpy.random.rand', 'np.random.rand', (['self.batch_size', 'self.seq_len', 'self.vocab_size'], {}), '(self.batch_size, self.seq_len, self.vocab_size)\n', (2690, 2738), True, 'import numpy as np\n'), ((2753, 2825), 'numpy.random.randint', 'np.random.randint', (['self.vocab_size'], {'size': '(self.batch_size, self.seq_len)'}), '(self.vocab_size, size=(self.batch_size, self.seq_len))\n', (2770, 2825), True, 'import numpy as np\n'), ((2855, 2895), 'jax.nn.one_hot', 'jax.nn.one_hot', (['targets', 'self.vocab_size'], {}), '(targets, self.vocab_size)\n', (2869, 2895), False, 'import jax\n'), ((2910, 2968), 'numpy.random.randint', 
'np.random.randint', (['(2)'], {'size': '(self.batch_size, self.seq_len)'}), '(2, size=(self.batch_size, self.seq_len))\n', (2927, 2968), True, 'import numpy as np\n'), ((3059, 3153), 'language.mentionmemory.utils.metric_utils.compute_weighted_cross_entropy', 'metric_utils.compute_weighted_cross_entropy', (['probs', 'targets', 'weights'], {'inputs_are_prob': '(True)'}), '(probs, targets, weights,\n inputs_are_prob=True)\n', (3102, 3153), False, 'from language.mentionmemory.utils import metric_utils\n'), ((3511, 3574), 'numpy.ones', 'np.ones', ([], {'shape': '(self.batch_size, self.seq_len, self.vocab_size)'}), '(shape=(self.batch_size, self.seq_len, self.vocab_size))\n', (3518, 3574), True, 'import numpy as np\n'), ((3636, 3730), 'language.mentionmemory.utils.metric_utils.compute_weighted_cross_entropy', 'metric_utils.compute_weighted_cross_entropy', (['probs', 'targets', 'weights'], {'inputs_are_prob': '(True)'}), '(probs, targets, weights,\n inputs_are_prob=True)\n', (3679, 3730), False, 'from language.mentionmemory.utils import metric_utils\n'), ((3787, 3810), 'numpy.log', 'np.log', (['self.vocab_size'], {}), '(self.vocab_size)\n', (3793, 3810), True, 'import numpy as np\n'), ((3889, 3953), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.batch_size, self.seq_len, self.vocab_size)'}), '(shape=(self.batch_size, self.seq_len, self.vocab_size))\n', (3897, 3953), True, 'import numpy as np\n'), ((4013, 4107), 'language.mentionmemory.utils.metric_utils.compute_weighted_cross_entropy', 'metric_utils.compute_weighted_cross_entropy', (['probs', 'targets', 'weights'], {'inputs_are_prob': '(True)'}), '(probs, targets, weights,\n inputs_are_prob=True)\n', (4056, 4107), False, 'from language.mentionmemory.utils import metric_utils\n'), ((4252, 4314), 'numpy.random.rand', 'np.random.rand', (['self.batch_size', 'self.seq_len', 'self.vocab_size'], {}), '(self.batch_size, self.seq_len, self.vocab_size)\n', (4266, 4314), True, 'import numpy as np\n'), ((4329, 4401), 
'numpy.random.randint', 'np.random.randint', (['self.vocab_size'], {'size': '(self.batch_size, self.seq_len)'}), '(self.vocab_size, size=(self.batch_size, self.seq_len))\n', (4346, 4401), True, 'import numpy as np\n'), ((4431, 4471), 'jax.nn.one_hot', 'jax.nn.one_hot', (['targets', 'self.vocab_size'], {}), '(targets, self.vocab_size)\n', (4445, 4471), False, 'import jax\n'), ((4486, 4544), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(self.batch_size, self.seq_len)'}), '(2, size=(self.batch_size, self.seq_len))\n', (4503, 4544), True, 'import numpy as np\n'), ((4608, 4683), 'numpy.ones', 'np.ones', (['(self.batch_size, self.seq_len, self.vocab_size)'], {'dtype': 'np.float32'}), '((self.batch_size, self.seq_len, self.vocab_size), dtype=np.float32)\n', (4615, 4683), True, 'import numpy as np\n'), ((4719, 4780), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(self.batch_size, self.seq_len, 1)'}), '(2, size=(self.batch_size, self.seq_len, 1))\n', (4736, 4780), True, 'import numpy as np\n'), ((4881, 4945), 'language.mentionmemory.utils.metric_utils.compute_weighted_accuracy', 'metric_utils.compute_weighted_accuracy', (['logits', 'targets', 'weights'], {}), '(logits, targets, weights)\n', (4919, 4945), False, 'from language.mentionmemory.utils import metric_utils\n'), ((6407, 6427), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6421, 6427), True, 'import numpy as np\n'), ((6671, 6734), 'numpy.random.randint', 'np.random.randint', (['(max_num_positives + 1)'], {'size': 'local_n_mentions'}), '(max_num_positives + 1, size=local_n_mentions)\n', (6688, 6734), True, 'import numpy as np\n'), ((6834, 6897), 'numpy.random.randint', 'np.random.randint', (['(max_num_negatives + 1)'], {'size': 'local_n_mentions'}), '(max_num_negatives + 1, size=local_n_mentions)\n', (6851, 6897), True, 'import numpy as np\n'), ((6929, 6989), 'numpy.minimum', 'np.minimum', (['num_negatives', '(global_n_mentions - num_positives)'], {}), 
'(num_negatives, global_n_mentions - num_positives)\n', (6939, 6989), True, 'import numpy as np\n'), ((7007, 7038), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.bool_'}), '(shape, dtype=np.bool_)\n', (7015, 7038), True, 'import numpy as np\n'), ((7055, 7086), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.bool_'}), '(shape, dtype=np.bool_)\n', (7063, 7086), True, 'import numpy as np\n'), ((7468, 7520), 'numpy.logical_and', 'np.logical_and', (['(num_positives > 0)', '(num_negatives > 0)'], {}), '(num_positives > 0, num_negatives > 0)\n', (7482, 7520), True, 'import numpy as np\n'), ((7650, 7759), 'language.mentionmemory.utils.metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks', 'metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks', (['scores', 'positives', 'negatives'], {}), '(\n scores, positives, negatives)\n', (7724, 7759), False, 'from language.mentionmemory.utils import metric_utils\n'), ((10046, 10118), 'numpy.ones', 'np.ones', (['(self.batch_size, self.seq_len, self.num_items)'], {'dtype': 'np.int32'}), '((self.batch_size, self.seq_len, self.num_items), dtype=np.int32)\n', (10053, 10118), True, 'import numpy as np\n'), ((10155, 10211), 'numpy.ones', 'np.ones', (['(self.batch_size, self.seq_len)'], {'dtype': 'np.int32'}), '((self.batch_size, self.seq_len), dtype=np.int32)\n', (10162, 10211), True, 'import numpy as np\n'), ((10226, 10284), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(self.batch_size, self.seq_len)'}), '(2, size=(self.batch_size, self.seq_len))\n', (10243, 10284), True, 'import numpy as np\n'), ((10367, 10466), 'language.mentionmemory.utils.metric_utils.compute_loss_and_prob_from_probs_with_duplicates', 'metric_utils.compute_loss_and_prob_from_probs_with_duplicates', (['probs', 'classes', 'targets', 'weights'], {}), '(probs,\n classes, targets, weights)\n', (10428, 10466), False, 'from language.mentionmemory.utils import metric_utils\n'), ((10736, 10793), 
'numpy.zeros', 'np.zeros', (['(self.batch_size, self.seq_len)'], {'dtype': 'np.int32'}), '((self.batch_size, self.seq_len), dtype=np.int32)\n', (10744, 10793), True, 'import numpy as np\n'), ((10828, 10927), 'language.mentionmemory.utils.metric_utils.compute_loss_and_prob_from_probs_with_duplicates', 'metric_utils.compute_loss_and_prob_from_probs_with_duplicates', (['probs', 'classes', 'targets', 'weights'], {}), '(probs,\n classes, targets, weights)\n', (10889, 10927), False, 'from language.mentionmemory.utils import metric_utils\n'), ((11063, 11156), 'numpy.random.randint', 'np.random.randint', (['self.num_classes'], {'size': '(self.batch_size, self.seq_len, self.num_items)'}), '(self.num_classes, size=(self.batch_size, self.seq_len,\n self.num_items))\n', (11080, 11156), True, 'import numpy as np\n'), ((11176, 11249), 'numpy.random.randint', 'np.random.randint', (['self.num_classes'], {'size': '(self.batch_size, self.seq_len)'}), '(self.num_classes, size=(self.batch_size, self.seq_len))\n', (11193, 11249), True, 'import numpy as np\n'), ((11294, 11393), 'language.mentionmemory.utils.metric_utils.compute_loss_and_prob_from_probs_with_duplicates', 'metric_utils.compute_loss_and_prob_from_probs_with_duplicates', (['probs', 'classes', 'targets', 'weights'], {}), '(probs,\n classes, targets, weights)\n', (11355, 11393), False, 'from language.mentionmemory.utils import metric_utils\n'), ((12091, 12132), 'language.mentionmemory.utils.metric_utils.process_metrics', 'metric_utils.process_metrics', (['metric_dict'], {}), '(metric_dict)\n', (12119, 12132), False, 'from language.mentionmemory.utils import metric_utils\n'), ((12544, 12600), 'language.mentionmemory.utils.metric_utils.process_metrics', 'metric_utils.process_metrics', (['metric_dict'], {'prefix': '"""pref"""'}), "(metric_dict, prefix='pref')\n", (12572, 12600), False, 'from language.mentionmemory.utils import metric_utils\n'), ((13306, 13352), 'language.mentionmemory.utils.metric_utils.update_metrics_dtype', 
'metric_utils.update_metrics_dtype', (['metric_dict'], {}), '(metric_dict)\n', (13339, 13352), False, 'from language.mentionmemory.utils import metric_utils\n'), ((6618, 6641), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (6634, 6641), True, 'import numpy as np\n'), ((7142, 7242), 'numpy.random.choice', 'np.random.choice', (['global_n_mentions', '(num_positives[index] + num_negatives[index])'], {'replace': '(False)'}), '(global_n_mentions, num_positives[index] + num_negatives[\n index], replace=False)\n', (7158, 7242), True, 'import numpy as np\n'), ((9452, 9496), 'numpy.all', 'np.all', (['(weights == actual_weights_per_sample)'], {}), '(weights == actual_weights_per_sample)\n', (9458, 9496), True, 'import numpy as np\n'), ((9919, 9993), 'numpy.ones', 'np.ones', (['(self.batch_size, self.seq_len, self.num_items)'], {'dtype': 'np.float32'}), '((self.batch_size, self.seq_len, self.num_items), dtype=np.float32)\n', (9926, 9993), True, 'import numpy as np\n'), ((1646, 1681), 'jax.nn.log_softmax', 'jax.nn.log_softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (1664, 1681), False, 'import jax\n'), ((3211, 3224), 'numpy.log', 'np.log', (['probs'], {}), '(probs)\n', (3217, 3224), True, 'import numpy as np\n'), ((8682, 8738), 'numpy.array', 'np.array', (['([positive_scores[pos_index]] + negative_scores)'], {}), '([positive_scores[pos_index]] + negative_scores)\n', (8690, 8738), True, 'import numpy as np\n'), ((8798, 8832), 'jax.nn.log_softmax', 'jax.nn.log_softmax', (['current_scores'], {}), '(current_scores)\n', (8816, 8832), False, 'import jax\n'), ((11462, 11494), 'numpy.expand_dims', 'np.expand_dims', (['targets'], {'axis': '(-1)'}), '(targets, axis=-1)\n', (11476, 11494), True, 'import numpy as np\n'), ((13052, 13085), 'jax.numpy.asarray', 'jnp.asarray', (['[1]'], {'dtype': 'jnp.int32'}), '([1], dtype=jnp.int32)\n', (13063, 13085), True, 'import jax.numpy as jnp\n'), ((13114, 13147), 'jax.numpy.asarray', 'jnp.asarray', 
(['[1]'], {'dtype': 'jnp.int16'}), '([1], dtype=jnp.int16)\n', (13125, 13147), True, 'import jax.numpy as jnp\n'), ((13228, 13264), 'jax.numpy.asarray', 'jnp.asarray', (['[1]'], {'dtype': 'jnp.bfloat16'}), '([1], dtype=jnp.bfloat16)\n', (13239, 13264), True, 'import jax.numpy as jnp\n'), ((1735, 1767), 'numpy.expand_dims', 'np.expand_dims', (['weights'], {'axis': '(-1)'}), '(weights, axis=-1)\n', (1749, 1767), True, 'import numpy as np\n'), ((3278, 3310), 'numpy.expand_dims', 'np.expand_dims', (['weights'], {'axis': '(-1)'}), '(weights, axis=-1)\n', (3292, 3310), True, 'import numpy as np\n'), ((7406, 7442), 'numpy.logical_and', 'np.logical_and', (['positives', 'negatives'], {}), '(positives, negatives)\n', (7420, 7442), True, 'import numpy as np\n'), ((5003, 5022), 'numpy.squeeze', 'np.squeeze', (['correct'], {}), '(correct)\n', (5013, 5022), True, 'import numpy as np\n'), ((11554, 11586), 'numpy.expand_dims', 'np.expand_dims', (['weights'], {'axis': '(-1)'}), '(weights, axis=-1)\n', (11568, 11586), True, 'import numpy as np\n')] |
"""
Multi object tracking results and ground truth
- conversion,
- evaluation,
- visualization.
For more help run this file as a script with --help parameter.
The PyCharm debugger can have problems debugging inside this module due to a bug:
https://stackoverflow.com/questions/47988936/debug-properly-with-pycharm-module-named-io-py
Workaround: temporarily rename the file.
TODO: merge with utils.gt.gt
"""
import warnings
import numpy as np
import pandas as pd
import tqdm
from .bbox_mot import BboxMot
from .mot import Mot
from .posemot import PoseMot
# MOT metrics where a larger value indicates better tracking quality.
metrics_higher_is_better = ["idf1", "idp", "idr", "recall", "precision", "mota"]
# MOT metrics where a smaller value indicates better tracking quality.
metrics_lower_is_better = [
    "num_false_positives",
    "num_misses",
    "num_switches",
    "num_fragmentations",
    "motp",
    "motp_px",
]
def load_any_mot(filename_or_buffer):
    """Load a MOT file of any supported flavor.

    The flavor is sniffed from the header of the file: an all-numeric header
    means a bounding-box file (BboxMot), a "keypoint" column means a pose
    file (PoseMot), anything else is read as a plain Mot file.

    :param filename_or_buffer: path or file-like object with MOT data
    :return: BboxMot, PoseMot or Mot instance
    """
    header_df = pd.read_csv(filename_or_buffer, nrows=2)
    try:
        # rewind buffers so the actual loader below reads from the start
        filename_or_buffer.seek(0)
    except AttributeError:
        pass  # a filename was passed; nothing to rewind

    def _all_numeric(columns):
        # header made up entirely of numbers indicates a headerless bbox file
        try:
            for column in columns:
                float(column)
        except ValueError:
            return False
        return True

    if _all_numeric(header_df.columns):
        return BboxMot(filename_or_buffer=filename_or_buffer)
    if "keypoint" in header_df.columns:
        return PoseMot(filename_or_buffer=filename_or_buffer)
    return Mot(filename_or_buffer=filename_or_buffer)
def load_idtracker(filename_or_buffer):
    """
    Load idTracker results.

    Example trajectories.txt:
    X1	Y1	ProbId1	X2	Y2	ProbId2	...
    459.85	657.37	NaN	393.9	578.17	NaN	...

    :param filename_or_buffer: idTracker results (trajectories.txt or trajectories_nogaps.txt)
    :return: DataFrame with frame id x y width height confidence columns
    """
    # sep=r"\s+" replaces the deprecated delim_whitespace=True (removed in pandas 3.0)
    df = pd.read_csv(filename_or_buffer, sep=r"\s+")
    df.index += 1  # MATLAB-style 1-based frame numbering
    n_animals = len(df.columns) // 3
    # add a constant id column per animal so the melt below carries the id
    for i in range(1, n_animals + 1):
        df[i] = i
    df["frame"] = df.index
    objs = []
    for i in range(1, n_animals + 1):
        objs.append(
            df[["frame", i, "X" + str(i), "Y" + str(i)]].rename(
                {"X" + str(i): "x", "Y" + str(i): "y", i: "id"}, axis=1
            )
        )
    df_out = pd.concat(objs)
    df_out.sort_values(["frame", "id"], inplace=True)
    # BUG FIX: NaNs must be replaced in the *output* frame. The original code
    # masked the source frame (df) after pd.concat had already copied the
    # data, so NaN coordinates leaked into the returned DataFrame.
    df_out[df_out.isna()] = -1
    # columns idTracker does not provide are filled with -1 by MOT convention
    df_out["width"] = -1
    df_out["height"] = -1
    df_out["confidence"] = -1
    return df_out
def load_idtrackerai(filename_or_buffer):
    """
    Load idtracker.ai results.

    :param filename_or_buffer: idtracker.ai trajectories file (.npy with a pickled dict)
    :return: DataFrame with frame id x y width height confidence columns
    """
    trajectories = np.load(filename_or_buffer, allow_pickle=True).item()["trajectories"]
    n_frames, n_ids, _ = trajectories.shape
    # build per-cell frame and id labels matching the (frame, id, xy) layout
    frame_labels = np.repeat(np.arange(1, n_frames + 1), n_ids).reshape(n_frames, n_ids, 1)
    id_labels = np.tile(np.arange(1, n_ids + 1), n_frames).reshape(n_frames, n_ids, 1)
    stacked = np.concatenate((frame_labels, id_labels, trajectories), axis=2)
    df = pd.DataFrame(
        stacked.reshape((n_frames * n_ids, 4)),
        columns=["frame", "id", "x", "y"],
    )
    df = df.astype({"frame": "int", "id": "int"})
    df[df.isna()] = -1
    # columns idtracker.ai does not provide are filled with -1 by MOT convention
    df["width"] = -1
    df["height"] = -1
    df["confidence"] = -1
    return df
def load_toxtrac(filename_or_buffer, topleft_xy=(0, 0)):
    """
    Load ToxTrack results.

    Example Tracking_0.txt:
    0	0	1	194.513	576.447	1
    1	0	1	192.738	580.313	1

    Documentation of the file format is in
    [ToxTrac: a fast and robust software for tracking organisms](https://arxiv.org/pdf/1706.02577.pdf) page 33.

    :param filename_or_buffer: Toxtrac results (Tracking_0.txt)
    :param topleft_xy: tuple, length 2; xy coordinates of the arena top left corner
    :return: DataFrame with frame id x y width height confidence columns
    """
    # sep=r"\s+" replaces the deprecated delim_whitespace=True (removed in pandas 3.0)
    df = pd.read_csv(
        filename_or_buffer,
        sep=r"\s+",
        names=["frame", "arena", "id", "x", "y", "label"],
        usecols=["frame", "id", "x", "y"],
    )
    df["frame"] += 1  # ToxTrac frames are 0-based; MOT uses 1-based (MATLAB) indexing
    # shift arena-local coordinates to full-image coordinates
    df["x"] += topleft_xy[0]
    df["y"] += topleft_xy[1]
    # columns ToxTrac does not provide are filled with -1 by MOT convention
    df = df.assign(width=-1)
    df = df.assign(height=-1)
    df = df.assign(confidence=-1)
    df.sort_values(["frame", "id"], inplace=True)
    df[df.isna()] = -1
    return df
def load_sleap_analysis_as_posemot(filename_or_buffer, num_objects=None):
    """
    Load a SLEAP "analysis" HDF5 file as a PoseMot object.

    :param filename_or_buffer: SLEAP analysis HDF5 file
                               (exported via File -> Export Analysis HDF5)
    :param num_objects: limit on the number of loaded tracks; None loads all
    :return: PoseMot; nan where an object is not present
    """
    import h5py

    h5 = h5py.File(filename_or_buffer, "r")
    try:
        probe = h5["tracks"][:]  # noqa: F841
    except KeyError:
        print(
            f'File {filename_or_buffer} doesn\'t appear to be SLEAP "analysis" file.\n'
            f"Export analysis from sleap-label using File -> Export Analysis HDF5.\n"
        )
        raise
    if num_objects is None:
        num_objects = h5["tracks"].shape[0]
    mot = PoseMot()
    # tracks dataset layout appears to be (track, xy, keypoint, frame);
    # init_blank(frames, ids, n_keypoints)
    mot.init_blank(
        range(h5["tracks"].shape[3]), range(num_objects), h5["tracks"].shape[2]
    )
    mot.ds["x"].values = np.moveaxis(h5["tracks"][:num_objects, 0, :, :], 2, 0)
    mot.ds["y"].values = np.moveaxis(h5["tracks"][:num_objects, 1, :, :], 2, 0)
    mot.marker_radius = 8
    return mot
def load_sleap_as_dataframe(filename):
    """Load a SLEAP labels file into a flat DataFrame.

    Requires the sleap package. Produces one row per body part of every
    instance in every labeled frame.

    :param filename: SLEAP labels file (.slp)
    :return: DataFrame with columns x, y, score, visible, bodypart, frame,
             track, source ('predicted' or 'manual'), video and keypoint
    """
    try:
        import sleap
    except ImportError as exception:
        # augment the error with an actionable hint before re-raising
        exception.msg = """
io.load_sleap_to_dataframe() requires the sleap module installed. Either install the module or export analysis file from
sleap-label application and use load_posemot_sleap_analysis() without additional dependencies.
"""
        raise exception
    labels = sleap.load_file(filename)
    points = []
    for frame in tqdm.tqdm(labels):
        for instance in frame:
            for node_name, point in zip(labels.skeleton.node_names, instance):
                try:
                    score = point.score
                except AttributeError:
                    # manually placed points carry no prediction score
                    score = -1
                # NOTE(review): PredictedInstance is tested first — presumably it
                # subclasses Instance, so the order matters; confirm against sleap.
                if isinstance(instance, sleap.instance.PredictedInstance):
                    instance_class = 'predicted'
                elif isinstance(instance, sleap.instance.Instance):
                    instance_class = 'manual'
                else:
                    assert False, 'unknown instance type: {}'.format(type(instance))
                points.append((point.x, point.y, score, point.visible, node_name, instance.frame_idx,
                               instance.track.name, instance_class, instance.video.backend.filename))
    df = pd.DataFrame(points, columns=['x', 'y', 'score', 'visible', 'bodypart', 'frame',
                                  'track', 'source', 'video'])
    # numeric keypoint index corresponding to the bodypart name
    df['keypoint'] = df.bodypart.apply(labels.skeleton.node_names.index)
    return df
def load_sleap_as_posemot(filename):
    """
    Load a SLEAP labels file as a PoseMot object.

    When the same (frame, id, keypoint) appears both as a prediction and as a
    manual annotation, the manual annotation wins.

    :param filename: SLEAP labels file (.slp)
    :return: PoseMot
    """
    df = load_sleap_as_dataframe(filename)
    # default SLEAP track naming is "track_<num>"; use <num> as the object id
    df['id'] = df.track.str.split('_', expand=True)[1].astype(int)
    df = df.rename(columns={'score': 'confidence'})
    df = df.set_index(["frame", "id", "keypoint"])
    # overlay manual annotations on top of predictions to drop duplicates
    predicted = df.query('source == "predicted"')
    manual = df.query('source == "manual"')
    deduplicated = predicted.copy()
    deduplicated.loc[manual.index] = manual
    assert deduplicated.index.is_unique
    return PoseMot.from_df(deduplicated.reset_index())
def save_mot(filename, df):
    """Write MOT trajectories as CSV (with header, without the index column).

    :param filename: output filename or buffer
    :param df: DataFrame with MOT columns
    """
    df.to_csv(filename, index=False)
def load_mot(filepath_or_buffer):
    """
    Load Multiple Object Tacking Challenge trajectories file.

    :param filepath_or_buffer: mot filename_or_buffer or buffer
    :return: DataFrame, columns frame and id start with 1 (MATLAB indexing)
    """
    df = pd.read_csv(filepath_or_buffer, index_col=["frame", "id"])
    # -1 coordinates mark "object not present"; drop those rows
    present = (df.x != -1) & (df.y != -1)
    return df[present]
def mot_in_roi(df, roi):
    """
    Limit MOT to a region of interest.

    :param df: MOT trajectories, DataFrame
    :param roi: utils.roi.ROI
    :return: MOT trajectories, DataFrame
    """
    left = roi.x()
    top = roi.y()
    right = left + roi.width()
    bottom = top + roi.height()
    # left/top edges are inclusive, right/bottom edges exclusive
    inside = (df.x >= left) & (df.x < right) & (df.y >= top) & (df.y < bottom)
    return df[inside]
def eval_mot(df_gt, df_results, sqdistth=10000):
    """
    Evaluate trajectories by comparing them to a ground truth.

    :param df_gt: ground truth DataFrame, columns <frame>, <id>, <x>, <y>; <frame> and <id> are 1-based; see load_mot
    :param df_results: result trajectories DataFrame, format same as df_gt
    :param sqdistth: square of the distance threshold, only detections and ground truth objects closer than
                     the threshold can be matched
    :return: (summary DataFrame, MOTAccumulator)
    """
    # Detections with -1 or NaN coordinates mark missing objects and must not
    # enter the metric computation.
    # BUG FIX: the second term used to test x twice; y == -1 was never checked.
    nan_mask = (
        (df_results.x == -1)
        | (df_results.y == -1)
        | df_results.x.isna()
        | df_results.y.isna()
    )
    if len(df_results[nan_mask]) > 0:
        warnings.warn("stripping nans from the evaluated trajectories")
        df_results = df_results[~nan_mask]
    import motmetrics as mm
    from motmetrics.utils import compare_to_groundtruth

    acc = compare_to_groundtruth(
        df_gt, df_results, dist="euc", distfields=["x", "y"], distth=sqdistth
    )
    mh = mm.metrics.create()
    # remove id_global_assignment metric, workaround for https://github.com/cheind/py-motmetrics/issues/19
    metrics = mh.names[:]
    metrics.remove("id_global_assignment")
    return mh.compute(acc, metrics), acc  # metrics=mm.metrics.motchallenge_metrics
def eval_and_save(ground_truth, mot_results, out_csv=None, results_keypoint=None):
    """
    Evaluate results and save metrics.

    :param ground_truth: ground truth filename_or_buffer (MOT format), buffer or Mot object
    :param mot_results: results filename_or_buffer (MOT format), buffer or Mot
    :param out_csv: output file with a summary (filename_or_buffer or buffer)
    :param results_keypoint: keypoint used for evaluation of keypoint/pose data against centroid ground truth
    """
    # accept Mot-like objects as well as files/buffers
    try:
        df_gt = ground_truth.to_dataframe()
    except AttributeError:
        df_gt = load_mot(ground_truth)
    try:
        df_results = mot_results.to_dataframe()
    except AttributeError:
        df_results = load_any_mot(mot_results).to_dataframe()
    if results_keypoint is not None:
        # evaluate a single keypoint against the centroid ground truth
        df_results = df_results[df_results.keypoint == results_keypoint]
    rename_map = {"frame": "FrameId", "id": "Id"}
    df_gt = df_gt.rename(columns=rename_map).set_index(["FrameId", "Id"])
    df_results = df_results.rename(columns=rename_map).set_index(["FrameId", "Id"])
    print("Evaluating...")
    summary, acc = eval_mot(df_gt, df_results)
    summary["motp_px"] = np.sqrt(summary["motp"])  # convert from square pixels to pixels
    import motmetrics as mm

    print(mm.io.render_summary(summary))
    if out_csv is not None:
        summary.to_csv(out_csv, index=False)
def array_to_mot_dataframe(results):
    """
    Create MOT challenge format DataFrame out of 3 dimensional array of trajectories.

    :param results: ndarray, shape=(n_frames, n_animals, 2 or 4); coordinates are in yx order, nan when id not present
    :return: DataFrame with frame, id, x, y, width, height and confidence columns
    """
    assert results.ndim == 3
    assert results.shape[2] in (2, 4)
    n_frames, n_animals, n_coords = results.shape
    # input columns are in yx (and height/width) order; reorder to MOT's xy
    if n_coords == 4:
        columns = ["x", "y", "width", "height"]
        source_indices = [1, 0, 3, 2]
    else:
        columns = ["x", "y"]
        source_indices = [1, 0]
    per_animal = []
    for animal_idx in range(n_animals):
        animal_df = pd.DataFrame(results[:, animal_idx, source_indices], columns=columns)
        animal_df["frame"] = list(range(1, n_frames + 1))
        # nan coordinates mean the id is absent in that frame
        animal_df = animal_df[~(animal_df.x.isna() | animal_df.y.isna())]
        animal_df["id"] = animal_idx + 1
        per_animal.append(animal_df[["frame", "id"] + columns])
    df = pd.concat(per_animal)
    df.sort_values(["frame", "id"], inplace=True)
    if n_coords == 2:
        df["width"] = -1
        df["height"] = -1
    df["confidence"] = -1
    return df
| [
"motmetrics.io.render_summary",
"numpy.sqrt",
"pandas.read_csv",
"pandas.DataFrame",
"motmetrics.utils.compare_to_groundtruth",
"tqdm.tqdm",
"h5py.File",
"motmetrics.metrics.create",
"numpy.concatenate",
"numpy.moveaxis",
"warnings.warn",
"numpy.load",
"pandas.concat",
"numpy.arange",
"s... | [((836, 876), 'pandas.read_csv', 'pd.read_csv', (['filename_or_buffer'], {'nrows': '(2)'}), '(filename_or_buffer, nrows=2)\n', (847, 876), True, 'import pandas as pd\n'), ((2010, 2064), 'pandas.read_csv', 'pd.read_csv', (['filename_or_buffer'], {'delim_whitespace': '(True)'}), '(filename_or_buffer, delim_whitespace=True)\n', (2021, 2064), True, 'import pandas as pd\n'), ((2451, 2466), 'pandas.concat', 'pd.concat', (['objs'], {}), '(objs)\n', (2460, 2466), True, 'import pandas as pd\n'), ((2927, 2973), 'numpy.load', 'np.load', (['filename_or_buffer'], {'allow_pickle': '(True)'}), '(filename_or_buffer, allow_pickle=True)\n', (2934, 2973), True, 'import numpy as np\n'), ((4260, 4404), 'pandas.read_csv', 'pd.read_csv', (['filename_or_buffer'], {'delim_whitespace': '(True)', 'names': "['frame', 'arena', 'id', 'x', 'y', 'label']", 'usecols': "['frame', 'id', 'x', 'y']"}), "(filename_or_buffer, delim_whitespace=True, names=['frame',\n 'arena', 'id', 'x', 'y', 'label'], usecols=['frame', 'id', 'x', 'y'])\n", (4271, 4404), True, 'import pandas as pd\n'), ((4947, 4981), 'h5py.File', 'h5py.File', (['filename_or_buffer', '"""r"""'], {}), "(filename_or_buffer, 'r')\n", (4956, 4981), False, 'import h5py\n'), ((5549, 5602), 'numpy.moveaxis', 'np.moveaxis', (["f['tracks'][:num_objects, 0, :, :]", '(2)', '(0)'], {}), "(f['tracks'][:num_objects, 0, :, :], 2, 0)\n", (5560, 5602), True, 'import numpy as np\n'), ((5628, 5681), 'numpy.moveaxis', 'np.moveaxis', (["f['tracks'][:num_objects, 1, :, :]", '(2)', '(0)'], {}), "(f['tracks'][:num_objects, 1, :, :], 2, 0)\n", (5639, 5681), True, 'import numpy as np\n'), ((6125, 6150), 'sleap.load_file', 'sleap.load_file', (['filename'], {}), '(filename)\n', (6140, 6150), False, 'import sleap\n'), ((6185, 6202), 'tqdm.tqdm', 'tqdm.tqdm', (['labels'], {}), '(labels)\n', (6194, 6202), False, 'import tqdm\n'), ((7004, 7117), 'pandas.DataFrame', 'pd.DataFrame', (['points'], {'columns': "['x', 'y', 'score', 'visible', 'bodypart', 'frame', 
'track', 'source', 'video']"}), "(points, columns=['x', 'y', 'score', 'visible', 'bodypart',\n 'frame', 'track', 'source', 'video'])\n", (7016, 7117), True, 'import pandas as pd\n'), ((8228, 8286), 'pandas.read_csv', 'pd.read_csv', (['filepath_or_buffer'], {'index_col': "['frame', 'id']"}), "(filepath_or_buffer, index_col=['frame', 'id'])\n", (8239, 8286), True, 'import pandas as pd\n'), ((9726, 9823), 'motmetrics.utils.compare_to_groundtruth', 'compare_to_groundtruth', (['df_gt', 'df_results'], {'dist': '"""euc"""', 'distfields': "['x', 'y']", 'distth': 'sqdistth'}), "(df_gt, df_results, dist='euc', distfields=['x', 'y'],\n distth=sqdistth)\n", (9748, 9823), False, 'from motmetrics.utils import compare_to_groundtruth\n'), ((9843, 9862), 'motmetrics.metrics.create', 'mm.metrics.create', ([], {}), '()\n', (9860, 9862), True, 'import motmetrics as mm\n'), ((11299, 11323), 'numpy.sqrt', 'np.sqrt', (["summary['motp']"], {}), "(summary['motp'])\n", (11306, 11323), True, 'import numpy as np\n'), ((12459, 12474), 'pandas.concat', 'pd.concat', (['objs'], {}), '(objs)\n', (12468, 12474), True, 'import pandas as pd\n'), ((9524, 9587), 'warnings.warn', 'warnings.warn', (['"""stripping nans from the evaluated trajectories"""'], {}), "('stripping nans from the evaluated trajectories')\n", (9537, 9587), False, 'import warnings\n'), ((11448, 11477), 'motmetrics.io.render_summary', 'mm.io.render_summary', (['summary'], {}), '(summary)\n', (11468, 11477), True, 'import motmetrics as mm\n'), ((12198, 12251), 'pandas.DataFrame', 'pd.DataFrame', (['results[:, i, indices]'], {'columns': 'columns'}), '(results[:, i, indices], columns=columns)\n', (12210, 12251), True, 'import pandas as pd\n'), ((3091, 3117), 'numpy.arange', 'np.arange', (['(1)', '(n_frames + 1)'], {}), '(1, n_frames + 1)\n', (3100, 3117), True, 'import numpy as np\n'), ((3176, 3199), 'numpy.arange', 'np.arange', (['(1)', '(n_ids + 1)'], {}), '(1, n_ids + 1)\n', (3185, 3199), True, 'import numpy as np\n'), ((3270, 3338), 
'numpy.concatenate', 'np.concatenate', (["(frames, obj_ids, traj_dict['trajectories'])"], {'axis': '(2)'}), "((frames, obj_ids, traj_dict['trajectories']), axis=2)\n", (3284, 3338), True, 'import numpy as np\n')] |
import numpy as np
class Poblacion:
    """Pool of candidate solutions for the optimum of a function.

    Holds candidate points sampled uniformly inside a box-shaped search
    region of a given dimension.

    Attributes:
        dimension: Integer dimensionality of the function / search space.
        lim: Two-element list [low, high] bounding every coordinate
            (low is the minimum, high the maximum).
        elementos: Integer number of candidate solutions in the pool.
        valores: Numpy float array of shape (elementos, dimension) holding
            uniform samples, or None until inicializar() has been called.
    """

    def __init__(self, dim, limites, total_individuos):
        """Store the space dimension, the box limits and the pool size.

        The sample array is deferred: valores starts as None and is only
        filled when inicializar() is invoked.
        """
        self.dimension = dim
        self.lim = limites
        self.elementos = total_individuos
        self.valores = None

    def inicializar(self):
        """Draw the initial population.

        Fills valores with an (elementos, dimension) numpy array sampled
        from a uniform distribution bounded by lim.
        """
        low, high = self.lim
        pool_shape = (self.elementos, self.dimension)
        self.valores = np.random.uniform(low, high, size=pool_shape)

    @property
    def puntos(self):
        """The current population array (None before initialization)."""
        return self.valores
class UMDA:
    """Univariate Marginal Distribution Algorithm (UMDA) optimizer.

    Searches for the minimum of an objective function by repeatedly
    fitting a per-dimension normal distribution to the best third of the
    population and resampling from it.

    Attributes:
        objetivo: Python callable implementing the objective function.
        dimension: Integer dimensionality of the function.
        lim: Two-element list [low, high] restricting each coordinate.
        elementos: Integer number of candidate solutions per generation.
        mejores: Integer, constant third of `elementos` (the elite count).
        pasos: Integer iteration count (default 100).
        poblacion_valores: Array holding the current candidate solutions.
        evaluaciones: Array pairing candidates with their objective values
            (see the review notes in optimizar about its later contents).
        f_args: Tuple of extra positional arguments forwarded to `objetivo`.
    """
    def __init__(self, func, dim, limites, poblacion, iteraciones=100, args=()):
        """Initialize every attribute except poblacion_valores and
        evaluaciones, which are produced while the algorithm runs.
        """
        self.objetivo = func
        self.dimension = dim
        self.lim = limites
        self.elementos = poblacion
        self.mejores = self.elementos // 3
        self.pasos = iteraciones
        self.poblacion_valores = None
        self.evaluaciones = None
        self.f_args = args
    def actualizar(self):
        """Evaluate the current population.

        Builds a (elementos, dimension + 1) array whose first `dimension`
        columns are poblacion_valores and whose last column holds the
        objective value of each candidate, then stores it in evaluaciones.
        """
        temp_arreglo = np.zeros((self.elementos, self.dimension + 1))
        temp_arreglo[:, :-1] = self.poblacion_valores
        temp_arreglo[:, -1] = np.array(
            [self.objetivo(i, *self.f_args) for i in self.poblacion_valores]
        )
        # copy the freshly built array to avoid aliasing
        self.evaluaciones = np.copy(temp_arreglo)
    def optimizar(self):
        """Run the UMDA loop.

        Each generation: evaluate the population, sort candidates from best
        to worst by objective value, take the top `mejores`, and resample a
        new population from per-dimension normal statistics of that elite.
        """
        poblacion = Poblacion(self.dimension, self.lim, self.elementos)
        poblacion.inicializar()
        self.poblacion_valores = poblacion.puntos
        # allocate an array for the q best candidates
        q_mejores = np.zeros((self.mejores, self.dimension + 1))
        for _ in range(self.pasos):
            # always refresh the population's evaluations first
            self.actualizar()
            # sort points by objective value, best to worst
            self.evaluaciones = self.evaluaciones[self.evaluaciones[:, -1].argsort()]
            # NOTE(review): this clip also DROPS the objective column, so from
            # here on evaluaciones holds only clipped coordinates — verify.
            self.evaluaciones = np.clip(self.evaluaciones[:, :-1], *self.lim)
            # pick the q best rows
            q_mejores = self.evaluaciones[: self.mejores, :]
            # iterate the transpose so the loop runs over dimensions, not elements
            # NOTE(review): the objective column was already stripped above, so
            # this extra [:, :-1] discards the LAST search dimension — verify.
            for i in q_mejores[:, :-1].T:
                # NOTE(review): each pass overwrites the entire population using
                # one dimension's scalar mean/std; only the statistics of the
                # final dimension iterated survive the loop — verify intent.
                self.poblacion_valores = np.random.normal(
                    i.mean(), i.std(), size=self.poblacion_valores.shape
                )
    @property
    def resultado(self):
        """First row of evaluaciones, i.e. the best-ranked entry.

        NOTE(review): after optimizar, evaluaciones contains only clipped
        coordinates (the objective column was removed), so this returns the
        best candidate's coordinates rather than (point, value) — verify.
        """
        return self.evaluaciones[0, :]
| [
"numpy.clip",
"numpy.copy",
"numpy.zeros",
"numpy.random.uniform"
] | [((1557, 1624), 'numpy.random.uniform', 'np.random.uniform', (['*self.lim'], {'size': '(self.elementos, self.dimension)'}), '(*self.lim, size=(self.elementos, self.dimension))\n', (1574, 1624), True, 'import numpy as np\n'), ((3821, 3867), 'numpy.zeros', 'np.zeros', (['(self.elementos, self.dimension + 1)'], {}), '((self.elementos, self.dimension + 1))\n', (3829, 3867), True, 'import numpy as np\n'), ((4133, 4154), 'numpy.copy', 'np.copy', (['temp_arreglo'], {}), '(temp_arreglo)\n', (4140, 4154), True, 'import numpy as np\n'), ((4935, 4979), 'numpy.zeros', 'np.zeros', (['(self.mejores, self.dimension + 1)'], {}), '((self.mejores, self.dimension + 1))\n', (4943, 4979), True, 'import numpy as np\n'), ((5287, 5332), 'numpy.clip', 'np.clip', (['self.evaluaciones[:, :-1]', '*self.lim'], {}), '(self.evaluaciones[:, :-1], *self.lim)\n', (5294, 5332), True, 'import numpy as np\n')] |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
import wavenet
import math
import numpy as np
import time
from utils import print_etr
class Conv(torch.nn.Module):
    """1-D convolution with Xavier-uniform weight initialization and an
    optional causal mode that left-pads the input so no output position
    depends on future samples.
    """
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 dilation=1, bias=True, w_init_gain='linear', is_causal=False):
        super(Conv, self).__init__()
        self.is_causal = is_causal
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.conv = torch.nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            bias=bias,
        )
        # Scale the Xavier init by the gain matching the nonlinearity that
        # follows this layer (w_init_gain names that nonlinearity).
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.conv.weight, gain=gain)

    def forward(self, signal):
        if not self.is_causal:
            return self.conv(signal)
        # Left-pad by the dilated receptive extent so output[t] only sees
        # input[<= t]; output length then matches input length.
        left_pad = int((self.kernel_size - 1) * self.dilation)
        padded = torch.nn.functional.pad(signal, (left_pad, 0))
        return self.conv(padded)
class WaveNet(torch.nn.Module):
    """Unconditional WaveNet: a stack of dilated causal convolutions with
    gated tanh/sigmoid activations, residual connections and skip outputs.

    The local-conditioning path (upsample / cond_layers) is commented out
    throughout, so the model runs without conditioning features; see the
    note on get_cond_input, which still references those attributes.
    """
    def __init__(self,
                 n_in_channels,
                 n_layers,
                 max_dilation,
                 n_residual_channels,
                 n_skip_channels,
                 n_out_channels,):
                 # n_cond_channels,
                 # upsamp_window,
                 # upsamp_stride):
        """Build embeddings, the dilated layer stack and output convolutions.

        Dilations cycle through powers of two 1, 2, 4, ..., max_dilation and
        then repeat (see loop_factor below).
        """
        super(WaveNet, self).__init__()
        # self.upsample = torch.nn.ConvTranspose1d(n_cond_channels,
        #                                          n_cond_channels,
        #                                          upsamp_window,
        #                                          upsamp_stride)
        self.n_layers = n_layers
        self.max_dilation = max_dilation
        self.n_residual_channels = n_residual_channels
        self.n_out_channels = n_out_channels
        # self.cond_layers = Conv(n_cond_channels, 2*n_residual_channels*n_layers,
        #                         w_init_gain='tanh')
        self.dilate_layers = torch.nn.ModuleList()
        self.res_layers = torch.nn.ModuleList()
        self.skip_layers = torch.nn.ModuleList()
        # Maps integer sample indices to residual-channel vectors.
        self.embed = torch.nn.Embedding(n_in_channels,
                                        n_residual_channels)
        self.conv_out = Conv(n_skip_channels, n_out_channels,
                             bias=False, w_init_gain='relu')
        self.conv_end = Conv(n_out_channels, n_out_channels,
                             bias=False, w_init_gain='linear')
        # NOTE(review): hard-coded and never read inside this class —
        # presumably consumed by the surrounding training code; verify.
        self.output_length = 16
        # Number of distinct dilations before the cycle restarts.
        loop_factor = math.floor(math.log2(max_dilation)) + 1
        for i in range(n_layers):
            dilation = 2 ** (i % loop_factor)
            # Kernel size is 2 in nv-wavenet
            # Doubled output channels: first half feeds tanh, second sigmoid.
            in_layer = Conv(n_residual_channels, 2*n_residual_channels,
                            kernel_size=2, dilation=dilation,
                            w_init_gain='tanh', is_causal=True)
            self.dilate_layers.append(in_layer)
            # last one is not necessary
            if i < n_layers - 1:
                res_layer = Conv(n_residual_channels, n_residual_channels,
                                 w_init_gain='linear')
                self.res_layers.append(res_layer)
            skip_layer = Conv(n_residual_channels, n_skip_channels,
                             w_init_gain='relu')
            self.skip_layers.append(skip_layer)

    def receptive_field(self):
        """Return the receptive field length: the sum of all layer dilations
        (each causal kernel-2 layer extends the field by its dilation).
        """
        receptive_field = 0
        loop_factor = math.floor(math.log2(self.max_dilation)) + 1
        for i in range(self.n_layers):
            dilation = 2 ** (i % loop_factor)
            receptive_field += dilation
        return receptive_field

    def forward(self, forward_input):
        """Compute per-timestep output activations over n_out_channels.

        `forward_input` is a tuple whose second element holds integer
        sample indices (cast to long and embedded below); the first
        element is ignored — a leftover of the conditioning interface.
        The result is shifted so position t never saw sample t itself.
        """
        forward_input = forward_input[1]
        # print("forward_input.shape", forward_input.shape)
        # print(forward_input[0])
        # cond_input = self.upsample(features)
        # assert(cond_input.size(2) >= forward_input.size(1))
        # if cond_input.size(2) > forward_input.size(1):
        #     cond_input = cond_input[:, :, :forward_input.size(1)]
        forward_input = self.embed(forward_input.long())
        # print(forward_input.shape)
        # print(forward_input[0])
        # (batch, time, channels) -> (batch, channels, time) for Conv1d.
        forward_input = forward_input.transpose(1, 2)
        # cond_acts = self.cond_layers(cond_input)
        # cond_acts = cond_acts.view(cond_acts.size(0), self.n_layers, -1, cond_acts.size(2))
        for i in range(self.n_layers):
            in_act = self.dilate_layers[i](forward_input)
            in_act = in_act # + cond_acts[:,i,:,:]
            # Gated activation unit: tanh half times sigmoid half.
            t_act = torch.tanh(in_act[:, :self.n_residual_channels, :])
            s_act = torch.sigmoid(in_act[:, self.n_residual_channels:, :])
            acts = t_act * s_act
            if i < len(self.res_layers):
                res_acts = self.res_layers[i](acts)
                # Residual connection feeding the next layer.
                forward_input = res_acts + forward_input
            if i == 0:
                output = self.skip_layers[i](acts)
            else:
                # Accumulate skip contributions across layers.
                output = self.skip_layers[i](acts) + output
        output = torch.nn.functional.relu(output, True)
        output = self.conv_out(output)
        output = torch.nn.functional.relu(output, True)
        output = self.conv_end(output)
        # Remove last probabilities because they've seen all the data
        last = output[:, :, -1]
        last = last.unsqueeze(2)
        output = output[:, :, :-1]
        # Replace probability for first value with 0's because we don't know
        first = last * 0.0
        output = torch.cat((first, output), dim=2)
        # print("output.shape", output.shape)
        return output

    def generate(self,
                 num_samples,
                 first_samples=None,
                 temperature=1.,
                 receptive_field=None):
        """Autoregressively sample `num_samples` values.

        Starts from `first_samples` (a single zero by default), left-pads
        to the receptive field, then repeatedly feeds the last
        receptive_field samples through the network. With temperature > 0
        the next sample is drawn from the softmax distribution (sharpened
        or flattened by the temperature); otherwise the argmax is taken.
        Returns the full 1-D tensor of generated (long) sample indices.
        """
        self.eval()
        if receptive_field is None:
            receptive_field = self.receptive_field()
        if first_samples is None:
            first_samples = torch.zeros([1], dtype=torch.long)
        with torch.no_grad():
            generated = torch.tensor(first_samples)
            num_pad = receptive_field - generated.size(0)
            if num_pad > 0:
                # Left-pad with zeros so a full context window is available.
                generated = torch.cat((torch.zeros(num_pad), generated))
            # print("generated", generated)
            # print("generated.shape", generated.shape)
            start = time.time()
            for i in range(num_samples):
                if i%10 == 0 and i > 0:
                    print(f"Generated {i} samples.")
                    print_etr(start, total_iterations = num_samples, current_iteration = i + 1)
                # input = Variable(torch.FloatTensor(1, self.classes, self.receptive_field).zero_())
                # input = input.scatter_(1, generated[-self.receptive_field:].view(1, -1, self.receptive_field), 1.)
                input = generated[-receptive_field:].view(1, receptive_field)
                # print("input.shape", input.shape)
                with torch.no_grad():
                    # forward expects a tuple; slot 0 (conditioning) is unused.
                    x = self((None, input))[:, :, -1].squeeze()
                if temperature > 0:
                    x /= temperature
                    prob = F.softmax(x, dim=0).cpu().numpy()
                    x = np.random.choice(self.n_out_channels, p=prob)
                else:
                    x = torch.max(x, 0)[1]
                generated = torch.cat((generated, torch.tensor([x], dtype=torch.long)), 0)
            # generated = (generated.double() / self.classes) * 2. - 1.
            # mu_gen = mu_law_expansion(generated.double(), self.classes)
            # print("mu_gen", mu_gen)
            # self.train()
        return generated

    def export_weights(self):
        """
        Returns a dictionary with tensors ready for nv_wavenet wrapper.

        Collects embeddings, every dilated/residual/skip layer's weights and
        biases, and the output convolutions. Requires CUDA: embedding_prev
        is allocated as a torch.cuda.FloatTensor.
        """
        model = {}
        # We're not using a convolution to start to this does nothing
        model["embedding_prev"] = torch.cuda.FloatTensor(self.n_out_channels,
                                  self.n_residual_channels).fill_(0.0)
        model["embedding_curr"] = self.embed.weight.data
        model["conv_out_weight"] = self.conv_out.conv.weight.data
        model["conv_end_weight"] = self.conv_end.conv.weight.data
        dilate_weights = []
        dilate_biases = []
        for layer in self.dilate_layers:
            dilate_weights.append(layer.conv.weight.data)
            dilate_biases.append(layer.conv.bias.data)
        model["dilate_weights"] = dilate_weights
        model["dilate_biases"] = dilate_biases
        model["max_dilation"] = self.max_dilation
        res_weights = []
        res_biases = []
        for layer in self.res_layers:
            res_weights.append(layer.conv.weight.data)
            res_biases.append(layer.conv.bias.data)
        model["res_weights"] = res_weights
        model["res_biases"] = res_biases
        skip_weights = []
        skip_biases = []
        for layer in self.skip_layers:
            skip_weights.append(layer.conv.weight.data)
            skip_biases.append(layer.conv.bias.data)
        model["skip_weights"] = skip_weights
        model["skip_biases"] = skip_biases
        model["use_embed_tanh"] = False
        return model

    def get_cond_input(self, features):
        """
        Takes in features and gets the 2*R x batch x # layers x samples tensor.

        NOTE(review): self.upsample and self.cond_layers are commented out in
        __init__, so calling this would raise AttributeError as-is — either
        restore the conditioning path or remove this method; verify.
        """
        # TODO(rcosta): trim conv artifacts. mauybe pad spec to kernel multiple
        cond_input = self.upsample(features)
        time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
        cond_input = cond_input[:, :, :-time_cutoff]
        cond_input = self.cond_layers(cond_input).data
        cond_input = cond_input.view(cond_input.size(0), self.n_layers, -1, cond_input.size(2))
        # This makes the data channels x batch x num_layers x samples
        cond_input = cond_input.permute(2,0,1,3)
        return cond_input
| [
"torch.nn.Conv1d",
"torch.max",
"math.log2",
"torch.nn.functional.pad",
"torch.nn.functional.softmax",
"torch.tanh",
"torch.nn.ModuleList",
"utils.print_etr",
"torch.nn.Embedding",
"numpy.random.choice",
"torch.nn.functional.relu",
"time.time",
"torch.cat",
"torch.cuda.FloatTensor",
"tor... | [((2235, 2352), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=\n stride, dilation=dilation, bias=bias)\n', (2250, 2352), False, 'import torch\n'), ((3791, 3812), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (3810, 3812), False, 'import torch\n'), ((3839, 3860), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (3858, 3860), False, 'import torch\n'), ((3888, 3909), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (3907, 3909), False, 'import torch\n'), ((3940, 3994), 'torch.nn.Embedding', 'torch.nn.Embedding', (['n_in_channels', 'n_residual_channels'], {}), '(n_in_channels, n_residual_channels)\n', (3958, 3994), False, 'import torch\n'), ((6891, 6929), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['output', '(True)'], {}), '(output, True)\n', (6915, 6929), False, 'import torch\n'), ((6986, 7024), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['output', '(True)'], {}), '(output, True)\n', (7010, 7024), False, 'import torch\n'), ((7357, 7390), 'torch.cat', 'torch.cat', (['(first, output)'], {'dim': '(2)'}), '((first, output), dim=2)\n', (7366, 7390), False, 'import torch\n'), ((8168, 8179), 'time.time', 'time.time', ([], {}), '()\n', (8177, 8179), False, 'import time\n'), ((2699, 2739), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['signal', 'padding'], {}), '(signal, padding)\n', (2722, 2739), False, 'import torch\n'), ((6414, 6465), 'torch.tanh', 'torch.tanh', (['in_act[:, :self.n_residual_channels, :]'], {}), '(in_act[:, :self.n_residual_channels, :])\n', (6424, 6465), False, 'import torch\n'), ((6486, 6540), 'torch.sigmoid', 'torch.sigmoid', (['in_act[:, self.n_residual_channels:, :]'], {}), '(in_act[:, self.n_residual_channels:, :])\n', (6499, 6540), False, 'import torch\n'), ((7794, 7828), 
'torch.zeros', 'torch.zeros', (['[1]'], {'dtype': 'torch.long'}), '([1], dtype=torch.long)\n', (7805, 7828), False, 'import torch\n'), ((7842, 7857), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7855, 7857), False, 'import torch\n'), ((7883, 7910), 'torch.tensor', 'torch.tensor', (['first_samples'], {}), '(first_samples)\n', (7895, 7910), False, 'import torch\n'), ((2495, 2536), 'torch.nn.init.calculate_gain', 'torch.nn.init.calculate_gain', (['w_init_gain'], {}), '(w_init_gain)\n', (2523, 2536), False, 'import torch\n'), ((4361, 4384), 'math.log2', 'math.log2', (['max_dilation'], {}), '(max_dilation)\n', (4370, 4384), False, 'import math\n'), ((5304, 5332), 'math.log2', 'math.log2', (['self.max_dilation'], {}), '(self.max_dilation)\n', (5313, 5332), False, 'import math\n'), ((8318, 8389), 'utils.print_etr', 'print_etr', (['start'], {'total_iterations': 'num_samples', 'current_iteration': '(i + 1)'}), '(start, total_iterations=num_samples, current_iteration=i + 1)\n', (8327, 8389), False, 'from utils import print_etr\n'), ((8743, 8758), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8756, 8758), False, 'import torch\n'), ((8962, 9007), 'numpy.random.choice', 'np.random.choice', (['self.n_out_channels'], {'p': 'prob'}), '(self.n_out_channels, p=prob)\n', (8978, 9007), True, 'import numpy as np\n'), ((9622, 9691), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['self.n_out_channels', 'self.n_residual_channels'], {}), '(self.n_out_channels, self.n_residual_channels)\n', (9644, 9691), False, 'import torch\n'), ((8025, 8045), 'torch.zeros', 'torch.zeros', (['num_pad'], {}), '(num_pad)\n', (8036, 8045), False, 'import torch\n'), ((9046, 9061), 'torch.max', 'torch.max', (['x', '(0)'], {}), '(x, 0)\n', (9055, 9061), False, 'import torch\n'), ((9111, 9146), 'torch.tensor', 'torch.tensor', (['[x]'], {'dtype': 'torch.long'}), '([x], dtype=torch.long)\n', (9123, 9146), False, 'import torch\n'), ((8908, 8927), 'torch.nn.functional.softmax', 'F.softmax', 
(['x'], {'dim': '(0)'}), '(x, dim=0)\n', (8917, 8927), True, 'import torch.nn.functional as F\n')] |
from sklearn.svm import LinearSVC
import numpy as np
import scipy
from Blob import Blob
import logging
import time
import IAlgorithm
__author__ = 'simon'
class Classificator(IAlgorithm.IAlgorithm):
    """Pipeline stage wrapping an sklearn-style classifier.

    During training it collects all blob features, fits the model once, and
    yields decision scores for the training blobs; afterwards (_compute) it
    scores incoming blobs one at a time.
    """

    def __init__(self, classificator, use_sparse = None):
        ''' Trains a classificator in training phase and predicts labels in test phase
        Params:
            @classificator The sklearn classificator having a fit and predict function.
            @use_sparse Whether to store the individual features as sparse matrix.
                If None, sparsity will be used if less space is consumed in sparse format.
        '''
        self.model = classificator
        logging.debug('Using sparse: %s'%use_sparse)
        self.use_sparse = use_sparse

    def _compute(self, blob_generator):
        '''Test phase: replace each blob's data with the model's decision scores.'''
        for blob in blob_generator:
            blob.data = self.model.decision_function(blob.data.reshape(1,-1))
            yield blob

    def _train(self, blob_generator):
        '''Training phase: gather all features and labels, fit the model, then
        yield new blobs carrying the decision scores on the training data.
        '''
        # First, collect all elements of the input
        data = []
        labels = []
        metas = []
        for blob in blob_generator:
            if self.use_sparse is None:
                # Determine automatically by comparing the memory footprint of
                # the first feature vector in sparse vs dense representation.
                sparse_vec = scipy.sparse.csr_matrix(blob.data.ravel())
                sparse_memory_req = sparse_vec.data.nbytes + sparse_vec.indptr.nbytes + sparse_vec.indices.nbytes
                self.use_sparse = sparse_memory_req < blob.data.nbytes
                logging.debug('Using sparse format for collecting features: %s'%self.use_sparse)
                logging.debug('Blob data needs %i'%blob.data.nbytes)
                logging.debug('%i with sparse vs %i with dense'%(sparse_memory_req,blob.data.nbytes))
            if self.use_sparse:
                data.append(scipy.sparse.csr_matrix(blob.data.ravel()))
            else:
                data.append(blob.data.ravel())
            labels.append(blob.meta.label)
            metas.append(blob.meta)
        # Stack data to matrix explicitly here, as both fit and predict
        # would do this stacking otherwise
        try:
            if self.use_sparse:
                data = scipy.sparse.vstack(data)
                data = data.astype(np.float64)
            else:
                data = np.array(data, dtype=np.float64)
        except ValueError:
            logging.error("Length of all feature vectors need to be the same for Classificator training.")
            # Re-raise the original ValueError (keeping its message and
            # traceback) instead of a bare, message-less Exception; callers
            # catching Exception still catch it.
            raise
        logging.warning('Training the model with feature dim %i, this might take a while'%data.shape[1])
        self.model.fit(data, labels)
        logging.warning('Finished')
        for (d,m) in zip(self.model.decision_function(data),metas):
            b = Blob()
            b.data = d
            b.meta = m
            yield b
| [
"logging.debug",
"logging.warning",
"numpy.array",
"Blob.Blob",
"scipy.sparse.vstack",
"logging.error"
] | [((666, 712), 'logging.debug', 'logging.debug', (["('Using sparse: %s' % use_sparse)"], {}), "('Using sparse: %s' % use_sparse)\n", (679, 712), False, 'import logging\n'), ((2530, 2637), 'logging.warning', 'logging.warning', (["('Training the model with feature dim %i, this might take a while' % data.\n shape[1])"], {}), "(\n 'Training the model with feature dim %i, this might take a while' %\n data.shape[1])\n", (2545, 2637), False, 'import logging\n'), ((2672, 2699), 'logging.warning', 'logging.warning', (['"""Finished"""'], {}), "('Finished')\n", (2687, 2699), False, 'import logging\n'), ((2789, 2795), 'Blob.Blob', 'Blob', ([], {}), '()\n', (2793, 2795), False, 'from Blob import Blob\n'), ((1495, 1582), 'logging.debug', 'logging.debug', (["('Using sparse format for collecting features: %s' % self.use_sparse)"], {}), "('Using sparse format for collecting features: %s' % self.\n use_sparse)\n", (1508, 1582), False, 'import logging\n'), ((1592, 1646), 'logging.debug', 'logging.debug', (["('Blob data needs %i' % blob.data.nbytes)"], {}), "('Blob data needs %i' % blob.data.nbytes)\n", (1605, 1646), False, 'import logging\n'), ((1661, 1754), 'logging.debug', 'logging.debug', (["('%i with sparse vs %i with dense' % (sparse_memory_req, blob.data.nbytes))"], {}), "('%i with sparse vs %i with dense' % (sparse_memory_req, blob.\n data.nbytes))\n", (1674, 1754), False, 'import logging\n'), ((2204, 2229), 'scipy.sparse.vstack', 'scipy.sparse.vstack', (['data'], {}), '(data)\n', (2223, 2229), False, 'import scipy\n'), ((2318, 2350), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float64'}), '(data, dtype=np.float64)\n', (2326, 2350), True, 'import numpy as np\n'), ((2390, 2494), 'logging.error', 'logging.error', (['"""Length of all feature vectors need to be the same for Classificator training."""'], {}), "(\n 'Length of all feature vectors need to be the same for Classificator training.'\n )\n", (2403, 2494), False, 'import logging\n')] |
import numpy as np
import brainscore
from brainio.assemblies import DataAssembly
from brainscore.benchmarks._properties_common import PropertiesBenchmark, _assert_grating_activations
from brainscore.benchmarks._properties_common import calc_spatial_frequency_tuning
from brainscore.metrics.ceiling import NeuronalPropertyCeiling
from brainscore.metrics.distribution_similarity import BootstrapDistributionSimilarity, ks_similarity
from result_caching import store
ASSEMBLY_NAME = 'schiller.Schiller1976c'
REGION = 'V1'
TIMEBINS = [(70, 170)]
PARENT = 'V1-spatial_frequency'
PROPERTY_NAMES = ['spatial_frequency_selective', 'spatial_frequency_bandwidth']
BIBTEX = """@article{Schiller1976,
author = {<NAME> <NAME> <NAME>.},
doi = {10.1152/jn.1976.39.6.1352},
issn = {0022-3077},
journal = {Journal of neurophysiology},
number = {6},
pages = {1334--1351},
pmid = {825624},
title = {{Quantitative studies of single-cell properties in monkey striate cortex. III. Spatial Frequency}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/825624},
volume = {39},
year = {1976}
}
"""
RESPONSE_THRESHOLD = 5
def _MarquesSchiller1976V1Property(property_name):
    """Assemble the Marques/Schiller-1976 V1 benchmark for one property.

    Loads the Schiller1976c assembly and pairs it with a bootstrapped
    KS-distribution similarity metric and the matching ceiling estimator.
    """
    assembly = brainscore.get_assembly(ASSEMBLY_NAME)
    metric = BootstrapDistributionSimilarity(
        similarity_func=ks_similarity, property_name=property_name)
    ceiling = NeuronalPropertyCeiling(metric)
    return PropertiesBenchmark(
        identifier=f'dicarlo.Marques_schiller1976-{property_name}',
        assembly=assembly,
        neuronal_property=schiller1976_properties,
        similarity_metric=metric,
        timebins=TIMEBINS,
        parent=PARENT,
        ceiling_func=ceiling,
        bibtex=BIBTEX,
        version=1)
def MarquesSchiller1976V1SpatialFrequencySelective():
    """Benchmark targeting the 'spatial_frequency_selective' property."""
    return _MarquesSchiller1976V1Property(
        property_name='spatial_frequency_selective')
def MarquesSchiller1976V1SpatialFrequencyBandwidth():
    """Benchmark targeting the 'spatial_frequency_bandwidth' property."""
    return _MarquesSchiller1976V1Property(
        property_name='spatial_frequency_bandwidth')
@store(identifier_ignore=['responses', 'baseline'])
def schiller1976_properties(model_identifier, responses, baseline):
    """Compute spatial-frequency selectivity and bandwidth per neuroid.

    `responses` holds grating activations carrying radius / spatial_frequency /
    orientation / phase coordinates; `baseline` holds per-neuroid baseline
    activity with matching first dimension. Returns a DataAssembly of shape
    (neuroid, neuronal_property) ordered as PROPERTY_NAMES, keeping only
    neuroids whose peak response exceeds baseline + RESPONSE_THRESHOLD.
    """
    _assert_grating_activations(responses)
    # Sorted unique values of each stimulus parameter, defining the grid.
    radius = np.array(sorted(set(responses.radius.values)))
    spatial_frequency = np.array(sorted(set(responses.spatial_frequency.values)))
    orientation = np.array(sorted(set(responses.orientation.values)))
    phase = np.array(sorted(set(responses.phase.values)))
    responses = responses.values
    baseline = baseline.values
    assert responses.shape[0] == baseline.shape[0]
    n_neuroids = responses.shape[0]
    # Reshape flat stimuli to (neuroid, radius, sf, orientation, phase);
    # assumes stimuli were ordered on exactly this grid -- the reshape
    # silently misassigns responses otherwise.
    responses = responses.reshape((n_neuroids, len(radius), len(spatial_frequency), len(orientation), len(phase)))
    # Average over phase.
    responses = responses.mean(axis=4)
    max_response = responses.reshape((n_neuroids, -1)).max(axis=1, keepdims=True)
    spatial_frequency_bandwidth = np.zeros((n_neuroids, 1))
    # Default: every neuroid counted as selective until proven otherwise.
    spatial_frequency_selective = np.ones((n_neuroids, 1))
    for neur in range(n_neuroids):
        # Locate the preferred (radius, sf, orientation) at the global peak.
        pref_radius, pref_spatial_frequency, pref_orientation = \
            np.unravel_index(np.argmax(responses[neur, :, :, :]),
                             (len(radius), len(spatial_frequency), len(orientation)))
        # SF tuning curve at the preferred radius and orientation.
        spatial_frequency_curve = responses[neur, pref_radius, :, pref_orientation]
        spatial_frequency_bandwidth[neur] = \
            calc_spatial_frequency_tuning(spatial_frequency_curve, spatial_frequency, thrsh=0.707, filt_type='smooth',
                                          mode='ratio')[0]
    # NaN bandwidth means no measurable tuning width -> mark non-selective.
    spatial_frequency_selective[np.isnan(spatial_frequency_bandwidth)] = 0
    properties_data = np.concatenate((spatial_frequency_selective, spatial_frequency_bandwidth), axis=1)
    # Keep only neuroids responding sufficiently above their baseline.
    good_neuroids = max_response > baseline + RESPONSE_THRESHOLD
    properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]
    properties_data = DataAssembly(properties_data, coords={'neuroid_id': ('neuroid', range(properties_data.shape[0])),
                                          'region': ('neuroid', ['V1'] * properties_data.shape[0]),
                                          'neuronal_property': PROPERTY_NAMES},
                                 dims=['neuroid', 'neuronal_property'])
    return properties_data
| [
"brainscore.get_assembly",
"brainscore.benchmarks._properties_common.PropertiesBenchmark",
"numpy.ones",
"numpy.argmax",
"brainscore.benchmarks._properties_common.calc_spatial_frequency_tuning",
"brainscore.metrics.distribution_similarity.BootstrapDistributionSimilarity",
"numpy.zeros",
"numpy.argwher... | [((2276, 2326), 'result_caching.store', 'store', ([], {'identifier_ignore': "['responses', 'baseline']"}), "(identifier_ignore=['responses', 'baseline'])\n", (2281, 2326), False, 'from result_caching import store\n'), ((1318, 1356), 'brainscore.get_assembly', 'brainscore.get_assembly', (['ASSEMBLY_NAME'], {}), '(ASSEMBLY_NAME)\n', (1341, 1356), False, 'import brainscore\n'), ((1381, 1476), 'brainscore.metrics.distribution_similarity.BootstrapDistributionSimilarity', 'BootstrapDistributionSimilarity', ([], {'similarity_func': 'ks_similarity', 'property_name': 'property_name'}), '(similarity_func=ks_similarity,\n property_name=property_name)\n', (1412, 1476), False, 'from brainscore.metrics.distribution_similarity import BootstrapDistributionSimilarity, ks_similarity\n'), ((1489, 1531), 'brainscore.metrics.ceiling.NeuronalPropertyCeiling', 'NeuronalPropertyCeiling', (['similarity_metric'], {}), '(similarity_metric)\n', (1512, 1531), False, 'from brainscore.metrics.ceiling import NeuronalPropertyCeiling\n'), ((1563, 1844), 'brainscore.benchmarks._properties_common.PropertiesBenchmark', 'PropertiesBenchmark', ([], {'identifier': 'f"""dicarlo.Marques_schiller1976-{property_name}"""', 'assembly': 'assembly', 'neuronal_property': 'schiller1976_properties', 'similarity_metric': 'similarity_metric', 'timebins': 'TIMEBINS', 'parent': 'parent', 'ceiling_func': 'ceil_func', 'bibtex': 'BIBTEX', 'version': '(1)'}), "(identifier=\n f'dicarlo.Marques_schiller1976-{property_name}', assembly=assembly,\n neuronal_property=schiller1976_properties, similarity_metric=\n similarity_metric, timebins=TIMEBINS, parent=parent, ceiling_func=\n ceil_func, bibtex=BIBTEX, version=1)\n", (1582, 1844), False, 'from brainscore.benchmarks._properties_common import PropertiesBenchmark, _assert_grating_activations\n'), ((2399, 2437), 'brainscore.benchmarks._properties_common._assert_grating_activations', '_assert_grating_activations', (['responses'], {}), '(responses)\n', (2426, 
2437), False, 'from brainscore.benchmarks._properties_common import PropertiesBenchmark, _assert_grating_activations\n'), ((3133, 3158), 'numpy.zeros', 'np.zeros', (['(n_neuroids, 1)'], {}), '((n_neuroids, 1))\n', (3141, 3158), True, 'import numpy as np\n'), ((3193, 3217), 'numpy.ones', 'np.ones', (['(n_neuroids, 1)'], {}), '((n_neuroids, 1))\n', (3200, 3217), True, 'import numpy as np\n'), ((3881, 3967), 'numpy.concatenate', 'np.concatenate', (['(spatial_frequency_selective, spatial_frequency_bandwidth)'], {'axis': '(1)'}), '((spatial_frequency_selective, spatial_frequency_bandwidth),\n axis=1)\n', (3895, 3967), True, 'import numpy as np\n'), ((3815, 3852), 'numpy.isnan', 'np.isnan', (['spatial_frequency_bandwidth'], {}), '(spatial_frequency_bandwidth)\n', (3823, 3852), True, 'import numpy as np\n'), ((3349, 3384), 'numpy.argmax', 'np.argmax', (['responses[neur, :, :, :]'], {}), '(responses[neur, :, :, :])\n', (3358, 3384), True, 'import numpy as np\n'), ((3616, 3740), 'brainscore.benchmarks._properties_common.calc_spatial_frequency_tuning', 'calc_spatial_frequency_tuning', (['spatial_frequency_curve', 'spatial_frequency'], {'thrsh': '(0.707)', 'filt_type': '"""smooth"""', 'mode': '"""ratio"""'}), "(spatial_frequency_curve, spatial_frequency,\n thrsh=0.707, filt_type='smooth', mode='ratio')\n", (3645, 3740), False, 'from brainscore.benchmarks._properties_common import calc_spatial_frequency_tuning\n'), ((4068, 4094), 'numpy.argwhere', 'np.argwhere', (['good_neuroids'], {}), '(good_neuroids)\n', (4079, 4094), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
class MaxoutNN():
    """Fully-connected classifier built with TensorFlow 1.x graph mode.

    Supports configurable weight initialization, normalization
    ('batch_norm', 'layer_norm', 'dropout<rate>' or 'None') and activation
    ('relu', 'sigmoid', 'tanh' or 'maxout<k>').  For maxout, each logical
    hidden unit is the max over k linear pieces, so the dense layers are
    widened k-fold before the maxout reduction.
    """

    def __init__(self, input_dim, hidden_layers, output_dim):
        """Create placeholders, build the graph and open a session.

        Args:
            input_dim (int): number of input features.
            hidden_layers (list[int]): logical width of each hidden layer.
            output_dim (int): number of output classes.
        """
        self.input_dim = input_dim
        self.hidden_layers = hidden_layers
        self.output_dim = output_dim
        self.inp = tf.placeholder(tf.float32, [None, self.input_dim], 'inp')
        self.target = tf.placeholder(tf.float32, [None, self.output_dim], 'target')
        # Scalar flag toggling training-only behaviour (e.g. dropout).
        self.train = tf.placeholder(tf.bool, [], 'train')
        self.construct_model()
        self.sess = tf.Session()
        self.optim = None

    def construct_model(self, initializer='glorot_normal',
                        norm='None', activation='maxout2'):
        """Build the weights, hidden layers and loss/metric ops.

        Args:
            initializer (str): a name understood by get_initializer().
            norm (str): a name understood by get_normalize().
            activation (str): a name understood by get_activation();
                'maxout<k>' selects maxout units with k pieces.
        """
        self.initializer = get_initializer(initializer)
        if 'maxout' not in activation:
            self.activation = get_activation(activation)
            self.normalize = get_normalize(norm, self.activation, self.train)
        if 'maxout' in activation:
            # Remember the logical widths; the dense layers themselves are
            # widened k-fold so maxout can reduce groups of k units.
            self.maxout_hidden_layers = self.hidden_layers
            self.hidden_layers = [h * int(activation.lstrip('maxout'))
                                  for h in self.hidden_layers]
        self.ws = [tf.get_variable('w0',
                                   shape=[self.input_dim, self.hidden_layers[0]],
                                   dtype=tf.float32, initializer=self.initializer)]
        self.bs = [tf.get_variable('b0',
                                   shape=[self.hidden_layers[0]],
                                   dtype=tf.float32, initializer=tf.initializers.zeros)]
        if 'maxout' in activation:
            # Maxout output width differs per layer, so the activation and
            # normalization closures are rebuilt for each layer.
            self.activation = get_activation(activation, self.maxout_hidden_layers[0])
            self.normalize = get_normalize(norm, self.activation, self.train)
        self.hiddens = [self.normalize(self.inp @ self.ws[0] + self.bs[0])]
        for i in range(1, len(self.hidden_layers)):
            # For maxout the previous layer's *output* width is the logical
            # (reduced) width, not the widened dense width.
            self.ws += [tf.get_variable('w' + str(i),
                                        shape=[self.hidden_layers[i-1] if 'maxout' not in activation else
                                               self.maxout_hidden_layers[i-1], self.hidden_layers[i]],
                                        dtype=tf.float32, initializer=self.initializer)]
            self.bs += [tf.get_variable('b' + str(i),
                                        shape=[self.hidden_layers[i]],
                                        dtype=tf.float32, initializer=tf.initializers.zeros)]
            if 'maxout' in activation:
                self.activation = get_activation(activation, self.maxout_hidden_layers[i])
                self.normalize = get_normalize(norm, self.activation, self.train)
            self.hiddens.append(self.normalize(self.hiddens[-1] @ self.ws[i] + self.bs[i]))
        self.ws += [tf.get_variable('w' + str(len(self.hidden_layers)),
                                    shape=[self.hidden_layers[-1] if 'maxout' not in activation else
                                           self.maxout_hidden_layers[-1], self.output_dim],
                                    dtype=tf.float32, initializer=self.initializer)]
        self.bs += [tf.get_variable('b' + str(len(self.hidden_layers)),
                                    shape=[self.output_dim],
                                    dtype=tf.float32, initializer=tf.initializers.zeros)]
        self.logits = self.hiddens[-1] @ self.ws[len(self.hidden_layers)] + self.bs[len(self.hidden_layers)]
        self.pred = tf.nn.softmax(self.logits, 1)
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.target,
                                                       logits=self.logits))
        self.correct_pred = tf.equal(
            tf.argmax(self.pred, 1), tf.argmax(self.target, 1))
        self.accuracy = tf.reduce_mean(
            tf.cast(self.correct_pred, "float"))
        self.confusion_matrix = tf.confusion_matrix(labels=tf.argmax(self.target, 1),
                                                    predictions=tf.argmax(self.pred, 1))
        # FIX: this method previously also created a second tf.Session and
        # reset self.optim; __init__ then overwrote both, leaking the session
        # created here.  Session ownership now lives solely in __init__.

    def model_init(self):
        """Initialize all graph variables in the session."""
        self.sess.run(tf.global_variables_initializer())

    def train_setting(self, optimizer='Adam'):
        """Create the training op (respecting UPDATE_OPS, e.g. batch norm).

        Args:
            optimizer (str): a name understood by get_optimizer().
        """
        self.optim = get_optimizer(optimizer)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.step = self.optim.minimize(self.loss)

    def train_step(self, x, y):
        """Run one optimization step on a batch (x, y) with train=True."""
        outputs = [self.step]
        return self.sess.run(outputs, feed_dict={self.inp: x, self.target: y, self.train: True})

    def forward(self, x, y=None, train=False):
        """Run a forward pass.

        Args:
            x: input batch; flattened to [batch, input_dim] if not already 2-D.
            y: optional one-hot targets; when given, loss/accuracy/confusion
                matrix are evaluated as well.
            train (bool): value fed to the training-mode placeholder.

        Returns:
            list: [pred] or [pred, loss, accuracy, confusion_matrix].
        """
        if len(x.shape) != 2:
            x = x.reshape(-1, np.prod(x.shape[1:]))
        assert x.shape[1] == self.input_dim, x.shape
        outputs = [self.pred]
        feed_dict = {self.inp: x, self.train: train}
        if y is not None:
            outputs += [self.loss, self.accuracy, self.confusion_matrix]
            feed_dict.update({self.target: y})
        return self.sess.run(outputs, feed_dict=feed_dict)
def get_activation(activation, param=1):
    """Resolve an activation name to a callable.

    Args:
        activation (str): 'relu', 'sigmoid', 'tanh' or any string containing
            'maxout' (e.g. 'maxout2').
        param (int): number of maxout pieces; only used for maxout.

    Returns:
        A callable mapping a tensor to its activated tensor.
    """
    if activation == 'relu':
        return tf.nn.relu
    if activation == 'sigmoid':
        return tf.nn.sigmoid
    if activation == 'tanh':
        return tf.nn.tanh
    if 'maxout' in activation:
        def _maxout(inp):
            return tf.contrib.layers.maxout(inp, param)
        return _maxout
    assert False, activation
def get_normalize(norm, activation, train):
    """Wrap *activation* with the requested normalization scheme.

    Args:
        norm (str): 'batch_norm', 'layer_norm', 'dropout<rate>' or 'None'.
        activation: callable applied to (or fused into) the layer output.
        train: boolean tensor enabling training-only behaviour (dropout).

    Returns:
        A callable mapping a pre-activation tensor to its normalized,
        activated output.
    """
    if norm == 'None':
        def normalize(inp):
            return activation(inp)
        return normalize
    if norm == 'batch_norm':
        def normalize(inp):
            return tf.contrib.layers.batch_norm(inp,
                                                activation_fn=activation)
        return normalize
    if norm == 'layer_norm':
        def normalize(inp):
            return tf.contrib.layers.layer_norm(inp,
                                                activation_fn=activation)
        return normalize
    if 'dropout' in norm:
        def normalize(inp):
            # 'dropout0.3' -> drop rate 0.3 -> keep_prob 0.7.
            return tf.contrib.layers.dropout(activation(inp),
                                             keep_prob=1 - float(norm.lstrip('dropout')),
                                             is_training=train)
        return normalize
    assert False, norm
def get_initializer(initializer):
    """Map an initializer name to the matching tf.initializers factory.

    Args:
        initializer (str): one of 'truncated_normal', 'glorot_normal',
            'glorot_uniform', 'he_normal', 'he_uniform', 'lecun_normal',
            'lecun_uniform'.

    Returns:
        The corresponding tf.initializers attribute.

    Raises:
        AssertionError: if the name is not recognised.
    """
    if initializer == 'truncated_normal':
        return tf.initializers.truncated_normal
    elif initializer == 'glorot_normal':
        return tf.initializers.glorot_normal
    elif initializer == 'glorot_uniform':
        return tf.initializers.glorot_uniform
    elif initializer == 'he_normal':
        # FIX: previously returned the non-existent tf.initializers.hi_normal,
        # which raised AttributeError whenever 'he_normal' was requested.
        return tf.initializers.he_normal
    elif initializer == 'he_uniform':
        # FIX: previously tf.initializers.hi_uniform (does not exist).
        return tf.initializers.he_uniform
    elif initializer == 'lecun_normal':
        return tf.initializers.lecun_normal
    elif initializer == 'lecun_uniform':
        return tf.initializers.lecun_uniform
    else:
        assert False, initializer
def get_optimizer(optimizer):
    """Map an optimizer name to a configured tf.train optimizer instance.

    Args:
        optimizer (str): currently only 'Adam' is supported.

    Returns:
        A tf.train.AdamOptimizer instance.

    Raises:
        AssertionError: if the name is not recognised.  (Previously an
        unknown name silently returned None, which only failed later at
        optim.minimize; now it fails loudly like the sibling factories.)
    """
    if optimizer == 'Adam':
        return tf.train.AdamOptimizer()
    else:
        assert False, optimizer
| [
"tensorflow.contrib.layers.batch_norm",
"numpy.prod",
"tensorflow.get_variable",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow... | [((259, 316), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.input_dim]', '"""inp"""'], {}), "(tf.float32, [None, self.input_dim], 'inp')\n", (273, 316), True, 'import tensorflow as tf\n'), ((339, 400), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.output_dim]', '"""target"""'], {}), "(tf.float32, [None, self.output_dim], 'target')\n", (353, 400), True, 'import tensorflow as tf\n'), ((422, 458), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '[]', '"""train"""'], {}), "(tf.bool, [], 'train')\n", (436, 458), True, 'import tensorflow as tf\n'), ((511, 523), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (521, 523), True, 'import tensorflow as tf\n'), ((3322, 3351), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits', '(1)'], {}), '(self.logits, 1)\n', (3335, 3351), True, 'import tensorflow as tf\n'), ((3878, 3890), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3888, 3890), True, 'import tensorflow as tf\n'), ((4119, 4161), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4136, 4161), True, 'import tensorflow as tf\n'), ((6827, 6851), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (6849, 6851), True, 'import tensorflow as tf\n'), ((1111, 1232), 'tensorflow.get_variable', 'tf.get_variable', (['"""w0"""'], {'shape': '[self.input_dim, self.hidden_layers[0]]', 'dtype': 'tf.float32', 'initializer': 'self.initializer'}), "('w0', shape=[self.input_dim, self.hidden_layers[0]], dtype=\n tf.float32, initializer=self.initializer)\n", (1126, 1232), True, 'import tensorflow as tf\n'), ((1279, 1388), 'tensorflow.get_variable', 'tf.get_variable', (['"""b0"""'], {'shape': '[self.hidden_layers[0]]', 'dtype': 'tf.float32', 'initializer': 'tf.initializers.zeros'}), "('b0', shape=[self.hidden_layers[0]], dtype=tf.float32,\n initializer=tf.initializers.zeros)\n", (1294, 1388), True, 'import 
tensorflow as tf\n'), ((3405, 3492), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'self.target', 'logits': 'self.logits'}), '(labels=self.target, logits=self.\n logits)\n', (3447, 3492), True, 'import tensorflow as tf\n'), ((3568, 3591), 'tensorflow.argmax', 'tf.argmax', (['self.pred', '(1)'], {}), '(self.pred, 1)\n', (3577, 3591), True, 'import tensorflow as tf\n'), ((3593, 3618), 'tensorflow.argmax', 'tf.argmax', (['self.target', '(1)'], {}), '(self.target, 1)\n', (3602, 3618), True, 'import tensorflow as tf\n'), ((3676, 3711), 'tensorflow.cast', 'tf.cast', (['self.correct_pred', '"""float"""'], {}), "(self.correct_pred, 'float')\n", (3683, 3711), True, 'import tensorflow as tf\n'), ((3969, 4002), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4000, 4002), True, 'import tensorflow as tf\n'), ((4175, 4210), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (4198, 4210), True, 'import tensorflow as tf\n'), ((5471, 5530), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['inp'], {'activation_fn': 'activation'}), '(inp, activation_fn=activation)\n', (5499, 5530), True, 'import tensorflow as tf\n'), ((3775, 3800), 'tensorflow.argmax', 'tf.argmax', (['self.target', '(1)'], {}), '(self.target, 1)\n', (3784, 3800), True, 'import tensorflow as tf\n'), ((3832, 3855), 'tensorflow.argmax', 'tf.argmax', (['self.pred', '(1)'], {}), '(self.pred, 1)\n', (3841, 3855), True, 'import tensorflow as tf\n'), ((4534, 4554), 'numpy.prod', 'np.prod', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (4541, 4554), True, 'import numpy as np\n'), ((5632, 5691), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['inp'], {'activation_fn': 'activation'}), '(inp, activation_fn=activation)\n', (5660, 5691), True, 'import tensorflow as tf\n'), ((5237, 5273), 
'tensorflow.contrib.layers.maxout', 'tf.contrib.layers.maxout', (['inp', 'param'], {}), '(inp, param)\n', (5261, 5273), True, 'import tensorflow as tf\n')] |
import warnings
warnings.simplefilter("ignore", UserWarning)
import pandas as pd
import dill as pickle
import functools
import os
from sklearn.feature_selection import f_regression, mutual_info_regression
from sklearn.mixture import BayesianGaussianMixture as GMM
from scipy.stats import spearmanr, pearsonr
import scipy.stats as st
import numpy as np
from tqdm import tqdm
import timeit
def load_sif():
    """Read the Pathway Commons SIF file into a three-column frame.

    Returns:
        (pandas.DataFrame): tab-separated interactions with columns
        ['UpGene', 'Type', 'DownGene'] (the file has no header row).
    """
    columns = ['UpGene', 'Type', 'DownGene']
    return pd.read_csv(sif_file, sep='\t', header=None, names=columns)
def filter_sif(sif, intx_type = 'controls-expression-of'):
    """Subset a SIF interaction frame to a single interaction type.

    Args:
        sif (pandas.DataFrame): frame with a 'Type' column.
        intx_type (str): interaction type to keep.

    Returns:
        (pandas.DataFrame): the rows whose 'Type' equals *intx_type*.
    """
    keep = sif['Type'] == intx_type
    return sif[keep]
def load_secondary_itx_sif():
    """Load the precompiled secondary-interaction SIF from disk.

    Returns:
        (pandas.DataFrame): frame of length n_interactions with columns
        ['UpGene', 'Type', 'DownGene']
    """
    header = ['UpGene', 'Type', 'DownGene']
    return pd.read_csv(sec_intx_file, sep='\t', header=None, names=header)
def write_pickle(obj, relnm):
    """Serialize *obj* with the highest available pickle protocol.

    Args:
        obj: Python object to be pickled.
        relnm (str): relative name/path of the pickle on disk.

    Returns:
        (str): confirmation message naming the written path.
    """
    with open(relnm, 'wb') as handle:
        pickle.dump(obj, handle, protocol=-1)
    return 'Serialized object to disk at {}'.format(relnm)
def read_pickle(relnm):
    """Deserialize a pickled object from disk.

    Args:
        relnm (str): relative name/path of the pickled object.

    Returns:
        The unpickled object.
    """
    with open(relnm, 'rb') as handle:
        obj = pickle.load(handle)
    print('Loaded object from disk at {}'.format(relnm))
    return obj
def ensure_dir(relnm):
    """Create the path *relnm* (relative to the CWD) if it is missing.

    Args:
        relnm (str): relative name/path.

    Returns:
        (str): the unchanged *relnm*.
    """
    target = os.path.join(os.getcwd(), relnm)
    if not os.path.exists(target):
        print('--- path does not exist : {} ---'.format(target))
        print('--- constructing path : {} ---'.format(target))
        os.makedirs(target)
    return relnm
def traverse_interactions(regulator, filt_sif):
    """Expand one regulator's network with its second-hop interactions.

    Args:
        regulator (str): regulator whose interaction network to expand.
        filt_sif (pandas.DataFrame): frame of length n_interactions with
            columns ['UpGene', 'Type', 'DownGene'].

    Returns:
        (pandas.DataFrame): primary plus secondary interactions, all
        attributed to *regulator*, deduplicated, and with self-loops
        (DownGene == regulator) removed.
    """
    primary = filt_sif[filt_sif.UpGene == regulator]
    targets = primary.DownGene.unique()
    secondary = filt_sif[filt_sif.UpGene.isin(targets)]
    # Re-attribute second-hop edges to the regulator itself.
    secondary.UpGene = regulator
    expanded = pd.concat([primary, secondary])
    expanded.Type = 'controls-expression-of'
    expanded = expanded.drop_duplicates()
    return expanded[expanded.DownGene != regulator]
def generate_expanded_regulon():
    """Build and pickle the expanded Pathway Commons regulon.

    Adds secondary down-stream interactions for regulators that control
    the expression of other regulators, then serializes the result to
    '../data/secondary_intx_regulon.pkl'.

    Returns:
        None.  The expanded regulon is written to disk as a side effect.
    """
    print('--- Generating regulon with primary and secondary interactions ---')
    filt_sif = filter_sif(load_sif())
    regulators = filt_sif.UpGene.unique()
    regulon = pd.concat([traverse_interactions(reg, filt_sif) for reg in regulators])
    regulon.set_index('UpGene', inplace=True)
    regulon.reset_index(inplace=True)
    print('---- Regulon constructed ---')
    write_pickle(regulon, '../data/secondary_intx_regulon.pkl')
def bgm_moa(regul_weights):
    """Score each regulator-target weight by its mode of activation.

    Fits the MoA weights to a three-component Bayesian Gaussian mixture
    (interpreted as repression / non-influential / activation) and converts
    the component CDFs into one signed probability per interaction.

    Args:
        regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
            downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']

    Returns:
        Mode (:obj: `numpy ndarray`) : array of shape [n_interactions, 1] with signed scores;
            positive values indicate activation, negative values repression.
    """
    g = GMM(n_components = 3, max_iter = 1000)
    sub_reg = regul_weights.copy()
    # Fit the mixture on the raw MoA weights as a column vector.
    sub_reg_vals = sub_reg.MoA.values.reshape(-1, 1)
    g.fit(sub_reg_vals)
    mu = g.means_.flatten()
    sigma = np.sqrt(g.covariances_).flatten()
    # Sort components by mean: fit[0] = repression, fit[1] = non-influential,
    # fit[2] = activation.
    fit = sorted(list(zip(mu, sigma)))
    activation = (st.norm.cdf(sub_reg_vals, fit[2][0], fit[2][1]))
    repression = 1 - st.norm.cdf(sub_reg_vals, fit[0][0], fit[0][1])
    total_lower = 1 - st.norm.cdf(sub_reg_vals, fit[1][0], fit[1][1])
    total_upper = (st.norm.cdf(sub_reg_vals, fit[1][0], fit[1][1]))
    # Binary masks for non-negative (up) and non-positive (down) weights;
    # a weight of exactly 0 belongs to both masks.
    copy_target = sub_reg.copy()
    copy_target['up'] = 0
    copy_target['down'] = 0
    copy_target.loc[(copy_target.MoA >= 0), 'up'] = 1
    copy_target.loc[(copy_target.MoA <= 0), 'down'] = 1
    up_moa = copy_target.up.values.reshape(copy_target.shape[0], 1)
    down_moa = copy_target.down.values.reshape(copy_target.shape[0], 1)
    # Normalised activation probability on the up side minus normalised
    # repression probability on the down side.
    Mode = (activation / (repression + total_lower + activation) * up_moa) -\
           (repression / (repression + total_upper + activation) * down_moa)
    return Mode
def prune_regulon(expr, regulon, regulon_size):
    """Drop regulators with too few measurable downstream interactions.

    Interactions are first restricted to gene pairs present in the
    expression matrix; regulators retaining fewer than *regulon_size*
    interactions are then removed entirely.

    Args:
        expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
        regulon (:obj: `pandas DataFrame`) : frame with columns including
            'UpGene' and 'DownGene'
        regulon_size (int) : minimum number of downstream interactions a
            regulator needs in order to be kept

    Returns:
        filtered_regulon (:obj: `pandas DataFrame`) : the surviving
        interactions, with 'UpGene' restored as the leading column.
    """
    measurable = regulon.UpGene.isin(expr.columns) & regulon.DownGene.isin(expr.columns)
    present = regulon[measurable]
    present.set_index('UpGene', inplace=True)
    counts = present.index.value_counts()
    keep = counts[counts >= regulon_size]
    pruned = present.loc[keep.index]
    pruned.reset_index(inplace=True)
    return pruned
def regulon_weight_assignment(regulator, expr, filtered_regulon):
    """ Assigns probability and weights for regulator - target interactions

    The weight of each edge is the univariate F-statistic plus the absolute
    Spearman correlation of target vs. regulator expression, signed by the
    Spearman correlation; the likelihood is the Spearman p-value.

    Args:
        regulator (str): Regulator to expand interaction network
        expr (:obj: `pandas DataFrame`) : pandas DataFrame containing scaled expression data of
            shape [n_samples, n_features]
        filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator
            and downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']

    Returns:
        regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
            downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
    """
    sub_reg = filtered_regulon[(filtered_regulon['UpGene'] == regulator)]
    # Target expression matrix; dropna removes regulon members absent from expr.
    X = expr.reindex(sub_reg.DownGene.values, axis = 1).dropna(axis = 1)
    # Regulator expression as a single-column frame.
    y = expr.reindex([regulator], axis = 1)
    # Spearman correlation of each target column against the regulator:
    # after apply(pd.Series), column 0 holds rho and column 1 the p-value.
    spr_results = X.apply(lambda col: spearmanr(col, y.iloc[:, 0]), axis = 0).apply(pd.Series)
    spr_result = spr_results[0]
    spr_pvalues = spr_results[1]
    # Univariate F-statistic for each target vs. the regulator's expression.
    f_test, _ = f_regression(X, y.values.ravel())
    weights = f_test
    weights_spr = weights + abs(spr_result)
    # The combined weight keeps the sign of the Spearman correlation.
    regul_weights = (weights_spr * np.sign(spr_result)).to_frame()
    regul_weights.columns = ['MoA']
    regul_weights.index.name = 'Target'
    regul_weights.reset_index(inplace = True)
    regul_weights['Regulator'] = regulator
    regul_weights['likelihood'] = spr_pvalues.values
    regul_weights = regul_weights.reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis = 1)
    regul_weights.set_index('Regulator', inplace = True)
    # Drop interactions whose weight diverged to infinity (presumably
    # degenerate targets for which f_regression blows up -- TODO confirm).
    regul_weights = regul_weights[~np.isinf(regul_weights.MoA)]
    return regul_weights
def structure_weights(regulator, pruned_regulon, f_statistics, r_frame, p_frame):
    """Assemble signed interaction weights for one regulator.

    The weight of each edge is the F-statistic plus the absolute Spearman
    correlation, signed by the correlation's direction.

    Args:
        regulator (str): the feature whose downstream edges are weighted.
        pruned_regulon (:obj:`pd.DataFrame`), shape = [n_interactions, 3]
        f_statistics (dict) : maps regulator name to a tuple whose first
            element holds the per-target F-statistics.
        r_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
        p_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]

    Returns:
        (:obj:`pd.DataFrame`), shape = [n_interactions, 3] indexed by
        'Regulator' with columns ['Target', 'MoA', 'likelihood'].
    """
    edges = pruned_regulon[pruned_regulon['UpGene'] == regulator]
    targets = edges.DownGene
    pvals = p_frame.loc[targets, regulator]
    pvals.name = 'likelihood'
    f_stat = f_statistics[regulator][0]
    rho = r_frame.loc[targets, regulator]
    # Magnitude = F-statistic + |rho|; sign follows the correlation.
    moa = (f_stat + abs(rho)) * np.sign(rho)
    moa.index.name = 'Target'
    moa.name = 'MoA'
    out = moa.to_frame()
    out['likelihood'] = pvals
    out['Regulator'] = regulator
    out = out.reset_index()
    out = out.reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis = 1)
    return out.set_index('Regulator')
def generate_bolstered_regulon(expr, cohort, regulon_size=15):
    """Compute (or load from cache) the cohort-specific weighted regulon.

    If a previously compiled regulon exists for *cohort* it is loaded;
    otherwise the expanded Pathway Commons regulon is pruned against the
    expression matrix, every surviving regulator is weighted, and the
    result is pickled for reuse.

    Args:
        expr (:obj: `pandas DataFrame`) : scaled expression data of
            shape [n_samples, n_features]
        cohort (str) : name of the cohort to associate with the regulon
        regulon_size (int) : required number of downstream interactions
            per regulator

    Returns:
        total_regulon (:obj: `pandas DataFrame`) : weighted interactions of
        shape [len(Target), ['Regulator','Target','MoA','likelihood']
    """
    cache_path = os.path.join(dirname, '../experiments/{0}/data/{0}_bolstered_regulon.pkl'.format(cohort))
    if os.path.isfile(cache_path):
        print('--- loading context specific regulon ---')
        return read_pickle(cache_path)
    if os.path.isfile(sec_intx_file):
        print('--- loading unfiltered regulon ---')
    else:
        generate_expanded_regulon()
    regulon = read_pickle(sec_intx_file)
    print('--- pruning regulon ---')
    filtered_regulon = prune_regulon(expr, regulon, regulon_size)
    regulators = filtered_regulon.UpGene.unique()
    print('--- compiling regulon of {} regulators and {} interactions with a minimum of {} interactions ---'.
          format(len(regulators), filtered_regulon.shape[0], regulon_size))
    weigh = functools.partial(regulon_weight_assignment, expr=expr,
                              filtered_regulon=filtered_regulon)
    total_regulon = pd.concat([weigh(reg) for reg in tqdm(regulators)])
    out_dir = os.path.join(dirname, '../experiments/{0}/data'.format(cohort))
    ensure_dir(out_dir)
    write_pickle(total_regulon, os.path.join(out_dir, '{}_bolstered_regulon.pkl'.format(cohort)))
    return total_regulon
| [
"scipy.stats.spearmanr",
"os.path.exists",
"numpy.sqrt",
"pandas.read_csv",
"os.makedirs",
"tqdm.tqdm",
"os.getcwd",
"os.path.isfile",
"dill.dump",
"sklearn.mixture.BayesianGaussianMixture",
"functools.partial",
"numpy.sign",
"warnings.simplefilter",
"scipy.stats.norm.cdf",
"numpy.isinf"... | [((16, 60), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (37, 60), False, 'import warnings\n'), ((418, 504), 'pandas.read_csv', 'pd.read_csv', (['sif_file'], {'names': "['UpGene', 'Type', 'DownGene']", 'sep': '"""\t"""', 'header': 'None'}), "(sif_file, names=['UpGene', 'Type', 'DownGene'], sep='\\t',\n header=None)\n", (429, 504), True, 'import pandas as pd\n'), ((856, 947), 'pandas.read_csv', 'pd.read_csv', (['sec_intx_file'], {'names': "['UpGene', 'Type', 'DownGene']", 'sep': '"""\t"""', 'header': 'None'}), "(sec_intx_file, names=['UpGene', 'Type', 'DownGene'], sep='\\t',\n header=None)\n", (867, 947), True, 'import pandas as pd\n'), ((2946, 2981), 'pandas.concat', 'pd.concat', (['[sub_reg, secondary_itx]'], {}), '([sub_reg, secondary_itx])\n', (2955, 2981), True, 'import pandas as pd\n'), ((3745, 3768), 'pandas.concat', 'pd.concat', (['regulon_list'], {}), '(regulon_list)\n', (3754, 3768), True, 'import pandas as pd\n'), ((4499, 4533), 'sklearn.mixture.BayesianGaussianMixture', 'GMM', ([], {'n_components': '(3)', 'max_iter': '(1000)'}), '(n_components=3, max_iter=1000)\n', (4502, 4533), True, 'from sklearn.mixture import BayesianGaussianMixture as GMM\n'), ((4784, 4831), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['sub_reg_vals', 'fit[2][0]', 'fit[2][1]'], {}), '(sub_reg_vals, fit[2][0], fit[2][1])\n', (4795, 4831), True, 'import scipy.stats as st\n'), ((4991, 5038), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['sub_reg_vals', 'fit[1][0]', 'fit[1][1]'], {}), '(sub_reg_vals, fit[1][0], fit[1][1])\n', (5002, 5038), True, 'import scipy.stats as st\n'), ((11171, 11202), 'os.path.isfile', 'os.path.isfile', (['bolstered_relnm'], {}), '(bolstered_relnm)\n', (11185, 11202), False, 'import os\n'), ((1287, 1319), 'dill.dump', 'pickle.dump', (['obj', 'f'], {'protocol': '(-1)'}), '(obj, f, protocol=-1)\n', (1298, 1319), True, 'import dill as pickle\n'), ((1647, 1661), 'dill.load', 
'pickle.load', (['f'], {}), '(f)\n', (1658, 1661), True, 'import dill as pickle\n'), ((1985, 1996), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1994, 1996), False, 'import os\n'), ((2016, 2033), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (2030, 2033), False, 'import os\n'), ((2161, 2175), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (2172, 2175), False, 'import os\n'), ((4854, 4901), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['sub_reg_vals', 'fit[0][0]', 'fit[0][1]'], {}), '(sub_reg_vals, fit[0][0], fit[0][1])\n', (4865, 4901), True, 'import scipy.stats as st\n'), ((4924, 4971), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['sub_reg_vals', 'fit[1][0]', 'fit[1][1]'], {}), '(sub_reg_vals, fit[1][0], fit[1][1])\n', (4935, 4971), True, 'import scipy.stats as st\n'), ((9789, 9800), 'numpy.sign', 'np.sign', (['r_'], {}), '(r_)\n', (9796, 9800), True, 'import numpy as np\n'), ((11337, 11366), 'os.path.isfile', 'os.path.isfile', (['sec_intx_file'], {}), '(sec_intx_file)\n', (11351, 11366), False, 'import os\n'), ((12157, 12180), 'pandas.concat', 'pd.concat', (['regulon_list'], {}), '(regulon_list)\n', (12166, 12180), True, 'import pandas as pd\n'), ((3654, 3713), 'functools.partial', 'functools.partial', (['traverse_interactions'], {'filt_sif': 'filt_sif'}), '(traverse_interactions, filt_sif=filt_sif)\n', (3671, 3713), False, 'import functools\n'), ((4692, 4715), 'numpy.sqrt', 'np.sqrt', (['g.covariances_'], {}), '(g.covariances_)\n', (4699, 4715), True, 'import numpy as np\n'), ((8679, 8706), 'numpy.isinf', 'np.isinf', (['regul_weights.MoA'], {}), '(regul_weights.MoA)\n', (8687, 8706), True, 'import numpy as np\n'), ((8239, 8258), 'numpy.sign', 'np.sign', (['spr_result'], {}), '(spr_result)\n', (8246, 8258), True, 'import numpy as np\n'), ((11970, 12065), 'functools.partial', 'functools.partial', (['regulon_weight_assignment'], {'expr': 'expr', 'filtered_regulon': 'filtered_regulon'}), '(regulon_weight_assignment, expr=expr, filtered_regulon=\n 
filtered_regulon)\n', (11987, 12065), False, 'import functools\n'), ((12114, 12130), 'tqdm.tqdm', 'tqdm', (['regulators'], {}), '(regulators)\n', (12118, 12130), False, 'from tqdm import tqdm\n'), ((7963, 7991), 'scipy.stats.spearmanr', 'spearmanr', (['col', 'y.iloc[:, 0]'], {}), '(col, y.iloc[:, 0])\n', (7972, 7991), False, 'from scipy.stats import spearmanr, pearsonr\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.