code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import (auc, confusion_matrix, precision_recall_curve,
r2_score, roc_curve)
def best_scores(allstars_model):
    """Horizontal bar chart of the best score achieved by each model.

    Reads ``allstars_model.best_scores`` (mapping model name -> score) and
    scales the figure height with the number of models.
    """
    score_map = allstars_model.best_scores
    names = list(score_map.keys())
    plt.figure(figsize=(6, int(len(names) / 3)))
    plt.title("Best scores")
    plt.barh(names, score_map.values())
    plt.grid()
    plt.show()
def training_summary(objective):
    """Four-panel overview of an optimization run.

    For every model name (in reverse order) show: mean score with std-dev
    error bars, how many times it was selected, mean per-trial time with
    std-dev error bars, and total time spent on it.
    """
    fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 8))
    names = list(reversed(list(objective.get_model_names())))
    score_means, score_stds = [], []
    second_means, second_stds = [], []
    selected, sum_second = [], []
    for name in names:
        scores = np.array(objective.scores[name])
        times = np.array(objective.times[name])
        score_means.append(scores.mean())
        score_stds.append(scores.std())
        second_means.append(times.mean())
        second_stds.append(times.std())
        selected.append(len(objective.times[name]))
        sum_second.append(sum(objective.times[name]))
    # (values, error bars or None, x-axis label) per panel, left to right.
    panels = [
        (score_means, score_stds, "score"),
        (selected, None, "selected (times)"),
        (second_means, second_stds, "calculation time (seconds)"),
        (sum_second, None, "total calculation time (seconds)"),
    ]
    for idx, (vals, errs, label) in enumerate(panels):
        ax = axes[idx]
        if errs is None:
            ax.barh(names, vals)
        else:
            ax.barh(names, vals, xerr=errs)
        ax.set_xlabel(label)
        ax.grid()
        if idx == 0:
            # Scores are in [0, 1]; only the first panel keeps its y labels.
            ax.set_xlim([0.0, 1.0])
        else:
            ax.yaxis.set_visible(False)
    plt.show()
def feature_importances(allstars_model):
    """Horizontal bar chart of RandomForest feature importances.

    Column names come from the currently selected feature subset
    (``allstars_model.support``); importances come from the fitted
    RandomForest in ``best_models``.
    """
    columns = list(allstars_model.x_train.iloc[:, allstars_model.support].columns)
    importances = allstars_model.best_models["RandomForest"].model.feature_importances_
    barh_dict = dict(zip(columns, importances))
    names = list(barh_dict.keys())
    plt.figure(figsize=(6, int(len(names) / 3) + 1))
    plt.title("Feature importances in RF")
    plt.barh(names, barh_dict.values())
    plt.grid()
    plt.show()
def model_importances(stacking_model):
    """Bar chart of how much each base model contributes to the stack.

    Uses the tree-based ``feature_importances_`` of the stacking ensemble's
    final estimator, one bar per named base estimator.
    """
    estimator_names = list(stacking_model.best_model.named_estimators_.keys())
    weights = stacking_model.best_model.final_estimator_.feature_importances_
    plt.title("Model importances in stacking")
    plt.barh(estimator_names, weights)
    plt.grid()
    plt.show()
def metrics(model, X_train, y_train, X_test=None, y_test=None):
    """Dispatch to regression or classification diagnostic plots.

    The task type is taken from ``model.is_regressor`` when the attribute
    exists; otherwise a classifier is assumed when the model exposes
    ``predict_proba`` or ``decision_function``, else a regressor.

    Parameters
    ----------
    model : fitted estimator, or a project wrapper around one.
    X_train, y_train : training features / targets (coerced to pandas).
    X_test, y_test : optional held-out features / targets.
    """
    X_train = pd.DataFrame(X_train)
    # Normalize the training target to a Series (first column).
    # (isinstance replaces the non-idiomatic `type(...) is` check.)
    if not isinstance(y_train, pd.Series):
        y_train = pd.DataFrame(y_train)[0]
    if X_test is not None:
        X_test = pd.DataFrame(X_test)
    if y_test is not None:
        # NOTE(review): unlike y_train this stays a DataFrame (no column
        # extraction); downstream plotting accepts both, so the asymmetry
        # is preserved for compatibility — confirm whether it is intended.
        y_test = pd.DataFrame(y_test)
    if hasattr(model, "is_regressor"):
        if model.is_regressor:
            regression_metrics(model, X_train, y_train, X_test, y_test)
        else:
            classification_metrics(model, X_train, y_train, X_test, y_test)
    elif hasattr(model, "predict_proba") or hasattr(model, "decision_function"):
        classification_metrics(model, X_train, y_train, X_test, y_test)
    else:
        regression_metrics(model, X_train, y_train, X_test, y_test)
def regression_metrics(model, X_train, y_train, X_test=None, y_test=None):
    """Scatter real vs. predicted values with an identity line and score.

    Draws one panel for the training data and, when ``X_test`` is given, a
    second panel for the test data.  ``model.score`` (R^2 for sklearn-style
    regressors) is printed near the lower-right corner of each panel.
    """
    if X_test is None:
        fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))
        ax = axes
    else:
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
        ax = axes[0]
    y_pred = model.predict(X_train)
    score = model.score(X_train, y_train)
    y_min = min(y_train.min(), y_pred.min())
    # BUG FIX: was min(...), which truncated the identity line (and misplaced
    # the score label) whenever real and predicted maxima differed.
    y_max = max(y_train.max(), y_pred.max())
    ax.set_title("Training data")
    ax.scatter(y_train, y_pred, alpha=0.5)
    ax.plot([y_min, y_max], [y_min, y_max])
    ax.text(
        y_max - 0.3,
        y_min + 0.3,
        ("%.3f" % score).lstrip("0"),
        size=15,
        horizontalalignment="right",
    )
    ax.set_xlabel("Real")
    ax.set_ylabel("Predicted")
    if X_test is not None:
        y_pred = model.predict(X_test)
        score = model.score(X_test, y_test)
        # The test panel reuses the training-data range for its identity line
        # (a per-panel recomputation was left commented out in the original)
        # — TODO confirm that sharing the range is the intended comparison.
        axes[1].set_title("Test data")
        axes[1].scatter(y_test, y_pred, alpha=0.5)
        axes[1].plot([y_min, y_max], [y_min, y_max])
        axes[1].text(
            y_max - 0.3,
            y_min + 0.3,
            ("%.3f" % score).lstrip("0"),
            size=15,
            horizontalalignment="right",
        )
        axes[1].set_xlabel("Real")
        axes[1].set_ylabel("Predicted")
    plt.show()
def make_autopct(values):
    """Return an ``autopct`` callback for ``Axes.pie`` that shows counts.

    Matplotlib hands the callback a percentage; the callback converts it
    back into the absolute (rounded) count of the corresponding wedge so
    wedges are labeled with raw counts rather than percentages.
    """
    def _autopct(pct):
        count = int(round(pct * sum(values) / 100.0))
        return str(count)
    return _autopct
def classification_metrics(model, X_train, y_train, X_test, y_test):
    """Plot a 3x2 grid of binary-classification diagnostics.

    Left column is the training split, right column the test split.
    Rows: confusion-matrix pie chart with accuracy, ROC curve with AUC,
    and precision-recall curve with its AUC.
    """
    # Restrict features to the selected subset when feature selection was used.
    if model.support is not None:
        X_train = X_train.iloc[:, model.support]
        X_test = X_test.iloc[:, model.support]
    # Unwrap tuned wrappers down to the underlying estimator.
    if hasattr(model, "best_model"):
        if hasattr(model.best_model, "model"):
            model = model.best_model.model
        else:
            model = model.best_model
    fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(4 * 2, 4 * 3))
    i = 0  # column index: 0 = training data, 1 = test data
    for XX, YY, name in [
        [X_train, y_train, "Training data"],
        [X_test, y_test, "Test data"],
    ]:
        # Prefer probabilities; otherwise duplicate the decision scores into
        # two columns so probas[:, 1] works regardless of the source.
        if hasattr(model, "predict_proba"):
            probas = model.predict_proba(XX)
        elif hasattr(model, "decision_function"):
            probas = np.array([[x, x] for x in model.decision_function(XX)])
        else:
            probas = np.array([[x, x] for x in model.model.decision_function(XX)])
        fpr, tpr, thresholds = roc_curve(YY, probas[:, 1])
        roc_auc = auc(fpr, tpr)
        precision, recall, thresholds = precision_recall_curve(YY, probas[:, 1])
        area = auc(recall, precision)
        # NOTE(review): predictions are passed as the first argument, so rows
        # of `matrix` are predictions and columns are true labels; the
        # TN/FP/FN/TP extraction below relies on that orientation — confirm.
        matrix = confusion_matrix(model.predict(XX), YY)
        TN = matrix[0][0]
        FP = matrix[1][0]
        FN = matrix[0][1]
        TP = matrix[1][1]
        data = [TP, FN, FP, TN]
        # Row 0: confusion matrix as a pie chart labeled with raw counts,
        # plus the overall accuracy printed inside the panel.
        axes[0][i].set_title(name)
        axes[0][i].pie(
            data,
            counterclock=False,
            startangle=90,
            autopct=make_autopct(data),
            labels=["TP", "FN", "FP", "TN"],
            wedgeprops=dict(width=1, edgecolor="w"),
            colors=["skyblue", "orange", "tan", "lime"],
        )
        axes[0][i].text(
            1.0 - 0.5,
            0.0 + 0.7,
            ("%.3f" % ((TN + TP) / (TN + TP + FN + FP))).lstrip("0"),
            size=20,
            horizontalalignment="right",
        )
        # Row 1: ROC curve with the chance diagonal and the AUC value.
        axes[1][i].plot([0, 1], [0, 1])
        axes[1][i].plot(fpr, tpr, label="ROC curve (area = %0.2f)" % roc_auc)
        axes[1][i].fill_between(fpr, tpr, alpha=0.5)
        axes[1][i].set_xlim([0.0, 1.0])
        axes[1][i].set_ylim([0.0, 1.0])
        axes[1][i].set_xlabel("False Positive Rate")
        if i == 0:
            axes[1][i].set_ylabel("True Positive Rate")
        axes[1][i].text(
            1.0 - 0.3,
            0.0 + 0.3,
            ("%.3f" % roc_auc).lstrip("0"),
            size=20,
            horizontalalignment="right",
        )
        # Row 2: precision-recall curve with its AUC value.
        axes[2][i].plot(recall, precision, label="Precision-Recall curve")
        axes[2][i].fill_between(recall, precision, alpha=0.5)
        axes[2][i].set_xlabel("Recall")
        if i == 0:
            axes[2][i].set_ylabel("Precision")
        axes[2][i].set_xlim([0.0, 1.0])
        axes[2][i].set_ylim([0.0, 1.0])
        axes[2][i].text(
            1.0 - 0.3,
            0.0 + 0.3,
            ("%.3f" % area).lstrip("0"),
            size=20,
            horizontalalignment="right",
        )
        i += 1
    plt.show()
def all_classification_metrics(objective, X_test, y_test):
    """Plot classification diagnostics for every tuned model in *objective*.

    One column per model; rows are a confusion-matrix pie chart (with
    accuracy), the ROC curve (with AUC) and the precision-recall curve
    (with its AUC), all computed on the supplied test split.
    """
    fig, axes = plt.subplots(
        nrows=3,
        ncols=len(objective.best_models.keys()),
        figsize=(4 * len(objective.best_models.keys()), 4 * 3),
    )
    i = 0  # column index, one per model
    for name in objective.best_models.keys():
        model = objective.best_models[name]
        # Prefer probabilities; otherwise duplicate the decision scores into
        # two columns so probas[:, 1] works either way.
        # NOTE(review): the capability check inspects model.model but the
        # call goes through the wrapper's predict_proba — confirm the
        # wrapper forwards it.
        if hasattr(model.model, "predict_proba"):
            probas = model.predict_proba(X_test.iloc[:, objective.support])
        else:
            probas = np.array(
                [
                    [x, x]
                    for x in model.model.decision_function(
                        X_test.iloc[:, objective.support]
                    )
                ]
            )
        fpr, tpr, thresholds = roc_curve(y_test, probas[:, 1])
        roc_auc = auc(fpr, tpr)
        precision, recall, thresholds = precision_recall_curve(y_test, probas[:, 1])
        area = auc(recall, precision)
        # NOTE(review): predictions are the first argument, so rows of
        # `matrix` are predictions and columns are true labels; the
        # TN/FP/FN/TP extraction below relies on that orientation — confirm.
        matrix = confusion_matrix(
            model.predict(X_test.iloc[:, objective.support]), y_test
        )
        TN = matrix[0][0]
        FP = matrix[1][0]
        FN = matrix[0][1]
        TP = matrix[1][1]
        data = [TP, FN, FP, TN]
        # Row 0: confusion matrix as a pie chart labeled with raw counts,
        # plus the overall accuracy printed inside the panel.
        axes[0][i].set_title(name)
        axes[0][i].pie(
            data,
            counterclock=False,
            startangle=90,
            autopct=make_autopct(data),
            labels=["TP", "FN", "FP", "TN"],
            wedgeprops=dict(width=1, edgecolor="w"),
            colors=["skyblue", "orange", "tan", "lime"],
        )
        axes[0][i].text(
            1.0 - 0.5,
            0.0 + 0.7,
            ("%.3f" % ((TN + TP) / (TN + TP + FN + FP))).lstrip("0"),
            size=20,
            horizontalalignment="right",
        )
        # Row 1: ROC curve with the chance diagonal and the AUC value.
        axes[1][i].plot([0, 1], [0, 1])
        axes[1][i].plot(fpr, tpr, label="ROC curve (area = %0.2f)" % roc_auc)
        axes[1][i].fill_between(fpr, tpr, alpha=0.5)
        axes[1][i].set_xlim([0.0, 1.0])
        axes[1][i].set_ylim([0.0, 1.0])
        axes[1][i].set_xlabel("False Positive Rate")
        if i == 0:
            axes[1][i].set_ylabel("True Positive Rate")
        axes[1][i].text(
            1.0 - 0.3,
            0.0 + 0.3,
            ("%.3f" % roc_auc).lstrip("0"),
            size=20,
            horizontalalignment="right",
        )
        # Row 2: precision-recall curve with its AUC value.
        axes[2][i].plot(recall, precision, label="Precision-Recall curve")
        axes[2][i].fill_between(recall, precision, alpha=0.5)
        axes[2][i].set_xlabel("Recall")
        if i == 0:
            axes[2][i].set_ylabel("Precision")
        axes[2][i].set_xlim([0.0, 1.0])
        axes[2][i].set_ylim([0.0, 1.0])
        axes[2][i].text(
            1.0 - 0.3,
            0.0 + 0.3,
            ("%.3f" % area).lstrip("0"),
            size=20,
            horizontalalignment="right",
        )
        i += 1
    plt.show()
def all_regression_metrics(objective, X_test, y_test):
    """One scatter panel per tuned model: real vs. predicted on the test
    set, with an identity line and the R^2 score printed in the corner."""
    fig, axes = plt.subplots(
        nrows=1,
        ncols=len(objective.best_models.keys()),
        figsize=(4 * len(objective.best_models.keys()), 4),
    )
    i = 0
    for name in objective.best_models.keys():
        y_pred = objective.best_models[name].predict(X_test, support=objective.support)
        # BUG FIX: arguments were reversed (predictions passed as y_true).
        # r2_score is not symmetric, so that reported a wrong score.
        score = r2_score(np.array(y_test).ravel(), np.array(y_pred).ravel())
        axes[i].set_title(name)
        axes[i].scatter(y_test, y_pred, alpha=0.5)
        y_min = min(y_test.min(), y_pred.min())
        # BUG FIX: was min(...), which truncated the identity line whenever
        # real and predicted maxima differed.
        y_max = max(y_test.max(), y_pred.max())
        axes[i].plot([y_min, y_max], [y_min, y_max])
        axes[i].text(
            y_max - 0.3,
            y_min + 0.3,
            ("%.3f" % score).lstrip("0"),
            size=15,
            horizontalalignment="right",
        )
        axes[i].set_xlabel("Real")
        if i == 0:
            axes[i].set_ylabel("Predicted")
        i += 1
    plt.show()
def all_metrics(objective, X_test, y_test):
    """Render evaluation plots for every tuned model held by *objective*.

    Coerces the inputs to pandas, then delegates to the regression or
    classification variant depending on ``objective.is_regressor``.
    """
    features = pd.DataFrame(X_test)
    targets = y_test
    if type(targets) is not pd.core.series.Series:
        targets = pd.DataFrame(targets)[0]
    plotter = all_regression_metrics if objective.is_regressor else all_classification_metrics
    plotter(objective, features, targets)
| [
"matplotlib.pyplot.grid",
"sklearn.metrics.auc",
"matplotlib.pyplot.barh",
"sklearn.metrics.precision_recall_curve",
"numpy.array",
"sklearn.metrics.roc_curve",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((384, 408), 'matplotlib.pyplot.title', 'plt.title', (['"""Best scores"""'], {}), "('Best scores')\n", (393, 408), True, 'import matplotlib.pyplot as plt\n'), ((413, 435), 'matplotlib.pyplot.barh', 'plt.barh', (['keys', 'values'], {}), '(keys, values)\n', (421, 435), True, 'import matplotlib.pyplot as plt\n'), ((440, 450), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (448, 450), True, 'import matplotlib.pyplot as plt\n'), ((455, 465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (463, 465), True, 'import matplotlib.pyplot as plt\n'), ((517, 564), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(4)', 'figsize': '(16, 8)'}), '(nrows=1, ncols=4, figsize=(16, 8))\n', (529, 564), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1755, 1757), True, 'import matplotlib.pyplot as plt\n'), ((2166, 2204), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature importances in RF"""'], {}), "('Feature importances in RF')\n", (2175, 2204), True, 'import matplotlib.pyplot as plt\n'), ((2209, 2231), 'matplotlib.pyplot.barh', 'plt.barh', (['keys', 'values'], {}), '(keys, values)\n', (2217, 2231), True, 'import matplotlib.pyplot as plt\n'), ((2236, 2246), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2244, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2261), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2259, 2261), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2349), 'matplotlib.pyplot.title', 'plt.title', (['"""Model importances in stacking"""'], {}), "('Model importances in stacking')\n", (2316, 2349), True, 'import matplotlib.pyplot as plt\n'), ((2513, 2523), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2521, 2523), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2536, 2538), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2640), 
'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {}), '(X_train)\n', (2631, 2640), True, 'import pandas as pd\n'), ((4740, 4750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4748, 4750), True, 'import matplotlib.pyplot as plt\n'), ((5326, 5380), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'figsize': '(4 * 2, 4 * 3)'}), '(nrows=3, ncols=2, figsize=(4 * 2, 4 * 3))\n', (5338, 5380), True, 'import matplotlib.pyplot as plt\n'), ((7871, 7881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7879, 7881), True, 'import matplotlib.pyplot as plt\n'), ((10712, 10722), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10720, 10722), True, 'import matplotlib.pyplot as plt\n'), ((11698, 11708), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11706, 11708), True, 'import matplotlib.pyplot as plt\n'), ((11768, 11788), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {}), '(X_test)\n', (11780, 11788), True, 'import pandas as pd\n'), ((2779, 2799), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {}), '(X_test)\n', (2791, 2799), True, 'import pandas as pd\n'), ((2844, 2864), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (2856, 2864), True, 'import pandas as pd\n'), ((3449, 3495), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(8, 4)'}), '(nrows=1, ncols=1, figsize=(8, 4))\n', (3461, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3590), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(8, 4)'}), '(nrows=1, ncols=2, figsize=(8, 4))\n', (3556, 3590), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5880), 'sklearn.metrics.roc_curve', 'roc_curve', (['YY', 'probas[:, 1]'], {}), '(YY, probas[:, 1])\n', (5862, 5880), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((5899, 5912), 'sklearn.metrics.auc', 'auc', (['fpr', 
'tpr'], {}), '(fpr, tpr)\n', (5902, 5912), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((5953, 5993), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['YY', 'probas[:, 1]'], {}), '(YY, probas[:, 1])\n', (5975, 5993), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((6009, 6031), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (6012, 6031), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((8629, 8660), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'probas[:, 1]'], {}), '(y_test, probas[:, 1])\n', (8638, 8660), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((8679, 8692), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (8682, 8692), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((8733, 8777), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'probas[:, 1]'], {}), '(y_test, probas[:, 1])\n', (8755, 8777), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((8793, 8815), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (8796, 8815), False, 'from sklearn.metrics import auc, confusion_matrix, precision_recall_curve, r2_score, roc_curve\n'), ((2710, 2731), 'pandas.DataFrame', 'pd.DataFrame', (['y_train'], {}), '(y_train)\n', (2722, 2731), True, 'import pandas as pd\n'), ((11856, 11876), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (11868, 11876), True, 'import pandas as pd\n'), ((808, 840), 'numpy.array', 'np.array', (['objective.scores[name]'], {}), '(objective.scores[name])\n', (816, 840), True, 'import numpy as np\n'), ((875, 907), 
'numpy.array', 'np.array', (['objective.scores[name]'], {}), '(objective.scores[name])\n', (883, 907), True, 'import numpy as np\n'), ((943, 974), 'numpy.array', 'np.array', (['objective.times[name]'], {}), '(objective.times[name])\n', (951, 974), True, 'import numpy as np\n'), ((1010, 1041), 'numpy.array', 'np.array', (['objective.times[name]'], {}), '(objective.times[name])\n', (1018, 1041), True, 'import numpy as np\n'), ((11111, 11127), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (11119, 11127), True, 'import numpy as np\n'), ((11137, 11153), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (11145, 11153), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# Copyright(c) 2017-2018 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# ANSI terminal escape codes used to colorize console messages.
GREEN = '\033[1;32m'
RED = '\033[1;31m'
NOCOLOR = '\033[0m'  # reset back to the default color
YELLOW = '\033[1;33m'
# Import the OpenVINO inference engine.  If the environment is not set up
# (setupvars.sh not sourced), print a colorized hint instead of a raw
# traceback and exit.  NOTE(review): the bare `except:` also swallows
# unrelated import errors — consider narrowing to ImportError.
try:
    from openvino.inference_engine import IENetwork, ExecutableNetwork, IECore
    import openvino.inference_engine.ie_api
except:
    print(RED + '\nPlease make sure your OpenVINO environment variables are set by sourcing the' + YELLOW + ' setupvars.sh ' + RED + 'script found in <your OpenVINO install location>/bin/ folder.\n' + NOCOLOR)
    exit(1)
import cv2
import numpy
import time
import sys
import threading
import os
from sys import argv
import datetime
import queue
from queue import *
# Default inference device; may be overridden with the device=/dev= argument.
INFERENCE_DEV = "MYRIAD"
sep = os.path.sep
# Default locations for the input images and the IR model files.
DEFAULT_IMAGE_DIR = "." + sep + "images"
DEFAULT_MODEL_XML = "." + sep + "googlenet-v1.xml"
DEFAULT_MODEL_BIN = "." + sep + "googlenet-v1.bin"
cv_window_name = "benchmark_ncs"
# how long to wait for queues (seconds)
QUEUE_WAIT_SECONDS = 10
# set some global parameters to initial values that may get overridden with arguments to the application.
inference_device = INFERENCE_DEV
image_dir = DEFAULT_IMAGE_DIR
number_of_devices = 1
number_of_inferences = 1000
run_async = True
time_threads = True
time_main = False
threads_per_dev = 3 # for each device one executable network will be created and this many threads will be
                    # created to feed async inference requests to it.
simultaneous_infer_per_thread = 6  # Each thread will start this many async inferences at at time.
                                   # it should be at least the number of NCEs on board.  The Myriad X has 2
                                   # seem to get slightly better results more.  Myriad X does well with 4
report_interval = int(number_of_inferences / 10) #report out the current FPS every this many inferences
model_xml_fullpath = DEFAULT_MODEL_XML
model_bin_fullpath = DEFAULT_MODEL_BIN
# NOTE(review): net_config is defined but never passed to load_network below — confirm whether it should be.
net_config = {'HW_STAGES_OPTIMIZATION': 'YES', 'COMPUTE_LAYOUT':'VPU_NCHW', 'RESHAPE_OPTIMIZATION':'NO'}
# Maximum number of pending results in the inter-thread result queue.
INFER_RES_QUEUE_SIZE = 6
def handle_args():
    """Read ``name=value`` tokens from the command line and update the
    module-level configuration globals to match.

    :return: False if there was an error with the args, or True if args
             processed ok.
    """
    global number_of_devices, number_of_inferences, model_xml_fullpath, model_bin_fullpath, run_async, \
           time_threads, time_main, num_ncs_devs, threads_per_dev, simultaneous_infer_per_thread, report_interval, \
           image_dir, inference_device

    have_model_xml = False
    have_model_bin = False

    for an_arg in argv:
        lower_arg = str(an_arg).lower()
        if an_arg == argv[0]:
            # Skip the program name itself.
            continue
        elif lower_arg == 'help':
            return False
        elif lower_arg.startswith('num_devices=') or lower_arg.startswith("nd="):
            try:
                arg, val = str(an_arg).split('=', 1)
                number_of_devices = int(val)
                # BUG FIX: was `< 0`, which wrongly accepted num_devices=0.
                if number_of_devices < 1:
                    print('Error - num_devices argument invalid. It must be > 0')
                    return False
                print('setting num_devices: ' + str(number_of_devices))
            except Exception:
                print('Error - num_devices argument invalid. It must be between 1 and number of devices in system')
                return False
        elif lower_arg.startswith('device=') or lower_arg.startswith("dev="):
            try:
                arg, val = str(an_arg).split('=', 1)
                inference_device = str(val)
                print("inference device:", inference_device)
                if inference_device != "MYRIAD" and inference_device != "CPU":
                    print('Error - Device must be CPU or MYRIAD')
                    return False
                print('setting device: ' + str(inference_device))
            except Exception:
                print('Error - Device must be CPU or MYRIAD')
                return False
        elif lower_arg.startswith('report_interval=') or lower_arg.startswith("ri="):
            try:
                arg, val = str(an_arg).split('=', 1)
                report_interval = int(val)
                # 0 is valid here: it suppresses progress reporting entirely.
                if report_interval < 0:
                    print('Error - report_interval must be greater than or equal to 0')
                    return False
                print('setting report_interval: ' + str(report_interval))
            except Exception:
                print('Error - report_interval argument invalid. It must be greater than or equal to zero')
                return False
        elif lower_arg.startswith('num_inferences=') or lower_arg.startswith('ni='):
            try:
                arg, val = str(an_arg).split('=', 1)
                number_of_inferences = int(val)
                # BUG FIX: was `< 0`, which wrongly accepted num_inferences=0.
                if number_of_inferences < 1:
                    print('Error - num_inferences argument invalid. It must be > 0')
                    return False
                print('setting num_inferences: ' + str(number_of_inferences))
            except Exception:
                # BUG FIX: this message was a copy-paste of the num_devices text.
                print('Error - num_inferences argument invalid. It must be > 0')
                return False
        elif lower_arg.startswith('num_threads_per_device=') or lower_arg.startswith('ntpd='):
            try:
                arg, val = str(an_arg).split('=', 1)
                threads_per_dev = int(val)
                # BUG FIX: was `< 0`, which wrongly accepted 0 threads.
                if threads_per_dev < 1:
                    print('Error - threads_per_dev argument invalid. It must be > 0')
                    return False
                print('setting num_threads_per_device: ' + str(threads_per_dev))
            except Exception:
                print('Error - num_threads_per_device argument invalid, it must be a positive integer.')
                return False
        elif lower_arg.startswith('num_simultaneous_inferences_per_thread=') or lower_arg.startswith('nsipt='):
            try:
                arg, val = str(an_arg).split('=', 1)
                simultaneous_infer_per_thread = int(val)
                # BUG FIX: was `< 0`, which wrongly accepted 0 inferences.
                if simultaneous_infer_per_thread < 1:
                    print('Error - simultaneous_infer_per_thread argument invalid. It must be > 0')
                    return False
                print('setting num_simultaneous_inferences_per_thread: ' + str(simultaneous_infer_per_thread))
            except Exception:
                print('Error - num_simultaneous_inferences_per_thread argument invalid, it must be a positive integer.')
                return False
        elif lower_arg.startswith('model_xml=') or lower_arg.startswith('mx='):
            try:
                arg, val = str(an_arg).split('=', 1)
                model_xml_fullpath = val
                if not os.path.isfile(model_xml_fullpath):
                    print("Error - Model XML file passed does not exist or isn't a file")
                    return False
                print('setting model_xml: ' + str(model_xml_fullpath))
                have_model_xml = True
            except Exception:
                print('Error with model_xml argument. It must be a valid model file generated by the OpenVINO Model Optimizer')
                return False
        elif lower_arg.startswith('model_bin=') or lower_arg.startswith('mb='):
            try:
                arg, val = str(an_arg).split('=', 1)
                model_bin_fullpath = val
                if not os.path.isfile(model_bin_fullpath):
                    print("Error - Model bin file passed does not exist or isn't a file")
                    return False
                print('setting model_bin: ' + str(model_bin_fullpath))
                have_model_bin = True
            except Exception:
                print('Error with model_bin argument. It must be a valid model file generated by the OpenVINO Model Optimizer')
                return False
        elif lower_arg.startswith('run_async=') or lower_arg.startswith('ra='):
            try:
                arg, val = str(an_arg).split('=', 1)
                run_async = (val.lower() == 'true')
                print('setting run_async: ' + str(run_async))
            except Exception:
                print("Error with run_async argument. It must be 'True' or 'False' ")
                return False
        elif lower_arg.startswith('image_dir=') or lower_arg.startswith('id='):
            try:
                arg, val = str(an_arg).split('=', 1)
                image_dir = val
                if not os.path.isdir(image_dir):
                    print("Error - Image directory passed does not exist or isn't a directory:")
                    print("       passed value: " + image_dir)
                    return False
                print('setting image_dir: ' + str(image_dir))
            except Exception:
                # BUG FIX: this message used to reference model_xml instead of image_dir.
                print('Error with image_dir argument. It must be a directory containing the images to infer.')
                return False
        elif lower_arg.startswith('time_threads=') or lower_arg.startswith('tt='):
            try:
                arg, val = str(an_arg).split('=', 1)
                time_threads = (val.lower() == 'true')
                print('setting time_threads: ' + str(time_threads))
            except Exception:
                print("Error with time_threads argument. It must be 'True' or 'False' ")
                return False
        elif lower_arg.startswith('time_main=') or lower_arg.startswith('tm='):
            try:
                arg, val = str(an_arg).split('=', 1)
                time_main = (val.lower() == 'true')
                print('setting time_main: ' + str(time_main))
            except Exception:
                print("Error with time_main argument. It must be 'True' or 'False' ")
                return False

    # At least one timing source must be enabled or there is nothing to report.
    if time_main == False and time_threads == False:
        print("Error - Both time_threads and time_main args were set to false. One of these must be true. ")
        return False

    # The xml and bin files describe one model; they must be overridden together.
    if (have_model_bin and not have_model_xml) or (have_model_xml and not have_model_bin):
        print("Error - only one of model_bin and model_xml were specified. You must specify both or neither.")
        return False

    # Synchronous mode supports exactly one outstanding inference per thread.
    if (run_async == False) and (simultaneous_infer_per_thread != 1):
        print("Warning - If run_async is False then num_simultaneous_inferences_per_thread must be 1.")
        print("Setting num_simultaneous_inferences_per_thread to 1")
        simultaneous_infer_per_thread = 1

    return True
def print_arg_vals():
    """Print a banner with the current values of all runtime parameters."""
    settings = [
        ('device', inference_device),
        ('num_devices', number_of_devices),
        ('num_inferences', number_of_inferences),
        ('num_threads_per_device', threads_per_dev),
        ('num_simultaneous_inferences_per_thread', simultaneous_infer_per_thread),
        ('report_interval', report_interval),
        ('model_xml', model_xml_fullpath),
        ('model_bin', model_bin_fullpath),
        ('image_dir', image_dir),
        ('run_async', run_async),
        ('time_threads', time_threads),
        ('time_main', time_main),
    ]
    print("")
    print("--------------------------------------------------------")
    print("Current date and time: " + str(datetime.datetime.now()))
    print("")
    print("program arguments:")
    print("------------------")
    for label, value in settings:
        print(label + ': ' + str(value))
    print("--------------------------------------------------------")
def print_usage():
    """Print command-line usage help.

    BUG FIX: the help text previously described stale defaults (200
    inferences, 2 threads, 4 simultaneous inferences, report every 400)
    that no longer matched the actual module defaults; it also omitted the
    device= option and misspelled "suppress".
    """
    print('\nusage: ')
    print('python3 benchmark_ncs [help][nd=<number of devices to use>] [ni=<number of inferences per device>]')
    print('                      [report_interval=<num inferences between reporting>] [ntpd=<number of threads to use per device>]')
    print('                      [nsipt=<simultaneous inference on each thread>] [mx=<path to model xml file> mb=<path to model bin file>]')
    print('')
    print('options:')
    print("  num_devices or nd - The number of devices to use for inferencing  ")
    print("                      The value must be between 1 and the total number of devices in the system.")
    print("                      Default is to use 1 device. ")
    print("  device or dev - The inference device to use, CPU or MYRIAD. ")
    print("                      Default is " + INFERENCE_DEV)
    print("  num_inferences or ni - The number of inferences to run on each device. ")
    print("                      Default is to run 1000 inferences. ")
    print("  report_interval or ri - Report the current FPS every time this many inferences are complete. To suppress reporting set to 0")
    print("                      Default is to report FPS every " + str(report_interval) + " inferences. ")
    print("  num_threads_per_device or ntpd - The number of threads to create that will run inferences in parallel for each device. ")
    print("                      Default is to create " + str(threads_per_dev) + " threads per device. ")
    print("  num_simultaneous_inferences_per_thread or nsipt - The number of inferences that each thread will create asynchronously. ")
    print("                      This should be at least equal to the number of NCEs on board or more.")
    print("                      Default is " + str(simultaneous_infer_per_thread) + " simultaneous inferences per thread.")
    print("  model_xml or mx - Full path to the model xml file generated by the model optimizer. ")
    print("                      Default is " + DEFAULT_MODEL_XML)
    print("  model_bin or mb - Full path to the model bin file generated by the model optimizer. ")
    print("                      Default is " + DEFAULT_MODEL_BIN)
    print("  image_dir or id - Path to directory with images to use. ")
    print("                      Default is " + DEFAULT_IMAGE_DIR)
    print("  run_async or ra - Set to true to run asynchronous inferences using two threads per device")
    print("                      Default is True ")
    print("  time_main or tm - Set to true to use the time and calculate FPS from the main loop")
    print("                      Default is False ")
    print("  time_threads or tt - Set to true to use the time and calculate FPS from the time reported from inference threads")
    print("                      Default is True ")
def preprocess_image(n: int, c: int, h: int, w: int, image_filename: str):
    """Load an image file and reshape it to the network input layout.

    Reads the image with OpenCV, resizes it to (w, h), converts HWC to CHW
    and returns an array shaped (n, c, h, w) ready for inference.
    """
    raw = cv2.imread(image_filename)
    resized = cv2.resize(raw, (w, h))
    chw = resized.transpose((2, 0, 1))  # HWC -> CHW
    return chw.reshape((n, c, h, w))
def main():
    """Program entry point: parse args, load the model, spawn inference
    threads, report progress and print the final FPS numbers.

    :return: None on success, 1 when no input images were found.
    """
    if (handle_args() != True):
        print_usage()
        exit()
    print_arg_vals()
    num_ncs_devs = number_of_devices
    # Calculate the number of inferences to be made per thread
    total_number_of_threads = threads_per_dev * num_ncs_devs
    inferences_per_thread = int(number_of_inferences / total_number_of_threads)
    # Round down so each thread runs a whole number of simultaneous batches.
    inferences_per_thread = int(inferences_per_thread / simultaneous_infer_per_thread) * simultaneous_infer_per_thread
    # Total number of threads that need to be spawned
    # NOTE(review): duplicates total_number_of_threads computed above.
    total_number_threads = num_ncs_devs * threads_per_dev
    # Lists and queues to hold data
    infer_result_queue = queue.Queue(INFER_RES_QUEUE_SIZE)
    infer_time_list = [None] * (total_number_threads)
    thread_list = [None] * (total_number_threads)
    # Threading barriers to sync all thread processing (+1 for this main thread).
    start_barrier = threading.Barrier(total_number_threads + 1)
    end_barrier = threading.Barrier(total_number_threads + 1)
    ie = IECore()
    # Create the network object
    net = IENetwork(model=model_xml_fullpath, weights=model_bin_fullpath)
    # Get the input and output blob names and the network input information
    input_blob = next(iter(net.inputs))
    output_blob = next(iter(net.outputs))
    n, c, h, w = net.inputs[input_blob].shape
    # Get a list of all the .jpg/.png files in the image directory
    image_filename_list = os.listdir(image_dir)
    image_filename_list = [image_dir + sep + i for i in image_filename_list if (i.endswith('.jpg') or i.endswith(".png"))]
    if (len(image_filename_list) < 1):
        # no images to show
        print('No image files found (.jpg or .png)')
        return 1
    print("Found " + str(len(image_filename_list)) + " images.")
    # Preprocess all images in the list up front so the timed loop only infers.
    preprocessed_image_list = [None]*len(image_filename_list)
    preprocessed_image_index = 0
    for one_image_filename in image_filename_list:
        one_preprocessed_image = preprocess_image(n,c,h,w,one_image_filename)
        preprocessed_image_list[preprocessed_image_index] = one_preprocessed_image
        preprocessed_image_index += 1
    # Number of images to be inferred per thread
    images_per_thread = int(len(preprocessed_image_list) / total_number_threads)
    exec_net_list = [None] * num_ncs_devs
    # creates an executable network for each device
    for dev_index in range(0, num_ncs_devs):
        exec_net_list[dev_index] = ie.load_network(network = net, num_requests = threads_per_dev * simultaneous_infer_per_thread, device_name=inference_device)
        # create threads for each executable network (one executable network per device)
        for dev_thread_index in range(0, threads_per_dev):
            # divide up the images to be processed by each thread
            # device thread index starts at 0. device indexes start at 0.
            total_thread_index = dev_thread_index + (threads_per_dev * dev_index)
            # Find out which index in preprocessed_image_list to start from
            first_image_index = int(total_thread_index * images_per_thread)
            # Find out which index in preprocessed_image_list to stop at
            last_image_index = int(first_image_index + images_per_thread - 1)
            if (run_async):
                # Each thread gets its own contiguous range of request ids.
                dev_thread_req_id = dev_thread_index * simultaneous_infer_per_thread
                thread_list[total_thread_index] = threading.Thread(target=infer_async_thread_proc,
                                                       args=[net, exec_net_list[dev_index], dev_thread_req_id,
                                                             preprocessed_image_list,
                                                             first_image_index, last_image_index,
                                                             inferences_per_thread,
                                                             infer_time_list, total_thread_index,
                                                             start_barrier, end_barrier, simultaneous_infer_per_thread,
                                                             infer_result_queue, input_blob, output_blob], daemon = True)
            else:
                print("run_async=false not yet supported")
                exit(-1)
    del net
    # Start the threads
    try:
        for one_thread in thread_list:
            one_thread.start()
    except (KeyboardInterrupt, SystemExit):
        # NOTE(review): cleanup_stop_thread is not defined anywhere in this
        # file — this handler would raise NameError if it ever fired.
        cleanup_stop_thread()
        sys.exit()
    # Wait until every worker thread is ready before starting the clock.
    start_barrier.wait()
    # Save the main starting time
    main_start_time = time.time()
    interval_start_time = time.time()
    print("Inferences started...")
    cur_fps = 0.0
    result_counter = 0
    accum_fps = 0.0
    frames_since_last_report = 0
    total_number_inferences = total_number_threads * inferences_per_thread
    # Report intermediate results
    while (result_counter < total_number_inferences):
        # Block until the next inference result arrives (or time out).
        infer_res = infer_result_queue.get(True, QUEUE_WAIT_SECONDS)
        infer_res_index = infer_res[0]
        infer_res_probability = infer_res[1]
        result_counter += 1
        frames_since_last_report += 1
        if (report_interval > 0):
            if ((frames_since_last_report > report_interval)):
                cur_time = time.time()
                accum_duration = cur_time - main_start_time
                cur_duration = cur_time - interval_start_time
                cur_fps = frames_since_last_report / cur_duration
                # The printed value is the cumulative FPS, not the interval FPS.
                accum_fps = result_counter / accum_duration
                print(str(result_counter) + " inferences completed. Current fps: " + '{0:.1f}'.format(accum_fps))
                frames_since_last_report = 0
                interval_start_time = time.time()
        infer_result_queue.task_done()
    # wait for all the inference threads to reach end barrier
    print("Main end barrier reached")
    end_barrier.wait()
    # Save main end time
    main_end_time = time.time()
    print("Inferences finished.")
    # wait for all threads to finish
    for one_thread in thread_list:
        one_thread.join()
    total_thread_fps = 0.0
    total_thread_time = 0.0
    # Calculate total time and fps across all threads.
    for thread_index in range(0, (num_ncs_devs*threads_per_dev)):
        total_thread_time += infer_time_list[thread_index]
        total_thread_fps += (inferences_per_thread / infer_time_list[thread_index])
    # NOTE(review): devices_count is assigned but never used.
    devices_count = str(number_of_devices)
    if (time_threads):
        print("\n------------------- Thread timing -----------------------")
        print("--- Device: " + str(inference_device))
        print("--- Model: " + model_xml_fullpath)
        print("--- Total FPS: " + '{0:.1f}'.format(total_thread_fps))
        print("--- FPS per device: " + '{0:.1f}'.format(total_thread_fps / num_ncs_devs))
        print("---------------------------------------------------------")
    main_time = main_end_time - main_start_time
    if (time_main):
        main_fps = result_counter / main_time
        print ("\n------------------ Main timing -------------------------")
        print ("--- FPS: " + str(main_fps))
        print ("--- FPS per device: " + str(main_fps/num_ncs_devs))
        print ("--------------------------------------------------------")
    # Clean up
    # NOTE(review): deleting the loop variable does not release the
    # executable networks; `exec_net_list` itself still holds them.
    for one_exec_net in exec_net_list:
        del one_exec_net
# use this thread proc to try to implement:
# 1 plugin per app
# 1 executable Network per device
# multiple threads per executable network
# multiple requests per executable network per thread
def infer_async_thread_proc(net, exec_net: ExecutableNetwork, dev_thread_request_id: int,
                            image_list: list,
                            first_image_index: int, last_image_index: int,
                            num_total_inferences: int, result_list: list, result_index: int,
                            start_barrier: threading.Barrier, end_barrier: threading.Barrier,
                            simultaneous_infer_per_thread: int, infer_result_queue: queue.Queue,
                            input_blob, output_blob):
    """Worker thread body: issue batches of asynchronous inference requests.

    Runs ``num_total_inferences`` inferences against ``exec_net`` in batches of
    ``simultaneous_infer_per_thread`` outstanding requests, cycling over the
    images in ``image_list[first_image_index:last_image_index + 1]``.  The top
    classification (index, probability) of every inference is pushed onto
    ``infer_result_queue`` and the thread's total wall time is stored in
    ``result_list[result_index]``.
    """
    # Hold here until every worker (and main) is ready so timings line up.
    start_barrier.wait()

    begin = time.time()

    pending = [None] * simultaneous_infer_per_thread
    next_image = first_image_index
    batches = int(num_total_inferences / simultaneous_infer_per_thread)

    for _ in range(batches):
        # Kick off one async request per simultaneous slot.
        for slot in range(simultaneous_infer_per_thread):
            req_id = dev_thread_request_id + slot
            pending[slot] = exec_net.start_async(
                request_id=req_id,
                inputs={input_blob: image_list[next_image]})
            next_image += 1
            if next_image > last_image_index:
                # Wrap back to this thread's first image.
                next_image = first_image_index

        # Drain the batch, pushing (top class index, probability) results.
        for slot in range(simultaneous_infer_per_thread):
            pending[slot].wait()
            result = pending[slot].outputs[output_blob]
            top = numpy.argsort(result, axis=1)[0, -1:][::-1][0]
            infer_result_queue.put((top, result[0][top]))
            pending[slot] = None

    # Record this thread's total inference wall time for the caller.
    result_list[result_index] = time.time() - begin

    print("Thread " + str(result_index) + " end barrier reached")
    # Wait for all inference threads to finish.
    end_barrier.wait()
# Script entry point: run main() and propagate its return value as the
# process exit status.
if __name__ == "__main__":
    sys.exit(main())
| [
"os.listdir",
"sys.exit",
"openvino.inference_engine.IENetwork",
"queue.Queue",
"numpy.argsort",
"datetime.datetime.now",
"os.path.isfile",
"openvino.inference_engine.IECore",
"os.path.isdir",
"time.time",
"threading.Thread",
"cv2.resize",
"threading.Barrier",
"cv2.imread"
] | [((14365, 14391), 'cv2.imread', 'cv2.imread', (['image_filename'], {}), '(image_filename)\n', (14375, 14391), False, 'import cv2\n'), ((14404, 14429), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (14414, 14429), False, 'import cv2\n'), ((15411, 15444), 'queue.Queue', 'queue.Queue', (['INFER_RES_QUEUE_SIZE'], {}), '(INFER_RES_QUEUE_SIZE)\n', (15422, 15444), False, 'import queue\n'), ((15628, 15671), 'threading.Barrier', 'threading.Barrier', (['(total_number_threads + 1)'], {}), '(total_number_threads + 1)\n', (15645, 15671), False, 'import threading\n'), ((15690, 15733), 'threading.Barrier', 'threading.Barrier', (['(total_number_threads + 1)'], {}), '(total_number_threads + 1)\n', (15707, 15733), False, 'import threading\n'), ((15744, 15752), 'openvino.inference_engine.IECore', 'IECore', ([], {}), '()\n', (15750, 15752), False, 'from openvino.inference_engine import IENetwork, ExecutableNetwork, IECore\n'), ((15801, 15864), 'openvino.inference_engine.IENetwork', 'IENetwork', ([], {'model': 'model_xml_fullpath', 'weights': 'model_bin_fullpath'}), '(model=model_xml_fullpath, weights=model_bin_fullpath)\n', (15810, 15864), False, 'from openvino.inference_engine import IENetwork, ExecutableNetwork, IECore\n'), ((16181, 16202), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (16191, 16202), False, 'import os\n'), ((19555, 19566), 'time.time', 'time.time', ([], {}), '()\n', (19564, 19566), False, 'import time\n'), ((19593, 19604), 'time.time', 'time.time', ([], {}), '()\n', (19602, 19604), False, 'import time\n'), ((20994, 21005), 'time.time', 'time.time', ([], {}), '()\n', (21003, 21005), False, 'import time\n'), ((23227, 23238), 'time.time', 'time.time', ([], {}), '()\n', (23236, 23238), False, 'import time\n'), ((24643, 24654), 'time.time', 'time.time', ([], {}), '()\n', (24652, 24654), False, 'import time\n'), ((19453, 19463), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19461, 19463), False, 'import sys\n'), ((10805, 
10828), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10826, 10828), False, 'import datetime\n'), ((18223, 18588), 'threading.Thread', 'threading.Thread', ([], {'target': 'infer_async_thread_proc', 'args': '[net, exec_net_list[dev_index], dev_thread_req_id, preprocessed_image_list,\n first_image_index, last_image_index, inferences_per_thread,\n infer_time_list, total_thread_index, start_barrier, end_barrier,\n simultaneous_infer_per_thread, infer_result_queue, input_blob, output_blob]', 'daemon': '(True)'}), '(target=infer_async_thread_proc, args=[net, exec_net_list[\n dev_index], dev_thread_req_id, preprocessed_image_list,\n first_image_index, last_image_index, inferences_per_thread,\n infer_time_list, total_thread_index, start_barrier, end_barrier,\n simultaneous_infer_per_thread, infer_result_queue, input_blob,\n output_blob], daemon=True)\n', (18239, 18588), False, 'import threading\n'), ((20311, 20322), 'time.time', 'time.time', ([], {}), '()\n', (20320, 20322), False, 'import time\n'), ((20768, 20779), 'time.time', 'time.time', ([], {}), '()\n', (20777, 20779), False, 'import time\n'), ((24306, 24335), 'numpy.argsort', 'numpy.argsort', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (24319, 24335), False, 'import numpy\n'), ((6918, 6952), 'os.path.isfile', 'os.path.isfile', (['model_xml_fullpath'], {}), '(model_xml_fullpath)\n', (6932, 6952), False, 'import os\n'), ((7584, 7618), 'os.path.isfile', 'os.path.isfile', (['model_bin_fullpath'], {}), '(model_bin_fullpath)\n', (7598, 7618), False, 'import os\n'), ((8647, 8671), 'os.path.isdir', 'os.path.isdir', (['image_dir'], {}), '(image_dir)\n', (8660, 8671), False, 'import os\n')] |
# yellowbrick.cluster.silhouette
# Implements visualizers using the silhouette metric for cluster evaluation.
#
# Author: <NAME> <<EMAIL>>
# Created: Mon Mar 27 10:09:24 2017 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: silhouette.py [57b563b] <EMAIL> $
"""
Implements visualizers that use the silhouette metric for cluster evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.ticker as ticker
from ..style import resolve_colors
from .base import ClusteringScoreVisualizer
from sklearn.metrics import silhouette_score, silhouette_samples
## Packages for export
__all__ = [
"SilhouetteVisualizer"
]
##########################################################################
## Silhouette Method for K Selection
##########################################################################
class SilhouetteVisualizer(ClusteringScoreVisualizer):
    """Visual evaluation of cluster cohesion via silhouette coefficients.

    For every sample the silhouette coefficient compares the mean
    intra-cluster distance with the mean nearest-cluster distance,
    yielding a value in ``[-1, 1]``: values near +1 indicate well
    separated clusters while values near -1 suggest the sample sits in
    the wrong cluster.  One horizontal band is drawn per cluster, so
    imbalanced or poorly cohesive clusters are easy to spot, and the
    mean score over all samples is drawn as a dashed red vertical line.

    Comparing several visualizers (one per candidate number of centers)
    is a common way to select a value for K.

    Parameters
    ----------
    model : a Scikit-Learn clusterer
        An instance of a centroidal clustering algorithm such as
        ``KMeans`` or ``MiniBatchKMeans``.

    ax : matplotlib Axes, default: None
        Axes to draw on; the current axes are used (or created) when None.

    colors : iterable or string, default: None
        Colors for the cluster groups, repeated if there are fewer colors
        than clusters. May also be a matplotlib colormap name.

    kwargs : dict
        Keyword arguments forwarded to the base visualizer.

    Attributes
    ----------
    silhouette_score_ : float
        Mean silhouette coefficient over all samples, computed via
        ``sklearn.metrics.silhouette_score``.

    silhouette_samples_ : array, shape = [n_samples]
        Per-sample silhouette coefficients, computed via
        ``sklearn.metrics.silhouette_samples``.

    n_samples_ : integer
        Total number of samples in the dataset (``X.shape[0]``).

    n_clusters_ : integer
        Number of clusters used by the wrapped scikit-learn model.

    y_tick_pos_ : array of shape (n_clusters,)
        Vertical center of each cluster's silhouette band.

    Examples
    --------
    >>> from yellowbrick.cluster import SilhouetteVisualizer
    >>> from sklearn.cluster import KMeans
    >>> model = SilhouetteVisualizer(KMeans(10))
    >>> model.fit(X)
    >>> model.poof()
    """

    def __init__(self, model, ax=None, colors=None, **kwargs):
        super(SilhouetteVisualizer, self).__init__(model, ax=ax, **kwargs)

        # An explicit ``colormap`` keyword overrides ``colors``; either may
        # still be resolved later by resolve_colors in draw().
        self.colors = kwargs["colormap"] if "colormap" in kwargs else colors

    def fit(self, X, y=None, **kwargs):
        """Fit the wrapped clusterer and render the silhouette plot.

        Returns ``self`` to allow method chaining.
        """
        # TODO: decide whether drawing belongs here or in score(); the
        # standard score semantics differ slightly, so fit() is used for now.
        self.estimator.fit(X, y, **kwargs)

        # Dataset / model bookkeeping used by draw() and finalize().
        self.n_samples_ = X.shape[0]
        self.n_clusters_ = self.estimator.n_clusters

        # Aggregate and per-sample silhouette coefficients.
        cluster_labels = self.estimator.predict(X)
        self.silhouette_score_ = silhouette_score(X, cluster_labels)
        self.silhouette_samples_ = silhouette_samples(X, cluster_labels)

        self.draw(cluster_labels)
        return self

    def draw(self, labels):
        """Draw one silhouette band per cluster plus the average score line.

        Parameters
        ----------
        labels : array-like
            Cluster label of each silhouette sample, usually computed with
            ``predict()``. Labels are not stored on the visualizer so the
            figure can be redrawn with new data.
        """
        # Resolve the cluster palette from the colors/colormap properties.
        palette_kwargs = {"n_colors": self.n_clusters_}
        if self.colors is None:
            palette_kwargs["colormap"] = "Set1"
        elif isinstance(self.colors, str):
            palette_kwargs["colormap"] = self.colors
        else:
            palette_kwargs["colors"] = self.colors
        palette = resolve_colors(**palette_kwargs)

        lower = 10  # bottom edge of the current silhouette band
        self.y_tick_pos_ = []

        for label in range(self.n_clusters_):
            # Sorted silhouette coefficients for the samples of this cluster.
            band = np.sort(self.silhouette_samples_[labels == label])

            upper = lower + band.shape[0]
            shade = palette[label]
            self.ax.fill_betweenx(
                np.arange(lower, upper), 0, band,
                facecolor=shade, edgecolor=shade, alpha=0.5
            )

            # Center the y tick inside the band, then leave a 10 unit gap
            # before the next cluster's band.
            self.y_tick_pos_.append(lower + 0.5 * band.shape[0])
            lower = upper + 10

        # Dashed vertical line at the mean silhouette coefficient.
        self.ax.axvline(
            x=self.silhouette_score_, color="red", linestyle="--",
            label="Average Silhouette Score"
        )
        return self.ax

    def finalize(self):
        """Set the title, axis limits, labels, ticks and legend."""
        self.set_title((
            "Silhouette Plot of {} Clustering for {} Samples in {} Centers"
        ).format(
            self.name, self.n_samples_, self.n_clusters_
        ))

        # Silhouette coefficients live in [-1, 1]; clamp the x axis to the
        # observed range (rounded, with 0.1 padding) for a tighter plot.
        lo = max(-1, min(-0.1, round(min(self.silhouette_samples_) - 0.1, 1)))
        hi = min(1, round(max(self.silhouette_samples_) + 0.1, 1))
        self.ax.set_xlim([lo, hi])

        # Reserve (n_clusters_ + 1) * 10 blank units to demarcate the bands.
        self.ax.set_ylim([0, self.n_samples_ + (self.n_clusters_ + 1) * 10])

        self.ax.set_xlabel("silhouette coefficient values")
        self.ax.set_ylabel("cluster label")

        self.ax.set_yticks(self.y_tick_pos_)
        self.ax.set_yticklabels(str(label) for label in range(self.n_clusters_))
        # Tick the x axis at multiples of 0.1.
        self.ax.xaxis.set_major_locator(ticker.MultipleLocator(0.1))

        self.ax.legend(loc="best")
| [
"matplotlib.ticker.MultipleLocator",
"sklearn.metrics.silhouette_samples",
"sklearn.metrics.silhouette_score",
"numpy.arange"
] | [((4916, 4943), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'labels'], {}), '(X, labels)\n', (4932, 4943), False, 'from sklearn.metrics import silhouette_score, silhouette_samples\n'), ((4979, 5008), 'sklearn.metrics.silhouette_samples', 'silhouette_samples', (['X', 'labels'], {}), '(X, labels)\n', (4997, 5008), False, 'from sklearn.metrics import silhouette_score, silhouette_samples\n'), ((8627, 8654), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (8649, 8654), True, 'import matplotlib.ticker as ticker\n'), ((6556, 6583), 'numpy.arange', 'np.arange', (['y_lower', 'y_upper'], {}), '(y_lower, y_upper)\n', (6565, 6583), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from ome_zarr.scale import Scaler
class TestScaler:
    """Exercise each downscaling method of :class:`ome_zarr.scale.Scaler`."""

    @pytest.fixture(
        params=(
            (1, 2, 1, 256, 256),
            (3, 512, 512),
            (256, 256),
        ),
        ids=["5D", "3D", "2D"],
    )
    def shape(self, request):
        # Parametrized input shape: 5D, 3D and 2D variants.
        return request.param

    def create_data(self, shape, dtype=np.uint8, mean_val=10):
        """Return a deterministic Poisson-noise image of the given shape."""
        generator = np.random.default_rng(0)
        return generator.poisson(mean_val, size=shape).astype(dtype)

    def check_downscaled(self, downscaled, shape, scale_factor=2):
        """Assert each pyramid level halves only the trailing two dimensions."""
        expected = shape
        for level in downscaled:
            assert level.shape == expected
            head, tail = expected[:-2], expected[-2:]
            expected = head + tuple(extent // scale_factor for extent in tail)

    def test_nearest(self, shape):
        data = self.create_data(shape)
        self.check_downscaled(Scaler().nearest(data), shape)

    # this fails because of wrong channel dimension; need to fix in follow-up PR
    @pytest.mark.xfail
    def test_gaussian(self, shape):
        data = self.create_data(shape)
        self.check_downscaled(Scaler().gaussian(data), shape)

    # this fails because of wrong channel dimension; need to fix in follow-up PR
    @pytest.mark.xfail
    def test_laplacian(self, shape):
        data = self.create_data(shape)
        self.check_downscaled(Scaler().laplacian(data), shape)

    def test_local_mean(self, shape):
        data = self.create_data(shape)
        self.check_downscaled(Scaler().local_mean(data), shape)

    @pytest.mark.skip(reason="This test does not terminate")
    def test_zoom(self, shape):
        data = self.create_data(shape)
        self.check_downscaled(Scaler().zoom(data), shape)
| [
"pytest.fixture",
"pytest.mark.skip",
"numpy.random.default_rng",
"ome_zarr.scale.Scaler"
] | [((93, 193), 'pytest.fixture', 'pytest.fixture', ([], {'params': '((1, 2, 1, 256, 256), (3, 512, 512), (256, 256))', 'ids': "['5D', '3D', '2D']"}), "(params=((1, 2, 1, 256, 256), (3, 512, 512), (256, 256)), ids\n =['5D', '3D', '2D'])\n", (107, 193), False, 'import pytest\n'), ((1794, 1849), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""This test does not terminate"""'}), "(reason='This test does not terminate')\n", (1810, 1849), False, 'import pytest\n'), ((396, 420), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (417, 420), True, 'import numpy as np\n'), ((892, 900), 'ome_zarr.scale.Scaler', 'Scaler', ([], {}), '()\n', (898, 900), False, 'from ome_zarr.scale import Scaler\n'), ((1189, 1197), 'ome_zarr.scale.Scaler', 'Scaler', ([], {}), '()\n', (1195, 1197), False, 'from ome_zarr.scale import Scaler\n'), ((1488, 1496), 'ome_zarr.scale.Scaler', 'Scaler', ([], {}), '()\n', (1494, 1496), False, 'from ome_zarr.scale import Scaler\n'), ((1685, 1693), 'ome_zarr.scale.Scaler', 'Scaler', ([], {}), '()\n', (1691, 1693), False, 'from ome_zarr.scale import Scaler\n'), ((1938, 1946), 'ome_zarr.scale.Scaler', 'Scaler', ([], {}), '()\n', (1944, 1946), False, 'from ome_zarr.scale import Scaler\n')] |
# BSD 3-Clause License
# Copyright (c) 2019, regain authors
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
import time
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
from collections import defaultdict
from functools import partial
from itertools import product
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import binom
from scipy.stats import rankdata
from sklearn.base import clone, is_classifier
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.model_selection import GridSearchCV, ParameterGrid, ShuffleSplit, StratifiedShuffleSplit
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import _aggregate_score_dicts, _fit_and_score
from sklearn.utils import deprecated
from sklearn.utils._joblib import Parallel, delayed
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.validation import indexable
# NOTE(review): this silences *all* warnings for any process that imports this
# module (a module-level side effect) — consider scoping the filter to the
# specific warning categories actually expected.
warnings.simplefilter("ignore")
def global_instability(estimators):
    """Computes instability of the graphs inferred from estimators.

    For every possible edge (upper-triangular entry of the precision
    matrix), the empirical selection frequency across the estimators is
    turned into the edge-wise instability ``2 * p * (1 - p)``; the result
    is the average over all possible edges (and time points, for
    time-varying models).

    Parameters
    ----------
    estimators : list of fitted graphical model estimators
        Each estimator must expose ``get_precision()`` returning either a
        2D precision matrix or a 3D stack of per-time precision matrices;
        the inferred adjacency structure (non-zero off-diagonal entries)
        is used to compute the global instability.

    Returns
    -------
    float
        Instability value.
    """
    precisions = [estimator.get_precision() for estimator in estimators]
    if precisions[0].ndim == 2:
        n_times = 1
        triu_idx = np.triu_indices_from(precisions[0], 1)
        # Accumulate in float: np.zeros_like would inherit an integer dtype
        # from integer-valued precisions and make the in-place true
        # division below fail.
        mean_connectivity = np.zeros(len(triu_idx[0]), dtype=float)
        for c in precisions:
            mean_connectivity += (c[triu_idx] != 0).astype(int)
    else:
        # Time-varying precisions: one edge-frequency vector per time point.
        n_times = precisions[0].shape[0]
        triu_idx = np.triu_indices_from(precisions[0][0], 1)
        mean_connectivity = np.zeros((n_times, len(triu_idx[0])), dtype=float)
        for c in precisions:
            for i in range(n_times):
                mean_connectivity[i] += (c[i][triu_idx] != 0).astype(int)
    mean_connectivity /= len(estimators)

    # Edge-wise instability, averaged over all binom(p, 2) possible edges
    # (per time point).
    xi_matrix = 2 * mean_connectivity * (1 - mean_connectivity)
    return np.sum(xi_matrix) / (binom(precisions[0].shape[1], 2) * n_times)
def graphlet_instability(estimators):
    """Computes graphlet instability of the graphs inferred from estimators.

    Each inferred network (non-zero off-diagonal structure of the precision
    matrix) is summarized by its graphlet degree vectors (graphlet size 4)
    and every pair of networks is compared through the graphlet correlation
    distance (GCD); the result is the mean pairwise distance.

    Parameters
    ----------
    estimators : list of fitted graphical model estimators
        Each estimator must expose ``get_precision()`` returning either a
        2D precision matrix or a 3D stack of per-time precision matrices.

    Returns
    -------
    float
        Graphlet instability value.
    """
    from netanalytics.graphlets import GCD, graphlet_degree_vectors
    import networkx as nx

    def _signature(adjacency):
        # Graphlet degree vectors of the network, diagonal removed.
        graph = nx.from_numpy_array(adjacency - np.diag(np.diag(adjacency)))
        return graphlet_degree_vectors(list(graph.nodes), list(graph.edges), graphlet_size=4)

    n = len(estimators)
    precisions = [estimator.get_precision() for estimator in estimators]

    if precisions[0].ndim == 2:
        signatures = [_signature(p) for p in precisions]
        distances = [
            GCD(signatures[i], signatures[j])[1]
            for i in range(n)
            for j in range(i + 1, n)
        ]
    else:
        # Time-varying case: average the per-time GCD over all time points.
        n_times = precisions[0].shape[0]
        signatures = [[_signature(p[t]) for t in range(n_times)] for p in precisions]
        distances = []
        for i in range(n):
            for j in range(i + 1, n):
                total = 0
                for t in range(n_times):
                    total += GCD(signatures[i][t], signatures[j][t])[1]
                distances.append(total / n_times)

    # Mean over the n * (n - 1) / 2 estimator pairs.
    return 2 / (n * (n - 1)) * np.sum(distances)
def upper_bound(estimators):
    """Upper bound on the global instability of the inferred graphs.

    Computes ``4 * theta * (1 - theta)`` where ``theta`` is the total
    edge-wise instability normalized by the number of possible edges
    (per time point, for time-varying models).

    Parameters
    ----------
    estimators : list of fitted graphical model estimators
        Each estimator must expose ``get_precision()`` returning either a
        2D precision matrix or a 3D stack of per-time precision matrices.

    Returns
    -------
    float
        Upper bound on the instability value.
    """
    precisions = [estimator.get_precision() for estimator in estimators]
    if precisions[0].ndim == 2:
        n_times = 1
        triu_idx = np.triu_indices_from(precisions[0], 1)
        # Accumulate in float: np.zeros_like would inherit an integer dtype
        # from integer-valued precisions and make the in-place true
        # division below fail.
        mean_connectivity = np.zeros(len(triu_idx[0]), dtype=float)
        for c in precisions:
            mean_connectivity += (c[triu_idx] != 0).astype(int)
    else:
        # Time-varying precisions: one edge-frequency vector per time point.
        n_times = precisions[0].shape[0]
        triu_idx = np.triu_indices_from(precisions[0][0], 1)
        mean_connectivity = np.zeros((n_times, len(triu_idx[0])), dtype=float)
        for c in precisions:
            for i in range(n_times):
                mean_connectivity[i] += (c[i][triu_idx] != 0).astype(int)
    mean_connectivity /= len(estimators)

    # Edge-wise instability: 2 * p_edge * (1 - p_edge).
    xi_matrix = 2 * mean_connectivity * (1 - mean_connectivity)

    p = precisions[0].shape[1]
    theta_hat = np.sum(xi_matrix)
    theta_hat = theta_hat / (p * (p - 1) / 2) if precisions[0].ndim == 2 else theta_hat / (n_times * p * (p - 1) / 2)
    return 4 * theta_hat * (1 - theta_hat)
def _check_param_order(param_grid):
"""Ensure that the parameters are in descending order.
This is required for stability to be correctly computed.
"""
if hasattr(param_grid, "items"):
param_grid = [param_grid]
pg = []
for p in param_grid:
for name, v in p.items():
pg.append((name, np.sort(v)[::-1]))
return dict(pg)
class GraphicalModelStabilitySelection(GridSearchCV):
"""Stability based search over specified parameter values for an estimator.
It implements a "fit" and a "score" method.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable, list/tuple, dict or None, default: None
Not used.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
    cv : int, cross-validation generator or an iterable, optional
        Ignored: it is replaced internally by a ``StratifiedShuffleSplit``
        built from ``n_repetitions`` and ``sampling_size``.
refit : boolean, string, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``. In that
case, the ``best_estimator_`` and ``best_parameters_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``.
return_train_score : boolean, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
mode: string, optional default='stars'
The alternative option is gstars.
If set to stars computes only the single edge stability. If gstars is
passed it computes graphlet stability.
sampling_size: int, optional default None
The sample size to each repetition of the stability procedure. If None
value is taken as ` int(min(10*np.sqrt(X.shape[0]), X.shape[0]-10))`
"""
    def __init__(
        self,
        estimator,
        param_grid,
        scoring=None,
        n_jobs=None,
        iid="deprecated",
        refit=True,
        cv="warn",
        verbose=0,
        pre_dispatch="2*n_jobs",
        error_score="raise-deprecating",
        mode="stars",
        return_train_score=False,
        n_repetitions=10,
        sampling_size=None,
    ):
        # The ``cv`` argument is ignored: stability selection draws
        # ``n_repetitions`` random subsamples of size ``sampling_size``
        # instead of standard cross-validation folds.
        super(GraphicalModelStabilitySelection, self).__init__(
            estimator=estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            iid=iid,
            refit=refit,
            cv=StratifiedShuffleSplit(train_size=sampling_size, n_splits=n_repetitions),
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
            param_grid=param_grid,
        )
        self.mode = mode
        self.n_repetitions = n_repetitions
        self.sampling_size = sampling_size
        # Enforce descending parameter order: required for the stability
        # (instability path) to be correctly computed.
        self.param_grid = _check_param_order(param_grid)
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
if self.sampling_size is None:
self.sampling_size = int(min(10 * np.sqrt(X.shape[0]), X.shape[0] - 10))
self.cv = StratifiedShuffleSplit(train_size=self.sampling_size, n_splits=self.n_repetitions)
if y is not None:
n_classes = np.unique(y).shape[0]
if self.sampling_size % n_classes != 0:
warnings.warn("Changing sampling size, divisible for the " "number of classes.")
self.sampling_size = (self.sampling_size // n_classes) * n_classes
self.cv = StratifiedShuffleSplit(
n_splits=self.n_repetitions,
train_size=self.sampling_size,
test_size=X.shape[0] - self.sampling_size,
)
else:
y = np.ones(X.shape[0])
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(self.estimator, scoring=self.scoring)
if self.multimetric_:
if (
self.refit is not False
and (
not isinstance(self.refit, str)
or self.refit not in scorers # This will work for both dict / list (tuple)
)
and not callable(self.refit)
):
raise ValueError(
"For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed." % self.refit
)
else:
refit_metric = self.refit
else:
# refit_metric = 'score'
refit_metric = "instability"
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(
scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose,
return_estimator=True,
)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print(
"Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)
)
out = parallel(
delayed(_fit_and_score)(
clone(base_estimator),
X,
y,
train=train,
test=test,
parameters=parameters,
**fit_and_score_kwargs
)
for parameters, (train, test) in product(candidate_params, cv.split(X, y, groups))
)
if len(out) < 1:
raise ValueError(
"No fits were performed. " "Was the CV iterator empty? " "Were there no candidates?"
)
elif len(out) != n_candidates * n_splits:
raise ValueError(
"cv.split and cv.get_n_splits returned "
"inconsistent results. Expected {} "
"splits, got {}".format(n_splits, len(out) // n_candidates)
)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(all_candidate_params, scorers, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, (int, np.integer)):
raise TypeError("best_index_ returned is not an integer")
if self.best_index_ < 0 or self.best_index_ >= len(results["params"]):
raise IndexError("best_index_ index out of range")
else:
self.best_index_ = results["rank_test_%s" % refit_metric].argmin()
print(self.best_index_)
self.best_score_ = results["mean_test_%s" % refit_metric][self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
self.best_estimator_ = clone(base_estimator).set_params(**self.best_params_)
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers["score"]
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, scorers, n_splits, out):
    """Aggregate per-(candidate, split) fit results into a cv_results_ dict.

    Parameters
    ----------
    candidate_params : list of dict
        One hyper-parameter setting per candidate.
    scorers : dict
        Mapping of scorer name -> scoring callable.
    n_splits : int
        Number of CV splits evaluated for each candidate.
    out : list
        Flat list of per-(candidate, split) results, iterated first by
        candidate and then by split.

    Returns
    -------
    dict
        cv_results_-style mapping with split/mean/std/rank entries per
        metric, plus the instability-based ranking used for model
        selection (key suffix ``test_instability``).
    """
    n_candidates = len(candidate_params)
    # "out" carries train-score info only when the caller requested it.
    if self.return_train_score:
        (train_score_dicts, test_score_dicts, test_sample_counts,
         fit_time, score_time, estimators) = zip(*out)
    else:
        (test_score_dicts, test_sample_counts,
         fit_time, score_time, estimators) = zip(*out)
    # test_score_dicts and train_score_dicts are lists of dictionaries;
    # turn them into a dict of lists.
    test_scores = _aggregate_score_dicts(test_score_dicts)
    if self.return_train_score:
        train_scores = _aggregate_score_dicts(train_score_dicts)
    results = {}

    def _store(key_name, array, weights=None, splits=False, rank=False):
        """A small helper to store the scores/times to the cv_results_"""
        # When iterated first by splits, then by parameters,
        # `array` must have n_candidates rows and n_splits cols.
        array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
        if splits:
            for split_i in range(n_splits):
                # Uses closure to alter `results`.
                results["split%d_%s" % (split_i, key_name)] = array[:, split_i]
        array_means = np.average(array, axis=1, weights=weights)
        results["mean_%s" % key_name] = array_means
        # Weighted std is not directly available in numpy.
        array_stds = np.sqrt(
            np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)
        )
        results["std_%s" % key_name] = array_stds
        if rank:
            results["rank_%s" % key_name] = np.asarray(
                rankdata(-array_means, method="min"), dtype=np.int32
            )

    _store("fit_time", fit_time)
    _store("score_time", score_time)
    # Use one MaskedArray and mask all the places where the param is not
    # applicable for that candidate. Use defaultdict as each candidate may
    # not contain all the params.
    param_results = defaultdict(
        partial(
            MaskedArray,
            np.empty(n_candidates),
            mask=True,
            dtype=object,
        )
    )
    for cand_i, params in enumerate(candidate_params):
        for name, value in params.items():
            # An all-masked empty array gets created for the key
            # `"param_%s" % name` at the first occurrence of `name`.
            # Setting the value at an index also unmasks that index.
            param_results["param_%s" % name][cand_i] = value
    results.update(param_results)
    # Store a list of param dicts at the key 'params'.
    results["params"] = candidate_params
    # NOTE: test_sample counts (weights) remain the same for all candidates.
    # Use the builtin `int`: the `np.int` alias (identical to `int`) was
    # deprecated in NumPy 1.20 and removed in 1.24.
    test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=int)
    if self.iid != "deprecated":
        warnings.warn(
            "The parameter 'iid' is deprecated in 0.22 and will be removed in 0.24.",
            DeprecationWarning,
        )
        iid = self.iid
    else:
        iid = False
    for scorer_name in scorers.keys():
        # Compute the (weighted) mean and std for test scores alone.
        _store(
            "test_%s" % scorer_name,
            test_scores[scorer_name],
            splits=True,
            rank=True,
            weights=test_sample_counts if iid else None,
        )
        if self.return_train_score:
            _store("train_%s" % scorer_name, train_scores[scorer_name], splits=True)
    estimators = np.asarray(estimators).reshape(n_candidates, n_splits)
    array_means = np.array([global_instability(e_split) for e_split in estimators])
    # Monotonize instabilities -- requires candidate parameters ordered
    # from high sparsity to low.
    # NOTE(review): `array_means[:i]` excludes element i itself, i.e. this
    # is a running max over the *previous* entries only; if a standard
    # cumulative maximum including the current entry is intended, it
    # should read `array_means[:i + 1]` -- confirm before changing.
    monotonized_instabilities = [array_means[0]] + [
        np.max(array_means[:i]) for i in range(1, array_means.size)
    ]
    monotonized_instabilities = np.array(monotonized_instabilities)
    self.monotonized_instabilities = np.copy(monotonized_instabilities)
    if self.mode.lower() == "gstars":
        # G-StARS: rank candidates by graphlet instability, restricted to
        # the index window [ub, lb] where the monotonized instability and
        # its upper bound both fall at or below the 0.05 threshold.
        graphlets_stability = np.array([graphlet_instability(e_split) for e_split in estimators])
        self.graphlets_instabilities = np.copy(graphlets_stability)
        upper_bounds = np.array([upper_bound(e_split) for e_split in estimators])
        # Same monotonization scheme (and same `[:i]` caveat) as above.
        upper_bounds = [upper_bounds[0]] + [
            np.max(upper_bounds[:i]) for i in range(1, upper_bounds.size)
        ]
        self.upper_bounds = np.array(upper_bounds)
        lb = np.where(np.array(monotonized_instabilities) <= 0.05)[0]
        ub = np.where(np.array(upper_bounds) <= 0.05)[0]
        lb = lb[-1] if lb.size != 0 else len(monotonized_instabilities)
        ub = ub[-1] if ub.size != 0 else 0
        self.lower_bound = lb
        self.upper_bound = ub
        # Exclude candidates outside [ub, lb] from the ranking.
        graphlets_stability[0:ub] = np.inf
        graphlets_stability[lb + 1 :] = np.inf
        key_name = "test_instability"
        results["raw_%s" % key_name] = array_means
        results["mean_%s" % key_name] = monotonized_instabilities
        results["rank_%s" % key_name] = np.asarray(
            rankdata(graphlets_stability, method="min"), dtype=np.int32
        )
    else:
        # Plain StARS: discard candidates above the 0.05 threshold by
        # sending them to -inf, so that under `-instability` they rank
        # last; rank 1 goes to the largest admissible instability.
        monotonized_instabilities[monotonized_instabilities > 0.05] = -np.inf
        key_name = "test_instability"
        results["raw_%s" % key_name] = array_means
        results["mean_%s" % key_name] = monotonized_instabilities
        results["rank_%s" % key_name] = np.asarray(
            rankdata(-monotonized_instabilities, method="min"), dtype=np.int32
        )
    self.results = results
    return results
def plot(self, axis=None, figsize=(15, 10), filename="", fontsize=15):
    """Plot the instability curves obtained on the data.

    Parameters
    ----------
    axis : matplotlib axes (or array of two axes in "gstars" mode), optional
        Axes to draw on; created via ``plt.subplots`` when None.
    figsize : tuple, optional
        Figure size used when axes are created here.
    filename : str, optional
        When non-empty, also save the figure to this path at 300 dpi.
    fontsize : int, optional
        Global matplotlib font size applied via rcParams.
    """
    matplotlib.rcParams.update({"font.size": fontsize})
    if self.mode.lower() == "gstars":
        if axis is None:
            fig, axis = plt.subplots(2, figsize=figsize)
        positions = np.arange(len(self.monotonized_instabilities))
        top, bottom = axis[0], axis[1]
        # Top panel: monotonized instabilities, their upper bound, the
        # 0.05 threshold, and the selected index window.
        top.plot(self.monotonized_instabilities, label="Instabilities")
        top.plot(np.array(self.upper_bounds), label="Upper bound instabilities")
        top.axhline(0.05, color="red")
        top.axvline(self.lower_bound, color="violet", label="Lower bound")
        top.axvline(self.upper_bound, color="green", label="Upper bound")
        top.grid()
        top.legend()
        top.set_xticks(positions)
        top.set_xticklabels(self.results["params"])
        # Bottom panel: graphlet instabilities with the same window.
        bottom.plot(self.graphlets_instabilities, label="Graphlet instabilities")
        bottom.axvline(self.lower_bound, color="violet")
        bottom.axvline(self.upper_bound, color="green")
        bottom.grid()
        bottom.legend()
        bottom.set_xticks(positions)
        bottom.set_xticklabels(self.results["params"])
        for label in top.get_xticklabels():
            label.set_rotation(90)
        for label in bottom.get_xticklabels():
            label.set_rotation(90)
        plt.tight_layout()
        if filename != "":
            plt.savefig(filename, dpi=300, bbox_inches="tight", transparent=True)
        plt.show()
    else:
        if axis is None:
            fig, axis = plt.subplots(figsize=figsize)
        axis.set_title("Monotonized instabilities")
        axis.plot(self.monotonized_instabilities)
        axis.axhline(0.05, color="red")
        axis.set_xticks(np.arange(len(self.monotonized_instabilities)))
        axis.set_xticklabels(self.results["params"])
        for label in axis.get_xticklabels():
            label.set_rotation(90)
        if filename != "":
            plt.savefig(filename, dpi=300, bbox_inches="tight", transparent=True)
        plt.show()
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.sqrt",
"sklearn.model_selection._validation._aggregate_score_dicts",
"numpy.array",
"netanalytics.graphlets.GCD",
"sklearn.utils._joblib.delayed",
"sklearn.metrics.scorer._check_multimetric_scoring",
"sklearn.base.is_classifier",
"numpy.sort",... | [((2350, 2381), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2371, 2381), False, 'import warnings\n'), ((6401, 6418), 'numpy.sum', 'np.sum', (['xi_matrix'], {}), '(xi_matrix)\n', (6407, 6418), True, 'import numpy as np\n'), ((2940, 2978), 'numpy.triu_indices_from', 'np.triu_indices_from', (['precisions[0]', '(1)'], {}), '(precisions[0], 1)\n', (2960, 2978), True, 'import numpy as np\n'), ((3255, 3296), 'numpy.triu_indices_from', 'np.triu_indices_from', (['precisions[0][0]', '(1)'], {}), '(precisions[0][0], 1)\n', (3275, 3296), True, 'import numpy as np\n'), ((3667, 3684), 'numpy.sum', 'np.sum', (['xi_matrix'], {}), '(xi_matrix)\n', (3673, 3684), True, 'import numpy as np\n'), ((5445, 5462), 'numpy.sum', 'np.sum', (['distances'], {}), '(distances)\n', (5451, 5462), True, 'import numpy as np\n'), ((5638, 5676), 'numpy.triu_indices_from', 'np.triu_indices_from', (['precisions[0]', '(1)'], {}), '(precisions[0], 1)\n', (5658, 5676), True, 'import numpy as np\n'), ((5953, 5994), 'numpy.triu_indices_from', 'np.triu_indices_from', (['precisions[0][0]', '(1)'], {}), '(precisions[0][0], 1)\n', (5973, 5994), True, 'import numpy as np\n'), ((13199, 13263), 'sklearn.metrics.scorer._check_multimetric_scoring', '_check_multimetric_scoring', (['self.estimator'], {'scoring': 'self.scoring'}), '(self.estimator, scoring=self.scoring)\n', (13225, 13263), False, 'from sklearn.metrics.scorer import _check_multimetric_scoring\n'), ((14321, 14344), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (14330, 14344), False, 'from sklearn.utils.validation import indexable\n'), ((14419, 14440), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (14424, 14440), False, 'from sklearn.base import clone, is_classifier\n'), ((14461, 14548), 'sklearn.utils._joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose', 'pre_dispatch': 
'self.pre_dispatch'}), '(n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.\n pre_dispatch)\n', (14469, 14548), False, 'from sklearn.utils._joblib import Parallel, delayed\n'), ((19130, 19170), 'sklearn.model_selection._validation._aggregate_score_dicts', '_aggregate_score_dicts', (['test_score_dicts'], {}), '(test_score_dicts)\n', (19152, 19170), False, 'from sklearn.model_selection._validation import _aggregate_score_dicts, _fit_and_score\n'), ((21539, 21592), 'numpy.array', 'np.array', (['test_sample_counts[:n_splits]'], {'dtype': 'np.int'}), '(test_sample_counts[:n_splits], dtype=np.int)\n', (21547, 21592), True, 'import numpy as np\n'), ((22750, 22785), 'numpy.array', 'np.array', (['monotonized_instabilities'], {}), '(monotonized_instabilities)\n', (22758, 22785), True, 'import numpy as np\n'), ((22827, 22861), 'numpy.copy', 'np.copy', (['monotonized_instabilities'], {}), '(monotonized_instabilities)\n', (22834, 22861), True, 'import numpy as np\n'), ((24721, 24772), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': fontsize}"], {}), "({'font.size': fontsize})\n", (24747, 24772), False, 'import matplotlib\n'), ((3007, 3035), 'numpy.zeros_like', 'np.zeros_like', (['precisions[0]'], {}), '(precisions[0])\n', (3020, 3035), True, 'import numpy as np\n'), ((3688, 3720), 'scipy.special.binom', 'binom', (['precisions[0].shape[1]', '(2)'], {}), '(precisions[0].shape[1], 2)\n', (3693, 3720), False, 'from scipy.special import binom\n'), ((5705, 5733), 'numpy.zeros_like', 'np.zeros_like', (['precisions[0]'], {}), '(precisions[0])\n', (5718, 5733), True, 'import numpy as np\n'), ((12422, 12509), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'train_size': 'self.sampling_size', 'n_splits': 'self.n_repetitions'}), '(train_size=self.sampling_size, n_splits=self.\n n_repetitions)\n', (12444, 12509), False, 'from sklearn.model_selection import GridSearchCV, ParameterGrid, ShuffleSplit, 
StratifiedShuffleSplit\n'), ((13070, 13089), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (13077, 13089), True, 'import numpy as np\n'), ((17997, 18008), 'time.time', 'time.time', ([], {}), '()\n', (18006, 18008), False, 'import time\n'), ((18205, 18216), 'time.time', 'time.time', ([], {}), '()\n', (18214, 18216), False, 'import time\n'), ((19234, 19275), 'sklearn.model_selection._validation._aggregate_score_dicts', '_aggregate_score_dicts', (['train_score_dicts'], {}), '(train_score_dicts)\n', (19256, 19275), False, 'from sklearn.model_selection._validation import _aggregate_score_dicts, _fit_and_score\n'), ((19921, 19963), 'numpy.average', 'np.average', (['array'], {'axis': '(1)', 'weights': 'weights'}), '(array, axis=1, weights=weights)\n', (19931, 19963), True, 'import numpy as np\n'), ((21643, 21759), 'warnings.warn', 'warnings.warn', (['"""The parameter \'iid\' is deprecated in 0.22 and will be removed in 0.24."""', 'DeprecationWarning'], {}), '(\n "The parameter \'iid\' is deprecated in 0.22 and will be removed in 0.24.",\n DeprecationWarning)\n', (21656, 21759), False, 'import warnings\n'), ((23050, 23078), 'numpy.copy', 'np.copy', (['graphlets_stability'], {}), '(graphlets_stability)\n', (23057, 23078), True, 'import numpy as np\n'), ((23309, 23331), 'numpy.array', 'np.array', (['upper_bounds'], {}), '(upper_bounds)\n', (23317, 23331), True, 'import numpy as np\n'), ((26077, 26095), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26093, 26095), True, 'import matplotlib.pyplot as plt\n'), ((26225, 26235), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26233, 26235), True, 'import matplotlib.pyplot as plt\n'), ((26839, 26849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26847, 26849), True, 'import matplotlib.pyplot as plt\n'), ((10984, 11056), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'train_size': 'sampling_size', 'n_splits': 'n_repetitions'}), 
'(train_size=sampling_size, n_splits=n_repetitions)\n', (11006, 11056), False, 'from sklearn.model_selection import GridSearchCV, ParameterGrid, ShuffleSplit, StratifiedShuffleSplit\n'), ((12645, 12722), 'warnings.warn', 'warnings.warn', (['"""Changing sampling size, divisible for the number of classes."""'], {}), "('Changing sampling size, divisible for the number of classes.')\n", (12658, 12722), False, 'import warnings\n'), ((12835, 12965), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'self.n_repetitions', 'train_size': 'self.sampling_size', 'test_size': '(X.shape[0] - self.sampling_size)'}), '(n_splits=self.n_repetitions, train_size=self.\n sampling_size, test_size=X.shape[0] - self.sampling_size)\n', (12857, 12965), False, 'from sklearn.model_selection import GridSearchCV, ParameterGrid, ShuffleSplit, StratifiedShuffleSplit\n'), ((13135, 13159), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (13148, 13159), False, 'from sklearn.base import clone, is_classifier\n'), ((20115, 20193), 'numpy.average', 'np.average', (['((array - array_means[:, np.newaxis]) ** 2)'], {'axis': '(1)', 'weights': 'weights'}), '((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)\n', (20125, 20193), True, 'import numpy as np\n'), ((20759, 20781), 'numpy.empty', 'np.empty', (['n_candidates'], {}), '(n_candidates)\n', (20767, 20781), True, 'import numpy as np\n'), ((22351, 22373), 'numpy.asarray', 'np.asarray', (['estimators'], {}), '(estimators)\n', (22361, 22373), True, 'import numpy as np\n'), ((22653, 22676), 'numpy.max', 'np.max', (['array_means[:i]'], {}), '(array_means[:i])\n', (22659, 22676), True, 'import numpy as np\n'), ((23979, 24022), 'scipy.stats.rankdata', 'rankdata', (['graphlets_stability'], {'method': '"""min"""'}), "(graphlets_stability, method='min')\n", (23987, 24022), False, 'from scipy.stats import rankdata\n'), ((24409, 24459), 'scipy.stats.rankdata', 'rankdata', 
(['(-monotonized_instabilities)'], {'method': '"""min"""'}), "(-monotonized_instabilities, method='min')\n", (24417, 24459), False, 'from scipy.stats import rankdata\n'), ((24872, 24904), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': 'figsize'}), '(2, figsize=figsize)\n', (24884, 24904), True, 'import matplotlib.pyplot as plt\n'), ((25010, 25037), 'numpy.array', 'np.array', (['self.upper_bounds'], {}), '(self.upper_bounds)\n', (25018, 25037), True, 'import numpy as np\n'), ((26143, 26212), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)', 'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "(filename, dpi=300, bbox_inches='tight', transparent=True)\n", (26154, 26212), True, 'import matplotlib.pyplot as plt\n'), ((26307, 26336), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (26319, 26336), True, 'import matplotlib.pyplot as plt\n'), ((26757, 26826), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)', 'bbox_inches': '"""tight"""', 'transparent': '(True)'}), "(filename, dpi=300, bbox_inches='tight', transparent=True)\n", (26768, 26826), True, 'import matplotlib.pyplot as plt\n'), ((3335, 3366), 'numpy.zeros_like', 'np.zeros_like', (['precisions[0][0]'], {}), '(precisions[0][0])\n', (3348, 3366), True, 'import numpy as np\n'), ((6033, 6064), 'numpy.zeros_like', 'np.zeros_like', (['precisions[0][0]'], {}), '(precisions[0][0])\n', (6046, 6064), True, 'import numpy as np\n'), ((12555, 12567), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (12564, 12567), True, 'import numpy as np\n'), ((17912, 17933), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (17917, 17933), False, 'from sklearn.base import clone, is_classifier\n'), ((19617, 19650), 'numpy.array', 'np.array', (['array'], {'dtype': 'np.float64'}), '(array, dtype=np.float64)\n', (19625, 19650), True, 'import numpy as np\n'), ((20330, 20366), 
'scipy.stats.rankdata', 'rankdata', (['(-array_means)'], {'method': '"""min"""'}), "(-array_means, method='min')\n", (20338, 20366), False, 'from scipy.stats import rankdata\n'), ((23214, 23238), 'numpy.max', 'np.max', (['upper_bounds[:i]'], {}), '(upper_bounds[:i])\n', (23220, 23238), True, 'import numpy as np\n'), ((4490, 4500), 'numpy.diag', 'np.diag', (['p'], {}), '(p)\n', (4497, 4500), True, 'import numpy as np\n'), ((4736, 4757), 'netanalytics.graphlets.GCD', 'GCD', (['gdvs[i]', 'gdvs[j]'], {}), '(gdvs[i], gdvs[j])\n', (4739, 4757), False, 'from netanalytics.graphlets import GCD, graphlet_degree_vectors\n'), ((5330, 5357), 'netanalytics.graphlets.GCD', 'GCD', (['gdvs[i][t]', 'gdvs[j][t]'], {}), '(gdvs[i][t], gdvs[j][t])\n', (5333, 5357), False, 'from netanalytics.graphlets import GCD, graphlet_degree_vectors\n'), ((6919, 6929), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (6926, 6929), True, 'import numpy as np\n'), ((12361, 12380), 'numpy.sqrt', 'np.sqrt', (['X.shape[0]'], {}), '(X.shape[0])\n', (12368, 12380), True, 'import numpy as np\n'), ((23358, 23393), 'numpy.array', 'np.array', (['monotonized_instabilities'], {}), '(monotonized_instabilities)\n', (23366, 23393), True, 'import numpy as np\n'), ((23432, 23454), 'numpy.array', 'np.array', (['upper_bounds'], {}), '(upper_bounds)\n', (23440, 23454), True, 'import numpy as np\n'), ((4975, 4988), 'numpy.diag', 'np.diag', (['p[t]'], {}), '(p[t])\n', (4982, 4988), True, 'import numpy as np\n'), ((15518, 15541), 'sklearn.utils._joblib.delayed', 'delayed', (['_fit_and_score'], {}), '(_fit_and_score)\n', (15525, 15541), False, 'from sklearn.utils._joblib import Parallel, delayed\n'), ((15567, 15588), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (15572, 15588), False, 'from sklearn.base import clone, is_classifier\n')] |
#!/usr/bin/env python
# -*- noplot -*-
"""
This example demonstrates how to set hyperlinks on various kinds of elements.
This currently only works with the SVG backend.
"""
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt

# Scatter plot whose first two points carry outbound links.
scatter_fig = plt.figure()
scatter_artist = plt.scatter([1, 2, 3], [4, 5, 6])
scatter_artist.set_urls(['http://www.bbc.co.uk/news', 'http://www.google.com', None])
scatter_fig.canvas.print_figure('scatter.svg')

# Image built as a difference of Gaussians, linked to a single URL.
# NOTE(review): mlab.bivariate_normal was removed in newer matplotlib
# releases -- this demo requires an older matplotlib.
image_fig = plt.figure()
delta = 0.025
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1  # difference of Gaussians
image_artist = plt.imshow(Z, interpolation='bilinear', cmap=cm.gray,
                        origin='lower', extent=[-3, 3, -3, 3])
image_artist.set_url('http://www.google.com')
image_fig.canvas.print_figure('image.svg')
| [
"matplotlib.pyplot.imshow",
"matplotlib.mlab.bivariate_normal",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"numpy.meshgrid",
"numpy.arange"
] | [((292, 304), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (302, 304), True, 'import matplotlib.pyplot as plt\n'), ((309, 342), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[1, 2, 3]', '[4, 5, 6]'], {}), '([1, 2, 3], [4, 5, 6])\n', (320, 342), True, 'import matplotlib.pyplot as plt\n'), ((451, 463), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (461, 463), True, 'import matplotlib.pyplot as plt\n'), ((486, 513), 'numpy.arange', 'np.arange', (['(-3.0)', '(3.0)', 'delta'], {}), '(-3.0, 3.0, delta)\n', (495, 513), True, 'import numpy as np\n'), ((521, 538), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (532, 538), True, 'import numpy as np\n'), ((544, 591), 'matplotlib.mlab.bivariate_normal', 'mlab.bivariate_normal', (['X', 'Y', '(1.0)', '(1.0)', '(0.0)', '(0.0)'], {}), '(X, Y, 1.0, 1.0, 0.0, 0.0)\n', (565, 591), True, 'import matplotlib.mlab as mlab\n'), ((597, 640), 'matplotlib.mlab.bivariate_normal', 'mlab.bivariate_normal', (['X', 'Y', '(1.5)', '(0.5)', '(1)', '(1)'], {}), '(X, Y, 1.5, 0.5, 1, 1)\n', (618, 640), True, 'import matplotlib.mlab as mlab\n'), ((684, 780), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Z'], {'interpolation': '"""bilinear"""', 'cmap': 'cm.gray', 'origin': '"""lower"""', 'extent': '[-3, 3, -3, 3]'}), "(Z, interpolation='bilinear', cmap=cm.gray, origin='lower',\n extent=[-3, 3, -3, 3])\n", (694, 780), True, 'import matplotlib.pyplot as plt\n')] |
import cv2
import numpy as np

# Webcam capture; the HSV window below selects blue hues.
camera = cv2.VideoCapture(0)
blue_lower = np.array([100, 150, 0])
blue_upper = np.array([140, 255, 255])
# Small kernel for opening (removes white speckle noise); large kernel
# for closing (fills black holes inside detected blobs).
kernel_open = np.ones((5, 5))
kernel_close = np.ones((15, 15))
while True:
    ret, raw_frame = camera.read()
    frame = cv2.resize(raw_frame, (340, 220))
    # Convert to HSV and threshold on the blue range.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, blue_lower, blue_upper)
    opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_open)
    closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel_close)
    contours, hierarchy = cv2.findContours(closed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(frame, contours, -1, (0, 0, 255), 4)
    for idx, contour in enumerate(contours):
        # Bounding box of this blob, labelled with its 1-based index.
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(frame, str(idx + 1), (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), lineType=cv2.LINE_AA)
    cv2.imshow("close", closed)
    cv2.imshow("open", opened)
    cv2.imshow("HSV", mask)
    cv2.imshow("Normal", frame)
    if cv2.waitKey(1) == 13:  # Enter key exits the loop
        break
camera.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.drawContours",
"numpy.ones",
"cv2.inRange",
"cv2.imshow",
"numpy.array",
"cv2.morphologyEx",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.findContours",
"cv2.resize",
"cv2.waitKey",
"cv2.boundingRect"
] | [((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2\n'), ((88, 111), 'numpy.array', 'np.array', (['[100, 150, 0]'], {}), '([100, 150, 0])\n', (96, 111), True, 'import numpy as np\n'), ((121, 146), 'numpy.array', 'np.array', (['[140, 255, 255]'], {}), '([140, 255, 255])\n', (129, 146), True, 'import numpy as np\n'), ((157, 172), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (164, 172), True, 'import numpy as np\n'), ((185, 202), 'numpy.ones', 'np.ones', (['(15, 15)'], {}), '((15, 15))\n', (192, 202), True, 'import numpy as np\n'), ((1204, 1227), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1225, 1227), False, 'import cv2\n'), ((249, 278), 'cv2.resize', 'cv2.resize', (['photo', '(340, 220)'], {}), '(photo, (340, 220))\n', (259, 278), False, 'import cv2\n'), ((315, 351), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (327, 351), False, 'import cv2\n'), ((360, 403), 'cv2.inRange', 'cv2.inRange', (['imgHsv', 'blue_lower', 'blue_upper'], {}), '(imgHsv, blue_lower, blue_upper)\n', (371, 403), False, 'import cv2\n'), ((485, 536), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel_open'], {}), '(mask, cv2.MORPH_OPEN, kernel_open)\n', (501, 536), False, 'import cv2\n'), ((600, 653), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel_close'], {}), '(mask, cv2.MORPH_CLOSE, kernel_close)\n', (616, 653), False, 'import cv2\n'), ((664, 727), 'cv2.findContours', 'cv2.findContours', (['maskC', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(maskC, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (680, 727), False, 'import cv2\n'), ((730, 778), 'cv2.drawContours', 'cv2.drawContours', (['img', 'conts', '(-1)', '(0, 0, 255)', '(4)'], {}), '(img, conts, -1, (0, 0, 255), 4)\n', (746, 778), False, 'import cv2\n'), ((1036, 1062), 'cv2.imshow', 'cv2.imshow', (['"""close"""', 'maskC'], {}), "('close', 
maskC)\n", (1046, 1062), False, 'import cv2\n'), ((1066, 1091), 'cv2.imshow', 'cv2.imshow', (['"""open"""', 'maskO'], {}), "('open', maskO)\n", (1076, 1091), False, 'import cv2\n'), ((1095, 1118), 'cv2.imshow', 'cv2.imshow', (['"""HSV"""', 'mask'], {}), "('HSV', mask)\n", (1105, 1118), False, 'import cv2\n'), ((1122, 1147), 'cv2.imshow', 'cv2.imshow', (['"""Normal"""', 'img'], {}), "('Normal', img)\n", (1132, 1147), False, 'import cv2\n'), ((821, 847), 'cv2.boundingRect', 'cv2.boundingRect', (['conts[i]'], {}), '(conts[i])\n', (837, 847), False, 'import cv2\n'), ((878, 936), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (891, 936), False, 'import cv2\n'), ((1155, 1169), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1166, 1169), False, 'import cv2\n')] |
import numpy as np
import pandas as pd
class Simulation:
    """Simulate repeatedly drawing one of N coins and flipping it.

    Each of ``n_sequences`` rows is produced by uniformly picking one of
    ``len(p)`` coins and flipping it ``n_reps_per_sequence`` times.
    """

    def __init__(self,
                 n_sequences=10,
                 n_reps_per_sequence=7,
                 p=(0.1, 0.8),
                 seed=0):
        # Seed the global numpy RNG so repeated runs are reproducible.
        np.random.seed(seed)
        self.n_sequences = n_sequences
        self.n_reps_per_sequence = n_reps_per_sequence
        self.p = p
        self.n_coins = len(p)

    def choose_coin(self):
        """Uniformly draw a coin index in [0, n_coins)."""
        return np.random.choice(range(self.n_coins), 1)[0]

    def one_sequence(self):
        """Pick a coin, flip it n_reps_per_sequence times.

        Returns a dict with the 0/1 flip outcomes under ``'values'`` and
        the chosen coin's true head probability under ``'true_p'``.
        """
        coin_idx = self.choose_coin()
        heads_prob = self.p[coin_idx]
        flips = np.random.binomial(1, heads_prob, size=self.n_reps_per_sequence)
        return {'values': flips, 'true_p': heads_prob}

    def run(self):
        """Generate all sequences as a DataFrame.

        Columns ``flip_0 .. flip_{k-1}`` hold the outcomes; ``true_p``
        holds the generating coin's head probability per row.
        """
        draws = [self.one_sequence() for _ in range(self.n_sequences)]
        frame = pd.DataFrame(np.array([draw['values'] for draw in draws]))
        frame.columns = ['flip_' + str(col) for col in range(self.n_reps_per_sequence)]
        frame['true_p'] = [draw['true_p'] for draw in draws]
        return frame
class EM:
    """Expectation-Maximization for a K-coin mixture of Bernoulli sequences.

    Model: each row of X is generated by a multinomial draw of one of K
    coins followed by a sequence of independent flips of that coin.  Two
    length-K parameter vectors are estimated: the Bernoulli P(heads) for
    each coin and the multinomial probabilities of drawing each coin.
    z_i denotes a length-K one-hot assignment of sequence i to a coin.

    Iterate until convergence:
      * E step: fix Q_ik = p(z_ik | x_i, theta)
                        = p(z_ik, x_i | theta) / p(x_i | theta)
      * M step: fix theta maximizing sum_i sum_k Q_ik log p(z_ik, x_i | theta)
    """

    def __init__(self, X, n_coins=2, p_diff_tol=0.0001, max_iter=100):
        self.seq_n = X.shape[1]
        self.X_full = X
        # Flips within a sequence are assumed independent, so the per-row
        # head count (and its complement) is a sufficient statistic.
        self.X = X.sum(axis=1)
        self.Xc = X.shape[1] - self.X
        self.K = n_coins
        self.p_diff_tol = p_diff_tol
        self.max_iter = max_iter

    def _as_vec_theta(self, theta):
        # Flatten the parameter dict into [bernoulli_p..., multinomial_p...].
        return np.concatenate((theta['bernoulli_p'], theta['multinomial_p']))

    def _as_dict_theta(self, theta):
        # Inverse of _as_vec_theta: the first K entries are the Bernoulli p's.
        return {
            'bernoulli_p': theta[:self.K],
            'multinomial_p': theta[self.K:]
        }

    def random_start(self):
        """Random Bernoulli p's, uniform mixing weights."""
        self.theta = {
            'bernoulli_p': np.random.uniform(size=self.K),
            'multinomial_p': np.ones(self.K) / self.K
        }
        self.theta_vec = self._as_vec_theta(self.theta)

    def _lph(self, p):
        """Log probability of the observed heads for a given p."""
        return self.X * np.log(p)

    def _lpt(self, p):
        """Log probability of the observed tails for a given p."""
        return self.Xc * np.log(1 - p)

    def log_pX_given_z(self):
        """Log p(x | z, theta): one column per coin."""
        per_coin = [self._lph(p) + self._lpt(p) for p in self.theta['bernoulli_p']]
        return np.array(per_coin).transpose()

    def log_pX_and_z(self, logPx_given_z):
        """Log p(x, z | theta): add the log mixing weights to each column."""
        log_prior = np.ones((len(self.X), len(self.theta['multinomial_p'])))
        log_prior *= np.log(self.theta['multinomial_p'])
        return logPx_given_z + log_prior

    def E_step(self):
        """Compute the matrix Q_ik with nrow(X) rows and K columns."""
        conditional = self.log_pX_given_z()
        joint = self.log_pX_and_z(conditional)
        # Marginal p(x): sum the joint over the K levels of z.
        log_marginal = np.log(np.exp(joint).sum(axis=1))
        self.Q = np.exp(joint - log_marginal.reshape((len(log_marginal), 1)))

    def maximize_step(self):
        """Maximum-likelihood update of theta given responsibilities Q.

        In general this requires solving the first-order conditions; here
        the closed forms are the Q-weighted head fraction per coin and the
        normalized column mass of Q.
        """
        head_frac = self.X / self.seq_n
        p_hat = (head_frac @ self.Q) / self.Q.sum(axis=0)
        q_hat = self.Q.sum(axis=0) / self.Q.sum()
        return np.concatenate((p_hat, q_hat))

    def run(self):
        """Random restart, then alternate E/M steps until the parameter
        vector moves less than p_diff_tol or max_iter is reached."""
        self.random_start()
        n_done = 0
        move = np.inf
        while (move > self.p_diff_tol) and (n_done < self.max_iter):
            n_done += 1
            self.E_step()
            updated = self.maximize_step()
            delta = self.theta_vec - updated
            move = np.sqrt(np.sum(delta ** 2))
            self.theta_vec = updated
            self.theta = self._as_dict_theta(updated)
        self.iter = n_done

    def fitted_bernoulli_p(self, mode=False):
        """
        Based on the fitted model, return the estimated bernoulli P(heads)
        behind each data sequence.

        :param mode:
            - Default `False` means compute this as sum_z p_z_hat P(z | x, theta),
              the probability of heads averaged over the coins according to the
              posterior distribution over coins for each sequence.
            - If `True`, instead return simply p_z_hat, the probability of heads
              for the coin maximizing P(z | x, theta).
        """
        if not mode:
            return self.Q @ self.theta['bernoulli_p']
        return [self.theta['bernoulli_p'][row.argmax()] for row in self.Q]
| [
"numpy.ones",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.random.binomial"
] | [((387, 407), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (401, 407), True, 'import numpy as np\n'), ((2258, 2320), 'numpy.concatenate', 'np.concatenate', (["(theta['bernoulli_p'], theta['multinomial_p'])"], {}), "((theta['bernoulli_p'], theta['multinomial_p']))\n", (2272, 2320), True, 'import numpy as np\n'), ((3335, 3370), 'numpy.log', 'np.log', (["self.theta['multinomial_p']"], {}), "(self.theta['multinomial_p'])\n", (3341, 3370), True, 'import numpy as np\n'), ((3804, 3830), 'numpy.exp', 'np.exp', (['(logPxz - logPx_mat)'], {}), '(logPxz - logPx_mat)\n', (3810, 3830), True, 'import numpy as np\n'), ((4251, 4281), 'numpy.concatenate', 'np.concatenate', (['(p_hat, q_hat)'], {}), '((p_hat, q_hat))\n', (4265, 4281), True, 'import numpy as np\n'), ((780, 838), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'prob'], {'size': 'self.n_reps_per_sequence'}), '(1, prob, size=self.n_reps_per_sequence)\n', (798, 838), True, 'import numpy as np\n'), ((993, 1030), 'numpy.array', 'np.array', (["[d['values'] for d in data]"], {}), "([d['values'] for d in data])\n", (1001, 1030), True, 'import numpy as np\n'), ((2552, 2582), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.K'}), '(size=self.K)\n', (2569, 2582), True, 'import numpy as np\n'), ((2820, 2829), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (2826, 2829), True, 'import numpy as np\n'), ((2947, 2960), 'numpy.log', 'np.log', (['(1 - p)'], {}), '(1 - p)\n', (2953, 2960), True, 'import numpy as np\n'), ((2613, 2628), 'numpy.ones', 'np.ones', (['self.K'], {}), '(self.K)\n', (2620, 2628), True, 'import numpy as np\n'), ((4625, 4642), 'numpy.sum', 'np.sum', (['(diff ** 2)'], {}), '(diff ** 2)\n', (4631, 4642), True, 'import numpy as np\n'), ((3679, 3693), 'numpy.exp', 'np.exp', (['logPxz'], {}), '(logPxz)\n', (3685, 3693), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# ase.py
# Created by <NAME> on 2017-09-12.
# Email: <EMAIL>
# Copyright (c) 2017. All rights reserved.
import numpy as np
from typing import Sequence, TypeVar, Union, Dict
import networkx
import os
from scipy.stats import norm
from scipy.stats import rankdata
from sklearn.decomposition import TruncatedSVD
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m import utils, container
from d3m.metadata import hyperparams, base as metadata_module, params
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.base import CallResult
from graspy.embed import AdjacencySpectralEmbed as graspyASE
from graspy.embed import OmnibusEmbed as graspyOMNI
from graspy.utils import pass_to_ranks as graspyPTR
Inputs = container.List
Outputs = container.List
class Params(params.Params):
pass
class Hyperparams(hyperparams.Hyperparams):
max_dimension = hyperparams.Bounded[int](
default=2,
semantic_types= [
'https://metadata.datadrivendiscovery.org/types/TuningParameter'
],
lower = 1,
upper = None
)
which_elbow = hyperparams.Bounded[int](
default = 1,
semantic_types= [
'https://metadata.datadrivendiscovery.org/types/TuningParameter'
],
lower = 1,
upper = 2
)
use_attributes = hyperparams.Hyperparameter[bool](
default = False,
semantic_types = [
'https://metadata.datadrivendiscovery.org/types/TuningParameter'
])
class AdjacencySpectralEmbedding(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
Spectral-based trasformation of weighted or unweighted adjacency matrix.
"""
# This should contain only metadata which cannot be automatically determined from the code.
metadata = metadata_module.PrimitiveMetadata({
# Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
'id': 'b940ccbd-9e9b-3166-af50-210bfd79251b',
'version': "0.1.0",
'name': "jhu.ase",
# Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
'keywords': ['ase primitive', 'graph', 'spectral', 'embedding', 'spectral method', 'adjacency', 'matrix'],
'source': {
'name': "JHU",
'uris': [
# Unstructured URIs. Link to file and link to repo in this case.
'https://github.com/neurodata/primitives-interfaces/blob/master/jhu_primitives/ase/ase.py',
'https://github.com/neurodata/primitives-interfaces',
],
'contact': 'mailto:<EMAIL>'
},
'description': 'Spectral-based trasformation of weighted or unweighted adjacency matrix',
'hyperparams_configuration': {
'max_dimension': 'The maximum dimension that can be used for eigendecomposition',
'which_elbow': 'The scree plot "elbow" to use for dimensionality reduction. High values leads to more dimensions selected.',
'use_attributes': 'Boolean which indicates whether to use the attributes of the nodes.'
},
# A list of dependencies in order. These can be Python packages, system packages, or Docker images.
# Of course Python packages can also have their own dependencies, but sometimes it is necessary to
# install a Python package first to be even able to run setup.py of another package. Or you have
# a dependency which is not on PyPi.
'installation': [
{
'type': 'UBUNTU',
'package': 'libxml2-dev',
'version': '2.9.4'
},
{
'type': 'UBUNTU',
'package': 'libpcre3-dev',
'version': '2.9.4'
},
{
'type': 'PIP',
'package_uri': 'git+https://github.com/neurodata/primitives-interfaces.git@{git_commit}#egg=jhu_primitives'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
}],
# URIs at which one can obtain code for the primitive, if available.
# 'location_uris': [
# 'https://gitlab.com/datadrivendiscovery/tests-data/raw/{git_commit}/primitives/test_primitives/monomial.py'.format(
# git_commit=utils.current_git_commit(os.path.dirname(__file__)),
# ),
# ],
# The same path the primitive is registered with entry points in setup.py.
'python_path': 'd3m.primitives.data_transformation.adjacency_spectral_embedding.JHU',
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
'algorithm_types': [
"SINGULAR_VALUE_DECOMPOSITION"
],
'primitive_family': "DATA_TRANSFORMATION",
'preconditions': ['NO_MISSING_VALUES']
})
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0, docker_containers: Dict[str, base.DockerContainer] = None) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
np.random.seed(1234)
G = inputs[0].copy()
try:
link_predicton = inputs[3]
if type(link_predicton) is not bool:
link_predicton = False
except:
link_predicton = False
if link_predicton:
g = np.array(G.copy())
else:
g = graspyPTR(G)
n = g.shape[0]
max_dimension = self.hyperparams['max_dimension']
if max_dimension > n:
max_dimension = n
n_elbows = self.hyperparams['which_elbow']
if self.hyperparams['use_attributes']:
adj = [g]
MORE_ATTR = True
attr_number = 1
while MORE_ATTR:
attr = 'attr'
temp_attr = np.array(list(networkx.get_node_attributes(G, 'attr' + str(attr_number)).values()))
if len(temp_attr) == 0:
MORE_ATTR = False
else:
K = np.sum((temp_attr[:, np.newaxis][:, np.newaxis, :] - temp_attr[:, np.newaxis][np.newaxis, :, :])**2, axis = -1)
adj.append(graspyPTR(K))
attr_number += 1
M = len(adj)
if M > 1:
omni_object = graspyOMNI(n_components = max_dimension, n_elbows = n_elbows)
X_hats = omni_object.fit_transform(adj)
X_hat = np.mean(X_hats, axis = 0)
embedding = X_hat.copy()
inputs[0] = container.ndarray(embedding)
return base.CallResult(inputs)
ase_object = graspyASE(n_components=max_dimension, n_elbows = n_elbows)
X_hat = ase_object.fit_transform(g)
inputs[0] = container.ndarray(X_hat)
return base.CallResult(inputs)
| [
"graspy.utils.pass_to_ranks",
"d3m.primitive_interfaces.base.CallResult",
"numpy.mean",
"d3m.container.ndarray",
"graspy.embed.AdjacencySpectralEmbed",
"graspy.embed.OmnibusEmbed",
"numpy.sum",
"os.path.dirname",
"numpy.random.seed"
] | [((5340, 5360), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (5354, 5360), True, 'import numpy as np\n'), ((6930, 6986), 'graspy.embed.AdjacencySpectralEmbed', 'graspyASE', ([], {'n_components': 'max_dimension', 'n_elbows': 'n_elbows'}), '(n_components=max_dimension, n_elbows=n_elbows)\n', (6939, 6986), True, 'from graspy.embed import AdjacencySpectralEmbed as graspyASE\n'), ((7054, 7078), 'd3m.container.ndarray', 'container.ndarray', (['X_hat'], {}), '(X_hat)\n', (7071, 7078), False, 'from d3m import utils, container\n'), ((7095, 7118), 'd3m.primitive_interfaces.base.CallResult', 'base.CallResult', (['inputs'], {}), '(inputs)\n', (7110, 7118), False, 'from d3m.primitive_interfaces import base\n'), ((5676, 5688), 'graspy.utils.pass_to_ranks', 'graspyPTR', (['G'], {}), '(G)\n', (5685, 5688), True, 'from graspy.utils import pass_to_ranks as graspyPTR\n'), ((6592, 6649), 'graspy.embed.OmnibusEmbed', 'graspyOMNI', ([], {'n_components': 'max_dimension', 'n_elbows': 'n_elbows'}), '(n_components=max_dimension, n_elbows=n_elbows)\n', (6602, 6649), True, 'from graspy.embed import OmnibusEmbed as graspyOMNI\n'), ((6734, 6757), 'numpy.mean', 'np.mean', (['X_hats'], {'axis': '(0)'}), '(X_hats, axis=0)\n', (6741, 6757), True, 'import numpy as np\n'), ((6831, 6859), 'd3m.container.ndarray', 'container.ndarray', (['embedding'], {}), '(embedding)\n', (6848, 6859), False, 'from d3m import utils, container\n'), ((6884, 6907), 'd3m.primitive_interfaces.base.CallResult', 'base.CallResult', (['inputs'], {}), '(inputs)\n', (6899, 6907), False, 'from d3m.primitive_interfaces import base\n'), ((6308, 6424), 'numpy.sum', 'np.sum', (['((temp_attr[:, np.newaxis][:, np.newaxis, :] - temp_attr[:, np.newaxis][np.\n newaxis, :, :]) ** 2)'], {'axis': '(-1)'}), '((temp_attr[:, np.newaxis][:, np.newaxis, :] - temp_attr[:, np.\n newaxis][np.newaxis, :, :]) ** 2, axis=-1)\n', (6314, 6424), True, 'import numpy as np\n'), ((6451, 6463), 'graspy.utils.pass_to_ranks', 
'graspyPTR', (['K'], {}), '(K)\n', (6460, 6463), True, 'from graspy.utils import pass_to_ranks as graspyPTR\n'), ((4032, 4057), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4047, 4057), False, 'import os\n')] |
"""This module defines the DataSeries class, the elementary data structure of ixdat
An ixdat DataSeries is a wrapper around a numpy array containing the metadata needed
to combine it with other DataSeries. Typically this means a reference to the time
variable corresponding to the rows of the array. The time variable itself is a special
case, TimeSeries, which must know its absolute (unix) timestamp.
"""
import numpy as np
from .db import Saveable
from .units import Unit
from .exceptions import TimeError, AxisError
class DataSeries(Saveable):
"""The base class for all numerical data representation in ixdat.
These class's objects are saved and loaded as rows in the data_series table
"""
table_name = "data_series"
column_attrs = {
"name",
"unit_name",
"data",
}
def __init__(self, name, unit_name, data):
"""initialize a data series with its name, unit, and data (id handled by parent)
Args:
name (str): The name of the data series
unit_name (str): The name of the unit in which the data is stored
data (np.array): The numerical data
"""
super().__init__()
self.name = name
self.unit = Unit(unit_name)
self._data = data
@classmethod
def from_dict(cls, obj_as_dict):
"""Return the right type of DataSeries based on the info in its serialization"""
if "tstamp" in obj_as_dict:
return TimeSeries(**obj_as_dict)
elif "t_ids" in obj_as_dict:
return ValueSeries(**obj_as_dict)
elif "a_ids" in obj_as_dict:
return Field(**obj_as_dict)
elif "value" in obj_as_dict:
return ConstantValue(**obj_as_dict)
return cls(**obj_as_dict)
def __repr__(self):
return f"{self.__class__.__name__}(id={self.id}, name='{self.name}')"
@property
def data(self):
"""The data as a np.array, loaded the first time it is needed."""
if self._data is None:
self._data = self.load_data() # inherited from Saveable.
return self._data
@property
def unit_name(self):
"""The name of the data series' unit"""
return self.unit.name
@property
def shape(self):
return self.data.shape
@property
def size(self):
return self.data.size
class TimeSeries(DataSeries):
"""Class to store time data. These are characterized by having a tstamp"""
extra_column_attrs = {"tstamps": {"tstamp"}}
def __init__(self, name, unit_name, data, tstamp):
"""Initiate a TimeSeries with name, unit_name, data, and a tstamp (float)
Args (in addition to those of parent):
tstamp (float): The unix timestamp of the time at which t=0 in the data
"""
super().__init__(name, unit_name, data)
self.tstamp = tstamp
@property
def t(self):
return self.data
@property
def tseries(self):
"""Trivially, a TimeSeries is its own TimeSeries"""
return self
class ValueSeries(DataSeries):
"""Class to store scalar values that are measured over time.
Characterized by a reference to the corresponding time series. This reference is
represented in relational databases as a row in an auxiliary linker table
"""
extra_linkers = {"value_time": ("data_series", "t_ids")}
def __init__(self, name, unit_name, data, t_id=None, t_ids=None, tseries=None):
"""Initiate a ValueSeries with a TimeSeries or a reference thereto
Args (in addition to those of parent):
t_id (int): The id of the corresponding TimeSeries, if not given directly
t_ids (list of int): [t_id], only so that a backend can pass t_id as a list
tseries (TimeSeries): The corresponding TimeSeries, if available
"""
super().__init__(name, unit_name, data)
self._tseries = tseries
# TODO: This could probably be handled more nicely with PlaceHolderObjects
# see: Measurement and
# https://github.com/ixdat/ixdat/pull/1#discussion_r551518461
if t_ids and not t_id:
t_id = t_ids[0]
self._t_id = t_id
if tseries and t_id:
if not t_id == tseries.id:
raise TimeError(f"{self} initiated with non-matching t_id and tseries")
if tseries is None and t_id is None:
raise TimeError(f"{self} initiated without t_id or tseries.")
@property
def t_id(self):
"""int: the id of the TimeSeries"""
if self._tseries:
return self._tseries.id
return self._t_id
@property
def t_ids(self):
"""list: the id of the TimeSeries, in a list for consistent linker table def."""
return [self.t_id]
@property
def tseries(self):
"""The TimeSeries describing when the data in the ValueSeries was recorded"""
if not self._tseries:
self._tseries = TimeSeries.get(i=self.t_id)
self._t_id = None # to avoid any confusion of two t_id's
return self._tseries
@property
def v(self):
"""The value as a 1-d np array"""
return self.data
@property
def t(self):
"""The measurement times as a 1-d np array"""
return self.tseries.data
@property
def tstamp(self):
"""The timestamp, from the TimeSeries of the ValueSeries"""
return self.tseries.tstamp
class Field(DataSeries):
"""Class for storing multi-dimensional data spanning 'axes'
Characterized by a list of references to these axes, which are themselves also
DataSeries. This is represented in the extra linkers.
"""
extra_linkers = {"field_axes": ("data_series", "a_ids")}
def __init__(self, name, unit_name, data, a_ids=None, axes_series=None):
"""Initiate the Field and check that the supplied axes make sense.
Args (in addition to those of parent):
a_ids (list of int): The ids of the corresponding axes DataSeries, if not
the series are not given directly as `axes_series`
axes_series (list of DataSeries): The DataSeries describing the axes which
the field's data spans, if available
"""
super().__init__(name, unit_name, data)
N = len(a_ids) if a_ids is not None else len(axes_series)
self.N_dimensions = N
self._a_ids = a_ids if a_ids is not None else ([None] * N)
# TODO: This could probably be handled more nicely with PlaceHolderObjects
# see: Measurement and
# https://github.com/ixdat/ixdat/pull/1#discussion_r551518461
self._axes_series = axes_series if axes_series is not None else ([None] * N)
self._check_axes() # raises an AxisError if something's wrong
def get_axis_id(self, axis_number):
"""Return the id of the `axis_number`'th axis of the data"""
if self._axes_series[axis_number]:
return self._axes_series[axis_number].id
return self._a_ids[axis_number]
def get_axis_series(self, axis_number):
"""Return the DataSeries of the `axis_number`'th axis of the data"""
if not self._axes_series[axis_number]:
self._axes_series[axis_number] = DataSeries.get(i=self._a_ids[axis_number])
# And so as not have two id's for the axis_number'th axis:
self._a_ids[axis_number] = None
return self._axes_series[axis_number]
@property
def a_ids(self):
"""List of the id's of the axes spanned by the field"""
return [self.get_axis_id(n) for n in range(self.N_dimensions)]
@property
def axes_series(self):
"""List of the DataSeries defining the axes spanned by the field"""
return [self.get_axis_series(n) for n in range(self.N_dimensions)]
def _check_axes(self):
"""Check that there are no contradictions in the Field's axes_series and id's"""
N = self.N_dimensions
if len(self._a_ids) != N:
raise AxisError(
f"{self} is {N}-D but initiated with {len(self._a_ids)} axis id's"
)
if len(self._axes_series) != N:
raise AxisError(
f"{self} is {N}-D but initiated with {len(self._axes_series)} axes"
)
for n, (a_id, axis_series) in enumerate(zip(self._a_ids, self._axes_series)):
if a_id is not None and axis_series is not None and a_id != axis_series.id:
raise AxisError(
f"{self} initiated with contradicting id's for {n}'th axis"
)
elif a_id is None and axis_series is None:
raise AxisError(
f"{self} has no axis id for series or id for its {n}'th axis"
)
@property
def data(self):
"""When loading data, Field checks that its dimensions match its # of axes"""
if self._data is None:
self._data = self.load_data()
if len(self._data.shape) != self.N_dimensions:
raise AxisError(
f"{self} has {self.N_dimensions} axes but its data is "
f"{len(self._data.shape)}-dimensional."
)
return self._data
@property
def tstamp(self):
for s in self.axes_series:
if isinstance(s, (ValueSeries, TimeSeries)):
return s.tstamp
class ConstantValue(DataSeries):
"""This is a stand-in for a VSeries for when we know the value is constant"""
extra_column_attrs = {"constants": {"value"}}
def __init__(self, name, unit_name, data=None, value=None):
super().__init__(name=name, unit_name=unit_name, data=np.array([]))
if not np.array(value).size == 1:
raise AxisError(
f"Can't initiate {self} with data={self.value}. Data must have size 1."
)
self.value = value
def get_vseries(self, tseries):
data = self.value * np.ones(tseries.data.shape)
return ValueSeries(
name=self.name, unit_name=self.unit_name, data=data, tseries=tseries
)
| [
"numpy.array",
"numpy.ones"
] | [((10044, 10071), 'numpy.ones', 'np.ones', (['tseries.data.shape'], {}), '(tseries.data.shape)\n', (10051, 10071), True, 'import numpy as np\n'), ((9765, 9777), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9773, 9777), True, 'import numpy as np\n'), ((9794, 9809), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (9802, 9809), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises
from pyriemann.classification import (MDM, FgMDM, KNearestNeighbor,
TSclassifier)
def generate_cov(Nt, Ne):
"""Generate a set of cavariances matrices for test purpose."""
rs = np.random.RandomState(1234)
diags = 2.0 + 0.1 * rs.randn(Nt, Ne)
A = 2*rs.rand(Ne, Ne) - 1
A /= np.atleast_2d(np.sqrt(np.sum(A**2, 1))).T
covmats = np.empty((Nt, Ne, Ne))
for i in range(Nt):
covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)
return covmats
def test_MDM_init():
"""Test init of MDM"""
MDM(metric='riemann')
# Should raise if metric not string or dict
assert_raises(TypeError, MDM, metric=42)
# Should raise if metric is not contain bad keys
assert_raises(KeyError, MDM, metric={'universe': 42})
# should works with correct dict
MDM(metric={'mean': 'riemann', 'distance': 'logeuclid'})
def test_MDM_fit():
"""Test Fit of MDM"""
covset = generate_cov(100, 3)
labels = np.array([0, 1]).repeat(50)
mdm = MDM(metric='riemann')
mdm.fit(covset, labels)
def test_MDM_predict():
"""Test prediction of MDM"""
covset = generate_cov(100, 3)
labels = np.array([0, 1]).repeat(50)
mdm = MDM(metric='riemann')
mdm.fit(covset, labels)
mdm.predict(covset)
# test fit_predict
mdm = MDM(metric='riemann')
mdm.fit_predict(covset, labels)
# test transform
mdm.transform(covset)
# predict proba
mdm.predict_proba(covset)
# test n_jobs
mdm = MDM(metric='riemann', n_jobs=2)
mdm.fit(covset, labels)
mdm.predict(covset)
def test_KNN():
"""Test KNearestNeighbor"""
covset = generate_cov(30, 3)
labels = np.array([0, 1, 2]).repeat(10)
knn = KNearestNeighbor(1, metric='riemann')
knn.fit(covset, labels)
preds = knn.predict(covset)
assert_array_equal(labels, preds)
def test_TSclassifier():
"""Test TS Classifier"""
covset = generate_cov(40, 3)
labels = np.array([0, 1]).repeat(20)
assert_raises(TypeError, TSclassifier, clf='666')
clf = TSclassifier()
clf.fit(covset, labels)
assert_array_equal(clf.classes_, np.array([0, 1]))
clf.predict(covset)
clf.predict_proba(covset)
def test_FgMDM_init():
"""Test init of FgMDM"""
FgMDM(metric='riemann')
# Should raise if metric not string or dict
assert_raises(TypeError, FgMDM, metric=42)
# Should raise if metric is not contain bad keys
assert_raises(KeyError, FgMDM, metric={'universe': 42})
# should works with correct dict
FgMDM(metric={'mean': 'riemann', 'distance': 'logeuclid'})
def test_FgMDM_predict():
"""Test prediction of FgMDM"""
covset = generate_cov(100, 3)
labels = np.array([0, 1]).repeat(50)
fgmdm = FgMDM(metric='riemann')
fgmdm.fit(covset, labels)
fgmdm.predict(covset)
fgmdm.transform(covset)
| [
"pyriemann.classification.KNearestNeighbor",
"pyriemann.classification.FgMDM",
"numpy.testing.assert_array_equal",
"numpy.diag",
"nose.tools.assert_raises",
"numpy.array",
"numpy.sum",
"pyriemann.classification.TSclassifier",
"numpy.empty",
"numpy.random.RandomState",
"pyriemann.classification.M... | [((325, 352), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (346, 352), True, 'import numpy as np\n'), ((489, 511), 'numpy.empty', 'np.empty', (['(Nt, Ne, Ne)'], {}), '((Nt, Ne, Ne))\n', (497, 511), True, 'import numpy as np\n'), ((672, 693), 'pyriemann.classification.MDM', 'MDM', ([], {'metric': '"""riemann"""'}), "(metric='riemann')\n", (675, 693), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((747, 787), 'nose.tools.assert_raises', 'assert_raises', (['TypeError', 'MDM'], {'metric': '(42)'}), '(TypeError, MDM, metric=42)\n', (760, 787), False, 'from nose.tools import assert_raises\n'), ((846, 899), 'nose.tools.assert_raises', 'assert_raises', (['KeyError', 'MDM'], {'metric': "{'universe': 42}"}), "(KeyError, MDM, metric={'universe': 42})\n", (859, 899), False, 'from nose.tools import assert_raises\n'), ((942, 998), 'pyriemann.classification.MDM', 'MDM', ([], {'metric': "{'mean': 'riemann', 'distance': 'logeuclid'}"}), "(metric={'mean': 'riemann', 'distance': 'logeuclid'})\n", (945, 998), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((1132, 1153), 'pyriemann.classification.MDM', 'MDM', ([], {'metric': '"""riemann"""'}), "(metric='riemann')\n", (1135, 1153), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((1326, 1347), 'pyriemann.classification.MDM', 'MDM', ([], {'metric': '"""riemann"""'}), "(metric='riemann')\n", (1329, 1347), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((1434, 1455), 'pyriemann.classification.MDM', 'MDM', ([], {'metric': '"""riemann"""'}), "(metric='riemann')\n", (1437, 1455), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((1620, 1651), 'pyriemann.classification.MDM', 'MDM', ([], {'metric': '"""riemann"""', 'n_jobs': '(2)'}), 
"(metric='riemann', n_jobs=2)\n", (1623, 1651), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((1842, 1879), 'pyriemann.classification.KNearestNeighbor', 'KNearestNeighbor', (['(1)'], {'metric': '"""riemann"""'}), "(1, metric='riemann')\n", (1858, 1879), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((1944, 1977), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['labels', 'preds'], {}), '(labels, preds)\n', (1962, 1977), False, 'from numpy.testing import assert_array_equal\n'), ((2113, 2162), 'nose.tools.assert_raises', 'assert_raises', (['TypeError', 'TSclassifier'], {'clf': '"""666"""'}), "(TypeError, TSclassifier, clf='666')\n", (2126, 2162), False, 'from nose.tools import assert_raises\n'), ((2173, 2187), 'pyriemann.classification.TSclassifier', 'TSclassifier', ([], {}), '()\n', (2185, 2187), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((2383, 2406), 'pyriemann.classification.FgMDM', 'FgMDM', ([], {'metric': '"""riemann"""'}), "(metric='riemann')\n", (2388, 2406), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((2460, 2502), 'nose.tools.assert_raises', 'assert_raises', (['TypeError', 'FgMDM'], {'metric': '(42)'}), '(TypeError, FgMDM, metric=42)\n', (2473, 2502), False, 'from nose.tools import assert_raises\n'), ((2561, 2616), 'nose.tools.assert_raises', 'assert_raises', (['KeyError', 'FgMDM'], {'metric': "{'universe': 42}"}), "(KeyError, FgMDM, metric={'universe': 42})\n", (2574, 2616), False, 'from nose.tools import assert_raises\n'), ((2659, 2717), 'pyriemann.classification.FgMDM', 'FgMDM', ([], {'metric': "{'mean': 'riemann', 'distance': 'logeuclid'}"}), "(metric={'mean': 'riemann', 'distance': 'logeuclid'})\n", (2664, 2717), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((2868, 2891), 
'pyriemann.classification.FgMDM', 'FgMDM', ([], {'metric': '"""riemann"""'}), "(metric='riemann')\n", (2873, 2891), False, 'from pyriemann.classification import MDM, FgMDM, KNearestNeighbor, TSclassifier\n'), ((2253, 2269), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2261, 2269), True, 'import numpy as np\n'), ((1094, 1110), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1102, 1110), True, 'import numpy as np\n'), ((1288, 1304), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1296, 1304), True, 'import numpy as np\n'), ((1800, 1819), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1808, 1819), True, 'import numpy as np\n'), ((2080, 2096), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2088, 2096), True, 'import numpy as np\n'), ((2828, 2844), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2836, 2844), True, 'import numpy as np\n'), ((455, 472), 'numpy.sum', 'np.sum', (['(A ** 2)', '(1)'], {}), '(A ** 2, 1)\n', (461, 472), True, 'import numpy as np\n'), ((574, 591), 'numpy.diag', 'np.diag', (['diags[i]'], {}), '(diags[i])\n', (581, 591), True, 'import numpy as np\n')] |
import cv2
import sys, os, glob, re
import json
from os.path import join, dirname, abspath, realpath, isdir
from os import makedirs
import numpy as np
from shutil import rmtree
from ipdb import set_trace
from .bench_utils.bbox_helper import rect_2_cxy_wh, cxy_wh_2_rect
def center_error(rects1, rects2):
"""Center error.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
return errors
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
def rect_iou(rects1, rects2, bound=None):
r"""Intersection over union.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
bound (numpy.ndarray): A 4 dimensional array, denotes the bound
(min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
"""
assert rects1.shape == rects2.shape
if bound is not None:
# bounded rects1
rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
# bounded rects2
rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
rects_inter = _intersection(rects1, rects2)
areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
areas1 = np.prod(rects1[..., 2:], axis=-1)
areas2 = np.prod(rects2[..., 2:], axis=-1)
areas_union = areas1 + areas2 - areas_inter
eps = np.finfo(float).eps
ious = areas_inter / (areas_union + eps)
ious = np.clip(ious, 0.0, 1.0)
return ious
def overlap_ratio(rect1, rect2):
'''
Compute overlap ratio between two rects
- rect: 1d array of [x,y,w,h] or
2d array of N x [x,y,w,h]
'''
if rect1.ndim==1:
rect1 = rect1[None,:]
if rect2.ndim==1:
rect2 = rect2[None,:]
left = np.maximum(rect1[:,0], rect2[:,0])
right = np.minimum(rect1[:,0]+rect1[:,2], rect2[:,0]+rect2[:,2])
top = np.maximum(rect1[:,1], rect2[:,1])
bottom = np.minimum(rect1[:,1]+rect1[:,3], rect2[:,1]+rect2[:,3])
intersect = np.maximum(0,right - left) * np.maximum(0,bottom - top)
union = rect1[:,2]*rect1[:,3] + rect2[:,2]*rect2[:,3] - intersect
iou = np.clip(intersect / union, 0, 1)
return iou
def calc_curves(ious, center_errors, nbins_iou, nbins_ce):
ious = np.asarray(ious, float)[:, np.newaxis]
center_errors = np.asarray(center_errors, float)[:, np.newaxis]
thr_iou = np.linspace(0, 1, nbins_iou)[np.newaxis, :]
thr_ce = np.arange(0, nbins_ce)[np.newaxis, :]
bin_iou = np.greater(ious, thr_iou)
bin_ce = np.less_equal(center_errors, thr_ce)
succ_curve = np.mean(bin_iou, axis=0)
prec_curve = np.mean(bin_ce, axis=0)
return succ_curve, prec_curve
def compute_success_overlap(gt_bb, result_bb):
thresholds_overlap = np.arange(0, 1.05, 0.05)
n_frame = len(gt_bb)
success = np.zeros(len(thresholds_overlap))
iou = overlap_ratio(gt_bb, result_bb)
for i in range(len(thresholds_overlap)):
success[i] = sum(iou > thresholds_overlap[i]) / float(n_frame)
return success
def compute_success_error(gt_center, result_center):
thresholds_error = np.arange(0, 51, 1)
n_frame = len(gt_center)
success = np.zeros(len(thresholds_error))
dist = np.sqrt(np.sum(np.power(gt_center - result_center, 2), axis=1))
for i in range(len(thresholds_error)):
success[i] = sum(dist <= thresholds_error[i]) / float(n_frame)
return success
def get_result_bb(arch, seq):
result_path = join(arch, seq + '.txt')
temp = np.loadtxt(result_path, delimiter=',').astype(np.float)
return np.array(temp)
def convert_bb_to_center(bboxes):
return np.array([(bboxes[:, 0] + (bboxes[:, 2] - 1) / 2),
(bboxes[:, 1] + (bboxes[:, 3] - 1) / 2)]).T
def test_otb(v_id, tracker, video, args):
toc, regions = 0, []
image_files, gt = video['image_files'], video['gt']
for f, image_file in enumerate(image_files):
im = cv2.imread(image_file)
tic = cv2.getTickCount()
if f == 0:
init_pos, init_sz = rect_2_cxy_wh(gt[f])
state = tracker.setup(im, init_pos, init_sz)
location = gt[f]
regions.append(gt[f])
elif f > 0:
state = tracker.track(im, state)
location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
regions.append(location)
toc += cv2.getTickCount() - tic
if args.viz and f > 0: # visualization
if f == 0: cv2.destroyAllWindows()
if len(gt[f]) == 8:
cv2.polylines(im, [np.array(gt[f], np.int).reshape((-1, 1, 2))],
True, (0, 255, 0), 3)
else:
cv2.rectangle(im, (gt[f, 0], gt[f, 1]), (gt[f, 0] + gt[f, 2], gt[f, 1] + gt[f, 3]),
(0, 255, 0), 3)
if len(location) == 8:
cv2.polylines(im, [location.reshape((-1, 1, 2))], True, (0, 255, 255), 3)
else:
location = [int(l) for l in location] #
cv2.rectangle(im, (location[0], location[1]),
(location[0] + location[2], location[1] + location[3]),
(0, 255, 255), 3)
cv2.putText(im, "score: {:.4f}".format(state['score']), (40, 40),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
cv2.imshow(video['name'], im)
cv2.moveWindow(video['name'], 200, 50)
cv2.waitKey(1)
cv2.destroyAllWindows()
toc /= cv2.getTickFrequency()
# save result
video_path = join('benchmark/results/', args.dataset, args.save_path)
if not isdir(video_path): makedirs(video_path)
result_path = join(video_path, '{:s}.txt'.format(video['name']))
with open(result_path, "w") as fin:
for x in regions:
fin.write(','.join([str(i) for i in x])+'\n')
print('({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps'.format(
v_id, video['name'], toc, f / toc))
return f / toc
def eval_otb(save_path, delete_after):
    """Evaluate saved OTB2015 tracking results against the ground truth.

    :param save_path:    sub-directory under ``benchmark/results/OTB2015/``
    :param delete_after: when true, delete the result directory afterwards
    :return: dict with the best mean AUC and best mean precision over trackers
    """
    base_path = join(realpath(dirname(__file__)), '../data', 'OTB2015')
    json_path = base_path + '.json'
    annos = json.load(open(json_path, 'r'))
    video_path = join('benchmark/results/OTB2015/', save_path)
    trackers = glob.glob(join(video_path))
    _, _, files = next(os.walk(trackers[0]))
    num_files = len(files)
    thresholds_overlap = np.arange(0, 1.05, 0.05)
    success_overlap = np.zeros((num_files, len(trackers), len(thresholds_overlap)))
    thresholds_error = np.arange(0, 51, 1)
    success_error = np.zeros((num_files, len(trackers), len(thresholds_error)))
    for i, f in enumerate(files):
        seq = f.replace('.txt', '')
        # BUG FIX: np.float was removed in NumPy 1.20+; use builtin float
        gt_rect = np.array(annos[seq]['gt_rect']).astype(float)
        gt_center = convert_bb_to_center(gt_rect)
        for j in range(len(trackers)):
            tracker = trackers[j]
            bb = get_result_bb(tracker, seq)
            center = convert_bb_to_center(bb)
            success_overlap[i][j] = compute_success_overlap(gt_rect, bb)
            success_error[i][j] = compute_success_error(gt_center, center)
    max_auc = 0.0
    max_prec = 0.0
    for i in range(len(trackers)):
        # renamed from `auc` to avoid shadowing sklearn's imported `auc`
        mean_auc = success_overlap[:, i, :].mean()
        if mean_auc > max_auc:
            max_auc = mean_auc
        prec = success_error[:, i, :].mean()
        if prec > max_prec:
            max_prec = prec
    if delete_after:
        rmtree(trackers[0])
    # BUG FIX: previously returned `prec` (whatever the LAST tracker scored)
    # instead of the best precision that the loop above computed.
    return {'auc': max_auc, 'precision': max_prec}
| [
"numpy.clip",
"numpy.prod",
"cv2.rectangle",
"numpy.less_equal",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.arange",
"os.walk",
"numpy.mean",
"numpy.greater",
"cv2.moveWindow",
"numpy.asarray",
"numpy.stack",
"numpy.linspace",
"os.path.isdir",
"numpy.maximum",
"cv... | [((1192, 1234), 'numpy.maximum', 'np.maximum', (['rects1[..., 0]', 'rects2[..., 0]'], {}), '(rects1[..., 0], rects2[..., 0])\n', (1202, 1234), True, 'import numpy as np\n'), ((1244, 1286), 'numpy.maximum', 'np.maximum', (['rects1[..., 1]', 'rects2[..., 1]'], {}), '(rects1[..., 1], rects2[..., 1])\n', (1254, 1286), True, 'import numpy as np\n'), ((1296, 1372), 'numpy.minimum', 'np.minimum', (['(rects1[..., 0] + rects1[..., 2])', '(rects2[..., 0] + rects2[..., 2])'], {}), '(rects1[..., 0] + rects1[..., 2], rects2[..., 0] + rects2[..., 2])\n', (1306, 1372), True, 'import numpy as np\n'), ((1402, 1478), 'numpy.minimum', 'np.minimum', (['(rects1[..., 1] + rects1[..., 3])', '(rects2[..., 1] + rects2[..., 3])'], {}), '(rects1[..., 1] + rects1[..., 3], rects2[..., 1] + rects2[..., 3])\n', (1412, 1478), True, 'import numpy as np\n'), ((1508, 1530), 'numpy.maximum', 'np.maximum', (['(x2 - x1)', '(0)'], {}), '(x2 - x1, 0)\n', (1518, 1530), True, 'import numpy as np\n'), ((1539, 1561), 'numpy.maximum', 'np.maximum', (['(y2 - y1)', '(0)'], {}), '(y2 - y1, 0)\n', (1549, 1561), True, 'import numpy as np\n'), ((2812, 2850), 'numpy.prod', 'np.prod', (['rects_inter[..., 2:]'], {'axis': '(-1)'}), '(rects_inter[..., 2:], axis=-1)\n', (2819, 2850), True, 'import numpy as np\n'), ((2865, 2898), 'numpy.prod', 'np.prod', (['rects1[..., 2:]'], {'axis': '(-1)'}), '(rects1[..., 2:], axis=-1)\n', (2872, 2898), True, 'import numpy as np\n'), ((2912, 2945), 'numpy.prod', 'np.prod', (['rects2[..., 2:]'], {'axis': '(-1)'}), '(rects2[..., 2:], axis=-1)\n', (2919, 2945), True, 'import numpy as np\n'), ((3081, 3104), 'numpy.clip', 'np.clip', (['ious', '(0.0)', '(1.0)'], {}), '(ious, 0.0, 1.0)\n', (3088, 3104), True, 'import numpy as np\n'), ((3408, 3444), 'numpy.maximum', 'np.maximum', (['rect1[:, 0]', 'rect2[:, 0]'], {}), '(rect1[:, 0], rect2[:, 0])\n', (3418, 3444), True, 'import numpy as np\n'), ((3455, 3519), 'numpy.minimum', 'np.minimum', (['(rect1[:, 0] + rect1[:, 2])', '(rect2[:, 0] 
+ rect2[:, 2])'], {}), '(rect1[:, 0] + rect1[:, 2], rect2[:, 0] + rect2[:, 2])\n', (3465, 3519), True, 'import numpy as np\n'), ((3522, 3558), 'numpy.maximum', 'np.maximum', (['rect1[:, 1]', 'rect2[:, 1]'], {}), '(rect1[:, 1], rect2[:, 1])\n', (3532, 3558), True, 'import numpy as np\n'), ((3570, 3634), 'numpy.minimum', 'np.minimum', (['(rect1[:, 1] + rect1[:, 3])', '(rect2[:, 1] + rect2[:, 3])'], {}), '(rect1[:, 1] + rect1[:, 3], rect2[:, 1] + rect2[:, 3])\n', (3580, 3634), True, 'import numpy as np\n'), ((3780, 3812), 'numpy.clip', 'np.clip', (['(intersect / union)', '(0)', '(1)'], {}), '(intersect / union, 0, 1)\n', (3787, 3812), True, 'import numpy as np\n'), ((4131, 4156), 'numpy.greater', 'np.greater', (['ious', 'thr_iou'], {}), '(ious, thr_iou)\n', (4141, 4156), True, 'import numpy as np\n'), ((4170, 4206), 'numpy.less_equal', 'np.less_equal', (['center_errors', 'thr_ce'], {}), '(center_errors, thr_ce)\n', (4183, 4206), True, 'import numpy as np\n'), ((4225, 4249), 'numpy.mean', 'np.mean', (['bin_iou'], {'axis': '(0)'}), '(bin_iou, axis=0)\n', (4232, 4249), True, 'import numpy as np\n'), ((4267, 4290), 'numpy.mean', 'np.mean', (['bin_ce'], {'axis': '(0)'}), '(bin_ce, axis=0)\n', (4274, 4290), True, 'import numpy as np\n'), ((4399, 4423), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {}), '(0, 1.05, 0.05)\n', (4408, 4423), True, 'import numpy as np\n'), ((4751, 4770), 'numpy.arange', 'np.arange', (['(0)', '(51)', '(1)'], {}), '(0, 51, 1)\n', (4760, 4770), True, 'import numpy as np\n'), ((5103, 5127), 'os.path.join', 'join', (['arch', "(seq + '.txt')"], {}), "(arch, seq + '.txt')\n", (5107, 5127), False, 'from os.path import join, dirname, abspath, realpath, isdir\n'), ((5206, 5220), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (5214, 5220), True, 'import numpy as np\n'), ((7136, 7159), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7157, 7159), False, 'import cv2\n'), ((7171, 7193), 'cv2.getTickFrequency', 
'cv2.getTickFrequency', ([], {}), '()\n', (7191, 7193), False, 'import cv2\n'), ((7230, 7286), 'os.path.join', 'join', (['"""benchmark/results/"""', 'args.dataset', 'args.save_path'], {}), "('benchmark/results/', args.dataset, args.save_path)\n", (7234, 7286), False, 'from os.path import join, dirname, abspath, realpath, isdir\n'), ((7914, 7959), 'os.path.join', 'join', (['"""benchmark/results/OTB2015/"""', 'save_path'], {}), "('benchmark/results/OTB2015/', save_path)\n", (7918, 7959), False, 'from os.path import join, dirname, abspath, realpath, isdir\n'), ((8101, 8125), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {}), '(0, 1.05, 0.05)\n', (8110, 8125), True, 'import numpy as np\n'), ((8234, 8253), 'numpy.arange', 'np.arange', (['(0)', '(51)', '(1)'], {}), '(0, 51, 1)\n', (8243, 8253), True, 'import numpy as np\n'), ((1574, 1598), 'numpy.stack', 'np.stack', (['[x1, y1, w, h]'], {}), '([x1, y1, w, h])\n', (1582, 1598), True, 'import numpy as np\n'), ((2219, 2253), 'numpy.clip', 'np.clip', (['rects1[:, 0]', '(0)', 'bound[0]'], {}), '(rects1[:, 0], 0, bound[0])\n', (2226, 2253), True, 'import numpy as np\n'), ((2277, 2311), 'numpy.clip', 'np.clip', (['rects1[:, 1]', '(0)', 'bound[1]'], {}), '(rects1[:, 1], 0, bound[1])\n', (2284, 2311), True, 'import numpy as np\n'), ((2335, 2384), 'numpy.clip', 'np.clip', (['rects1[:, 2]', '(0)', '(bound[0] - rects1[:, 0])'], {}), '(rects1[:, 2], 0, bound[0] - rects1[:, 0])\n', (2342, 2384), True, 'import numpy as np\n'), ((2408, 2457), 'numpy.clip', 'np.clip', (['rects1[:, 3]', '(0)', '(bound[1] - rects1[:, 1])'], {}), '(rects1[:, 3], 0, bound[1] - rects1[:, 1])\n', (2415, 2457), True, 'import numpy as np\n'), ((2506, 2540), 'numpy.clip', 'np.clip', (['rects2[:, 0]', '(0)', 'bound[0]'], {}), '(rects2[:, 0], 0, bound[0])\n', (2513, 2540), True, 'import numpy as np\n'), ((2564, 2598), 'numpy.clip', 'np.clip', (['rects2[:, 1]', '(0)', 'bound[1]'], {}), '(rects2[:, 1], 0, bound[1])\n', (2571, 2598), True, 'import numpy 
as np\n'), ((2622, 2671), 'numpy.clip', 'np.clip', (['rects2[:, 2]', '(0)', '(bound[0] - rects2[:, 0])'], {}), '(rects2[:, 2], 0, bound[0] - rects2[:, 0])\n', (2629, 2671), True, 'import numpy as np\n'), ((2695, 2744), 'numpy.clip', 'np.clip', (['rects2[:, 3]', '(0)', '(bound[1] - rects2[:, 1])'], {}), '(rects2[:, 3], 0, bound[1] - rects2[:, 1])\n', (2702, 2744), True, 'import numpy as np\n'), ((3005, 3020), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3013, 3020), True, 'import numpy as np\n'), ((3644, 3671), 'numpy.maximum', 'np.maximum', (['(0)', '(right - left)'], {}), '(0, right - left)\n', (3654, 3671), True, 'import numpy as np\n'), ((3673, 3700), 'numpy.maximum', 'np.maximum', (['(0)', '(bottom - top)'], {}), '(0, bottom - top)\n', (3683, 3700), True, 'import numpy as np\n'), ((3899, 3922), 'numpy.asarray', 'np.asarray', (['ious', 'float'], {}), '(ious, float)\n', (3909, 3922), True, 'import numpy as np\n'), ((3958, 3990), 'numpy.asarray', 'np.asarray', (['center_errors', 'float'], {}), '(center_errors, float)\n', (3968, 3990), True, 'import numpy as np\n'), ((4021, 4049), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nbins_iou'], {}), '(0, 1, nbins_iou)\n', (4032, 4049), True, 'import numpy as np\n'), ((4078, 4100), 'numpy.arange', 'np.arange', (['(0)', 'nbins_ce'], {}), '(0, nbins_ce)\n', (4087, 4100), True, 'import numpy as np\n'), ((5267, 5360), 'numpy.array', 'np.array', (['[bboxes[:, 0] + (bboxes[:, 2] - 1) / 2, bboxes[:, 1] + (bboxes[:, 3] - 1) / 2]'], {}), '([bboxes[:, 0] + (bboxes[:, 2] - 1) / 2, bboxes[:, 1] + (bboxes[:, \n 3] - 1) / 2])\n', (5275, 5360), True, 'import numpy as np\n'), ((5570, 5592), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (5580, 5592), False, 'import cv2\n'), ((5607, 5625), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5623, 5625), False, 'import cv2\n'), ((7298, 7315), 'os.path.isdir', 'isdir', (['video_path'], {}), '(video_path)\n', (7303, 7315), False, 'from os.path 
import join, dirname, abspath, realpath, isdir\n'), ((7317, 7337), 'os.makedirs', 'makedirs', (['video_path'], {}), '(video_path)\n', (7325, 7337), False, 'from os import makedirs\n'), ((7985, 8001), 'os.path.join', 'join', (['video_path'], {}), '(video_path)\n', (7989, 8001), False, 'from os.path import join, dirname, abspath, realpath, isdir\n'), ((8026, 8046), 'os.walk', 'os.walk', (['trackers[0]'], {}), '(trackers[0])\n', (8033, 8046), False, 'import sys, os, glob, re\n'), ((9137, 9156), 'shutil.rmtree', 'rmtree', (['trackers[0]'], {}), '(trackers[0])\n', (9143, 9156), False, 'from shutil import rmtree\n'), ((742, 774), 'numpy.power', 'np.power', (['(centers1 - centers2)', '(2)'], {}), '(centers1 - centers2, 2)\n', (750, 774), True, 'import numpy as np\n'), ((4872, 4910), 'numpy.power', 'np.power', (['(gt_center - result_center)', '(2)'], {}), '(gt_center - result_center, 2)\n', (4880, 4910), True, 'import numpy as np\n'), ((5139, 5177), 'numpy.loadtxt', 'np.loadtxt', (['result_path'], {'delimiter': '""","""'}), "(result_path, delimiter=',')\n", (5149, 5177), True, 'import numpy as np\n'), ((6013, 6031), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (6029, 6031), False, 'import cv2\n'), ((7023, 7052), 'cv2.imshow', 'cv2.imshow', (["video['name']", 'im'], {}), "(video['name'], im)\n", (7033, 7052), False, 'import cv2\n'), ((7065, 7103), 'cv2.moveWindow', 'cv2.moveWindow', (["video['name']", '(200)', '(50)'], {}), "(video['name'], 200, 50)\n", (7079, 7103), False, 'import cv2\n'), ((7116, 7130), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7127, 7130), False, 'import cv2\n'), ((7744, 7761), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (7751, 7761), False, 'from os.path import join, dirname, abspath, realpath, isdir\n'), ((6110, 6133), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6131, 6133), False, 'import cv2\n'), ((6333, 6437), 'cv2.rectangle', 'cv2.rectangle', (['im', '(gt[f, 0], gt[f, 1])', 
'(gt[f, 0] + gt[f, 2], gt[f, 1] + gt[f, 3])', '(0, 255, 0)', '(3)'], {}), '(im, (gt[f, 0], gt[f, 1]), (gt[f, 0] + gt[f, 2], gt[f, 1] + gt\n [f, 3]), (0, 255, 0), 3)\n', (6346, 6437), False, 'import cv2\n'), ((6679, 6803), 'cv2.rectangle', 'cv2.rectangle', (['im', '(location[0], location[1])', '(location[0] + location[2], location[1] + location[3])', '(0, 255, 255)', '(3)'], {}), '(im, (location[0], location[1]), (location[0] + location[2], \n location[1] + location[3]), (0, 255, 255), 3)\n', (6692, 6803), False, 'import cv2\n'), ((8423, 8454), 'numpy.array', 'np.array', (["annos[seq]['gt_rect']"], {}), "(annos[seq]['gt_rect'])\n", (8431, 8454), True, 'import numpy as np\n'), ((6201, 6224), 'numpy.array', 'np.array', (['gt[f]', 'np.int'], {}), '(gt[f], np.int)\n', (6209, 6224), True, 'import numpy as np\n')] |
import os
import re
import sys, time
import numpy as np
final=''# global buffer: delop() stores its finished op-expression dataflow here
fresult=''# global buffer: findcircle()/delfor() store the for-expression dataflow here
fcall=''# global buffer: findcircle_call()/del_call() store the call-expression dataflow here
def check(newcontext):
    """Balance a truncated code fragment by appending missing ')' ']' '}'.

    Closers are appended grouped by kind: all ')' first, then ']', then '}'.
    """
    balanced = newcontext
    # TODO: cannot deal with multiple interleaving problems, need help
    missing_round = balanced.count('(') - balanced.count(')')
    missing_square = balanced.count('[') - balanced.count(']')
    missing_curly = balanced.count('{') - balanced.count('}')
    if missing_round > 0:
        balanced += ')' * missing_round
    if missing_square > 0:
        balanced += ']' * missing_square
    if missing_curly > 0:
        balanced += '}' * missing_curly
    return balanced
def recheck(l):
    """Strip 'return ', literal subscripts/arguments and string literals from *l*.

    The substitutions run in a fixed order; the final result is whitespace-trimmed.
    """
    _SUBS = (
        ('return ', ''),
        ('\[\'.*\'\]', ''),
        ('\[\".*\"\]', ''),
        ('\(\'.*\'\)', ''),
        ('\(\".*\"\)', ''),
        ('\[[0-9\.\-\s\:]+\]', ''),
        ('\([0-9\.\-\s\:]+\)', ''),
        ('\{[0-9\.\-\s\:]+\}', ''),
        ('\[.*[\+\:]+.*\]', ''),
        ('\+\=', '='),
        ('r\'.*\'\,*\s*', ''),
        ('b\'.*\'\,*\s*', ''),
        ('rb\'.*\'\,*\s*', ''),
        ('f\'.*\'\,*\s*', ''),
        ('\'.*\'\,*\s*', ''),
        ('\".*\"\,*\s*', ''),
        ('r\".*\"\,*\s*', ''),
        ('b\".*\"\,*\s*', ''),
        ('rb\".*\"\,*\s*', ''),
        ('f\".*\"\,*\s*', ''),
        ('\(\)', ''),
        ('\{\}', ''),
        ('\[\]', ''),
    )
    line = l
    for pattern, repl in _SUBS:
        line = re.sub(pattern, repl, line)
    return line.strip()
def del_arg_op(op):
    """Rewrite ``f(arg)`` as ``f-->arg`` using the LAST '(' / ')' pair in *op*."""
    starti = op.rfind('(')
    endi = op.rfind(')')
    # the original scan left the index at 0 when the bracket was absent
    if starti < 0:
        starti = 0
    if endi < 0:
        endi = 0
    return op[:starti] + '-->' + op[starti + 1:endi] + op[endi + 1:len(op)]
def dealarg_for(ty):
    """Rewrite the first balanced, non-empty call ``f(args)`` as ``f-->args``.

    Scans left to right keeping a depth counter; when the ')' that closes the
    outermost '(' is found and the argument span is non-empty, emits
    ``<prefix>-->*args*`` and stops.  Whatever follows the closing ')' is
    appended unchanged.  Returns '' plus the tail when no such call exists.
    """
    starti=endi=0
    left=right=0
    ret=''
    for i in range(0,len(ty)):
        if ty[i]=='(':
            if left==right:
                # depth 0: this '(' opens the outermost call
                starti=i
                left=left+1
            else:
                left=left+1
        elif ty[i]==')':
            if left==right+1:
                # this ')' closes the outermost '('
                endi=i
                right=right+1
                if starti+1<endi:
                    # non-empty argument list: emit callee-->args and stop
                    ret=ret+ty[:starti]+"-->"+ty[starti+1:endi]
                    break
            else:
                right=right+1
    # append anything after the closing parenthesis verbatim
    return ret+ty[(endi+1):len(ty)]
def dealarg_call(ty):
    """Like :func:`dealarg_for`, but the tail is folded into the rewrite.

    Rewrites the first balanced non-empty call ``f(args)tail`` as
    ``f-->argstail``.  If no rewrite happened, *ty* is returned unchanged
    (dealarg_for would instead return only the tail).
    """
    starti=endi=0
    left=right=0
    ret=''
    for i in range(0,len(ty)):
        if ty[i]=='(':
            if left==right:
                # depth 0: this '(' opens the outermost call
                starti=i
                left=left+1
            else:
                left=left+1
        elif ty[i]==')':
            if left==right+1:
                # this ')' closes the outermost '('
                endi=i
                right=right+1
                if starti+1<endi:
                    # non-empty args: emit callee-->args plus the untouched tail
                    ret=ret+ty[:starti]+"-->"+ty[starti+1:endi]+ty[endi+1:len(ty)]
                    break
            else:
                right=right+1
    if ret=='':
        # nothing rewritten: hand back the input untouched
        return ty
    else:
        return ret
def dealarg(ty):
    """Rewrite ``f( args )`` as ``f-->args`` using the FIRST '(' and LAST ')'."""
    starti = max(ty.find('('), 0)
    endi = ty.rfind(')')
    # the original backwards scan never inspected index 0, so a ')' at
    # position 0 (or a missing one) leaves endi at 0
    if endi < 1:
        endi = 0
    return ty[:starti] + "-->" + ty[starti + 1:endi] + ty[endi + 1:len(ty)]
#Besides the data-flow relationship, also consider which variable is more relevant to the target API; hence the list order is the inverse of the argument order.
def dealist(ty):
    """Rewrite ``obj[index]`` as ``obj-->index`` (first '[' to last ']'), dropping any tail."""
    starti = max(ty.find('['), 0)
    endi = ty.rfind(']')
    # the original backwards scan never inspected index 0
    if endi < 1:
        endi = 0
    return ty[:starti] + '-->' + ty[starti + 1:endi]
def deallist(ty):
    """Rewrite ``obj[index]`` as ``obj-->index`` using the LAST '[' / ']' pair."""
    starti = ty.rfind('[')
    endi = ty.rfind(']')
    if starti < 0:
        starti = 0
    if endi < 0:
        endi = 0
    return ty[:starti] + "-->" + ty[starti + 1:endi]
def del_multi_arg(ty):
    """Split ``f(a, b=c, ...)`` into ``a|c-->b|...-->f`` dataflow form.

    Keyword arguments ``k=v`` become ``v-->k``; positional arguments are kept
    as-is; all pieces are joined with '|' and flow into the callee name.
    """
    si = max(ty.find('('), 0)
    ei = ty.rfind(')')
    if ei < 0:
        ei = 0
    pieces = []
    for arg in ty[si + 1:ei].split(','):
        if '=' in arg:
            lr = arg.split('=')
            pieces.append(lr[1] + '-->' + lr[0])
        else:
            pieces.append(arg)
    return '|'.join(pieces) + '-->' + ty[:si]
def addty(ty,i,lsy):
    """Return *lsy* joined with '-->', with element *i* replaced by *ty*."""
    if len(lsy) == 1:
        return ty
    pieces = list(lsy)
    pieces[i] = ty
    return '-->'.join(pieces)
def delop(op):
    """Recursively normalise an assignment expression into '-->' dataflow form.

    Splits *op* on '-->', rewrites the first segment that still contains a
    call / list / attribute / '=' construct, then recurses on the whole
    string.  Each intermediate result is stored in the module-level global
    ``final``; the caller reads ``final`` after the recursion bottoms out.
    """
    lsop=op.split('-->')
    global final
    for i in range(0,len(lsop)):
        ty=lsop[i]
        # call with multiple keyword args, e.g. f(a=1,b=2)
        if re.match('[_a-zA-Z0-9\.\[\]\|]+\(.*\)',ty) and ',' in ty and '=' in ty:
            ty=del_multi_arg(ty)
            op=addty(ty,i,lsop)
            final=op
            delop(op)
        # bare comma list: merge into an or-group a|b
        elif ',' in ty:
            ty=re.sub(',','|',ty)
            op=addty(ty,i,lsop)
            final=op
            delop(op)
        # call with a single keyword arg, e.g. f(a=b)
        elif re.match('[_a-zA-Z0-9\.\[\]\|]+\(.*=.*\)',ty):
            ty=del_arg_op(ty)
            op=addty(ty,i,lsop)
            final=op
            delop(op)
        # plain assignment: rhs flows into lhs
        elif '=' in ty:
            lr=ty.split('=')
            ty=lr[1]+'-->'+lr[0]
            op=addty(ty,i,lsop)
            final=op
            delop(op)
        # positional call f(x): args flow into the callee
        elif re.match('[_a-zA-Z0-9\.\[\]]+\(.*\)',ty):
            ty=dealarg_for(ty)
            op=addty(ty,i,lsop)
            final=op
            delop(op)
        # subscript a[b]: index flows from the container
        elif re.match('[_a-zA-Z0-9\.\[\]]+\[.*\]',ty):
            ty=dealist(ty)
            op=addty(ty,i,lsop)
            final=op
            delop(op)
        # attribute access a.b: expand dots into arrows
        elif '.' in ty:
            ty=re.sub('\.','-->',ty)
            op=addty(ty,i,lsop)
            final=op
            delop(op)
def GetMiddleStr(content,startStr,endStr):
    """Return the substring of *content* between *startStr* and *endStr*.

    :raises ValueError: (from ``str.index``) when either marker is missing.
    """
    startIndex = content.index(startStr) + len(startStr)
    # BUG FIX: search for endStr AFTER the start marker.  The old code
    # searched from position 0, which produced an empty/garbage slice when
    # endStr also occurred before startStr.
    endIndex = content.index(endStr, startIndex)
    return content[startIndex:endIndex]
def prex(x):
    """Drop spaces; if *x* pairs a tuple with another term, strip all parentheses."""
    x = re.sub(' ', '', x)
    tuple_pat = '\(.*,.*\)'
    term_pat = '[a-zA-Z0-9_\'\"\(\)|]+'
    if (re.match(tuple_pat + '\,' + term_pat, x)
            or re.match(term_pat + '\,' + tuple_pat, x)
            or re.match(tuple_pat + '\,' + tuple_pat, x)):
        x = re.sub('[\(\)]+', '', x)
    return x
def dealtuple(ty):
    """Turn a tuple literal like ``(a, b, c)`` into ``a|b|c``."""
    inner = re.sub(' ', '', ty)[1:-1]  # drop spaces, then the surrounding parens
    return '|'.join(inner.split(','))
def deald(ty):
    """Replace every ',' with '|'."""
    return ty.replace(',', '|')
def dealcall(ty):
    """Expand attribute dots into '-->' dataflow arrows."""
    return ty.replace('.', '-->')
def rbl(tempy):
    """Return 1 when a ')' appears before its matching '(' in *tempy*, else 0."""
    opens = closes = 0
    last_open = last_close = 0
    for idx, ch in enumerate(tempy):
        if ch == '(':
            last_open = idx
            opens += 1
        elif ch == ')':
            last_close = idx
            closes += 1
            # more closers than openers so far => brackets are out of order
            if closes > opens or last_close < last_open:
                return 1
    return 0
def findcircle_call(tempy):
    """Recursively normalise a call expression into '-->' dataflow form.

    Result is delivered through the module-level global ``fcall`` (set to ''
    when the brackets of *tempy* are unbalanced or mis-ordered).
    """
    global fcall
    # reject fragments with unbalanced or wrongly ordered parentheses
    if tempy.count('(') != tempy.count(')') or rbl(tempy)!=0:
        fcall=''
        return
    tempy=recheck(tempy)
    ls=tempy.split('-->')
    for i in range(0,len(ls)):
        ty=ls[i]
        ty=re.sub(' ','',ty)
        if ',' in ty:
            # comma list: merge into an or-group a|b
            ty=re.sub(',','|',ty)
            tempy=addty(ty,i,ls)
            fcall=tempy
            findcircle_call(tempy)
        elif '.' in ty and not re.match('.*\(.*\..*\).*',ty):
            # dot outside any parentheses: expand into arrows
            ty=re.sub('\.','-->',ty)
            tempy=addty(ty,i,ls)
            fcall=tempy
            findcircle_call(tempy)
        elif re.match('.*[a-zA-Z0-9_]+\(.*[a-zA-Z0-9_\'\"\(\)\|\-\>\:\[\]\,\.]+\).*',ty) and re.match('.*\(.*[a-zA-Z0-9_]+.*\).*',ty):
            # call with a non-trivial argument list
            ty=re.sub('\(\)','',ty)
            ty=re.sub('\(\[\]\)','',ty)
            if not (re.match('.*[a-zA-Z0-9_]+\(.*[a-zA-Z0-9_\'\"\(\)\|\-\>\:\[\]\,\.]+\).*',ty) and re.match('.*\(.*[a-zA-Z0-9_]+.*\).*',ty)):
                tempy=addty(ty,i,ls)
                # NOTE(review): assigns a LOCAL `final` (no `global` here), so
                # this branch never publishes a result — looks like a bug.
                final=tempy
                findcircle_call(tempy)
                continue
            ty=dealarg_call(ty)
            tempy=addty(ty,i,ls)
            fcall=tempy
            findcircle_call(tempy)
        elif '.' in ty :
            ty=re.sub('\.','-->',ty)
            tempy=addty(ty,i,ls)
            fcall=tempy
            findcircle_call(tempy)
        elif re.match('[a-zA-Z0-9_]+\[[a-zA-Z0-9_]+\]',ty):
            # simple subscript a[b]
            ty=deallist(ty)
            tempy=addty(ty,i,ls)
            fcall=tempy
            findcircle_call(tempy)
def del_call(line):
    """Extract the first call expression from *line* and return its dataflow.

    The heavy lifting is done by :func:`findcircle_call`, which communicates
    its result through the module-level global ``fcall``; '' means either no
    call was found or the expression was malformed.
    """
    calls=re.findall('[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\)',line)
    call=''
    if len(calls)>0:
        call=calls[0]
    else:
        return call
    # strip literal arguments and subscripts before analysing the chain
    call=re.sub('\(\'.*\'\)','',call)
    call=re.sub('\(\".*\"\)','',call)
    call=re.sub('\[\'.*\'\]','',call)
    call=re.sub('\[\".*\"\]','',call)
    call=re.sub('\(\)','',call)
    call=re.sub('\([0-9]+\)','',call)
    call=re.sub('\[[0-9:\-]+\]','',call)
    call=call.strip()
    call=re.sub(' ','',call)
    call=recheck(call)
    findcircle_call(call)
    return fcall
def findcircle(tempy):
    """Recursively normalise a for-expression iterable into '-->' dataflow form.

    Like :func:`findcircle_call` but for the pieces of a ``for ... in ...``
    statement; the result is delivered through the module-level global
    ``fresult``.
    """
    global fresult
    lsy=tempy.split("-->")
    for i in range(0,len(lsy)):
        ty=lsy[i]
        ty=ty.strip()
        if re.match(r'\(.*,.*\)',ty):
            # tuple literal (a, b) -> a|b
            ty=dealtuple(ty)
            tempy=addty(ty,i,lsy)
            fresult=tempy
            findcircle(tempy)
        elif ',' in ty and '\',\'' not in ty:
            # bare comma list (but not a quoted ',' literal)
            ty=deald(ty)
            tempy=addty(ty,i,lsy)
            fresult=tempy
            findcircle(tempy)
        elif re.match('.*[a-zA-Z0-9_]+\(.*[a-zA-Z0-9_\'\"\(\)\|\-\>\:]+\).*',ty):
            # call with non-empty arguments
            ty=dealarg_for(ty)
            tempy=addty(ty,i,lsy)
            fresult=tempy
            findcircle(tempy)
        elif '.' in ty and '\'\.\'' not in ty:
            # attribute access (but not a quoted '.' literal)
            ty=dealcall(ty)
            tempy=addty(ty,i,lsy)
            fresult=tempy
            findcircle(tempy)
        elif re.match('.*\[\'.*\'\].*',ty) or re.match('.*\[\".*\"\].*',ty) or re.match('.*\[[0-9:]+\].*',ty):
            # literal subscript: drop it entirely
            tempy=re.sub('\[.*\]','',ty)
            fresult=tempy
            findcircle(tempy)
def delfor(line):
    """Turn a ``for <x> in <y>`` statement into a dataflow string ``y-->x``.

    Extracts the loop variable(s) *x* and the iterable *y*, strips literal
    subscripts/arguments from both, normalises each side through
    :func:`findcircle` (which reports via the global ``fresult``), and
    returns ``<iterable-flow>-->&lt;target-flow>`` plus a trailing newline,
    or '' when the statement cannot be parsed.
    """
    ops=re.findall('for\s[_a-zA-Z0-9\.\,\s]+\sin\s[_a-zA-Z0-9\,\.\[\]\(\)\{\}\s]+',line)
    s=''
    if len(ops)>0:
        s=ops[0]
    else:
        return s
    # trim a dangling comma or an unmatched trailing closer
    if s.endswith(','):
        s=s[:-1]
    if (s.endswith(']') and s.count('[')<s.count(']')) or (s.endswith(')') and s.count('(')<s.count(')')) or (s.endswith('}') and s.count('{')<s.count('}')):
        s=s[:-1]
    s=recheck(s)
    if s.strip().endswith('in'):
        # iterable was entirely stripped away — nothing to analyse
        return ''
    try:
        x=GetMiddleStr(s,'for ',' in ')
    except Exception:
        return ''
    x=x.strip()
    y=s.split(' in ')[1].strip()
    # scrub spaces, empty calls and literal subscripts/arguments from both sides
    y=re.sub(' ','',y)
    x=re.sub(' ','',x)
    x=re.sub('\(\)','',x)
    y=re.sub('\(\)','',y)
    y=re.sub('\[\'.*\'\]','',y)
    y=re.sub('\[\".*\"\]','',y)
    y=re.sub('\(\'.*\'\)','',y)
    y=re.sub('\(\".*\"\)','',y)
    y=re.sub('\[[0-9:]+\]','',y)
    y=re.sub('\([0-9:]+\)','',y)
    y=re.sub('\[.*[\+\:]+.*\]','',y)
    y=re.sub('\+\=','',y)
    y=re.sub('r\'.*\'\,','',y)
    x=re.sub('\[\'.*\'\]','',x)
    x=re.sub('\[\".*\"\]','',x)
    x=re.sub('\(\'.*\'\)','',x)
    x=re.sub('\(\".*\"\)','',x)
    x=re.sub('\[[0-9:]+\]','',x)
    x=re.sub('\([0-9:]+\)','',x)
    x=re.sub('\[.*[\+\:]+.*\]','',x)
    x=re.sub('\+\=','',x)
    x=re.sub('r\'.*\'\,','',x)
    # normalise the iterable side; findcircle reports via global fresult
    y=recheck2(y)
    findcircle(y)
    global fresult
    if fresult=='':
        rety=y
    else:
        rety=fresult
    fresult=''
    # normalise the loop-variable side
    x=prex(x)
    findcircle(x)
    if fresult=='':
        retx=x
    else:
        retx=fresult
    fresult=''
    forx=rety+"-->"+retx
    # NOTE(review): `forx` is unused; only the newline-terminated form is returned
    s2=rety+"-->"+retx+"\n"
    return s2
def finalcheck(s):
    """Final cleanup of a dataflow string; '' when it is a trivial self-flow."""
    for pattern, repl in (('\*\*', ''), ('\*args', 'args'),
                          ('[\+\/\*]', '|'), ('\n', '')):
        s = re.sub(pattern, repl, s)
    parts = s.split('-->')
    # a two-element flow a-->a carries no information
    if len(parts) == 2 and parts[0] == parts[1]:
        return ''
    return s
class ShowProcess():
    """Console progress bar rendered as ``[>>>>----]42.00%``."""
    i = 0
    max_steps = 0
    max_arrow = 50
    infoDone = 'done'

    def __init__(self, max_steps, infoDone = 'Done'):
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone

    def show_process(self, i=None):
        """Advance (or jump to step *i*) and redraw; auto-close when finished."""
        self.i = self.i + 1 if i is None else i
        done = int(self.i * self.max_arrow / self.max_steps)
        todo = self.max_arrow - done
        pct = self.i * 100.0 / self.max_steps
        bar = '[{}{}]{:.2f}%\r'.format('>' * done, '-' * todo, pct)
        sys.stdout.write(bar)
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()

    def close(self):
        print('')
        print(self.infoDone)
        self.i = 0
def recheck2(l):
    """Aggressive literal-stripper: removes ALL bracketed spans and string literals.

    Stronger than :func:`recheck` — any ``[...]``, ``(...)`` or ``{...}`` span
    is deleted outright.  Substitutions run in fixed order; result is trimmed.
    """
    _SUBS = (
        ('return ', ''),
        ('\[.*\]', ''),
        ('\(.*\)', ''),
        ('\{.*\}', ''),
        ('\+\=', '='),
        ('r\'.*\'\,*\s*', ''),
        ('b\'.*\'\,*\s*', ''),
        ('rb\'.*\'\,*\s*', ''),
        ('f\'.*\'\,*\s*', ''),
        ('\'.*\'\,*\s*', ''),
        ('\".*\"\,*\s*', ''),
        ('r\".*\"\,*\s*', ''),
        ('b\".*\"\,*\s*', ''),
        ('rb\".*\"\,*\s*', ''),
        ('f\".*\"\,*\s*', ''),
    )
    line = l
    for pattern, repl in _SUBS:
        line = re.sub(pattern, repl, line)
    return line.strip()
def get_current_dataflow2(current_context,caller):
    """Extract '-->' dataflow strings from the source text *current_context*.

    Only lines mentioning *caller* are considered (pass '__all__' to scan
    every line).  Each surviving line is classified as an assignment, a
    for-statement or a call and converted to a dataflow string; flows are
    then chained transitively.  Returns all flows for '__all__', otherwise
    only flows containing 'unknown_api'.
    """
    dataflows=[]
    lines=current_context.split('\n')
    for line in lines:
        # only analyse lines that mention the caller (unless scanning all)
        if (not caller in line) and (caller!='__all__') :
            continue
        # this variant only cares about lines with an attribute call
        if not ('.' in line and '(' in line):
            continue
        line=line.strip()
        if line == '' or line.endswith('='):
            continue
        tpline=line
        if line.startswith('#') or line.startswith('def ') or line.startswith('class '):
            continue
        elif 'lambda' in line:
            continue
        elif re.match('.*=\s*[0-9\.\:\-]+',line):
            # assignment of a numeric literal carries no dataflow
            continue
        line2=re.sub(' ','',line)
        # skip assignments of plain literals / comparisons
        if re.match('.*=\'.*\'.*',line2) or re.match('.*=\".*\".*',line2) or re.match('.*=[0-9\.]+.*',line2) or re.match('.*=None.*',line2) or re.match('.*=True.*',line2) or re.match('.*=False.*',line2) or "==" in line2 or line2.endswith('='):
            continue
        line=re.sub('#.*','',line)
        if '=' in line:
            # --- assignment: rhs flows into lhs ---
            line=recheck2(line)
            if line.endswith('='):
                continue
            text = re.compile(r".*[a-zA-Z]$")
            if not text.match(line):
                continue
            ops=re.findall('[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}]+\s*=\s*[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}\*\/\-\%\*\,\=\s\+]+',line)
            if len(ops)==0:
                continue
            line=ops[0]
            line=re.sub('[\+\-\/\*]+','|',line)
            ls=line.split('=')
            x=ls[0]
            y=ls[1]
            x=re.sub('\.','-->',x)
            y=re.sub('\.','-->',y)
            tf=y+'-->'+x
            # reject flows that still contain brackets/quotes
            opps=re.findall('[\(\{\)\}\[\]\'\"]',tf)
            if len(opps)!=0:
                continue
            tf=tf.strip()
            if tf!='' and not tf in dataflows:
                dataflows.append(tf)
        elif re.match('.*for\s.*\sin\s.*',line):
            # --- for-statement: iterable flows into the loop variable ---
            line=recheck(line)
            fors=delfor(line)
            tff=str(fors)
            tff=finalcheck(tff)
            opps=re.findall('[\(\{\)\}\[\]\'\"]',tff)
            if len(opps)!=0:
                continue
            tff=tff.strip()
            if tff!='' and not tff in dataflows:
                dataflows.append(tff)
        elif re.match('.*[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\).*',line) and not line.startswith('def ') and not line.startswith('class '):
            # --- bare call: treat the dotted chain as a flow ---
            calls=re.findall('[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\)',line)
            call=''
            if len(calls)>0:
                call=calls[0]
            else:
                continue
            line=recheck2(call)
            line=re.sub('[\+\-\/]+','|',line)
            cas=re.sub('\.','-->',line)
            opps=re.findall('[\(\{\)\}\[\]\'\"]',cas)
            if len(opps)!=0:
                continue
            if not '-->' in cas:
                continue
            cas=cas.strip()
            if cas!='' and not cas in dataflows:
                dataflows.append(cas)
    # --- transitive chaining: join flow i's tail with flow j's head ---
    newflows=[]
    oldflows=dataflows
    # NOTE(review): oldflows/lens/used are never read afterwards
    lens=5*len(dataflows)
    used=[0]*lens
    for i in range(0,len(dataflows)):
        current_flow_end=dataflows[i].split('-->')[-1]
        current_flow_head=dataflows[i].split('-->')[0]
        if current_flow_end==current_flow_head:
            # self-loop: chaining it would never terminate usefully
            continue
        for j in range(i,len(dataflows)):
            current_flow_end=dataflows[i].split('-->')[-1]
            next_flow_head=dataflows[j].split('-->')[0]
            # the '|' variants match a name inside an or-group like a|b
            s1=current_flow_end+'|'
            s2='|'+current_flow_end
            s3=next_flow_head+'|'
            s4='|'+next_flow_head
            if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
                # flow i ends where flow j begins: append j's tail to i
                y=dataflows[j].replace(next_flow_head,'',1)
                newflow=dataflows[i]+y
                if not newflow in newflows:
                    tmp=[i,newflow]
                    newflows.append(tmp)
            elif s3 in current_flow_end or s4 in current_flow_end:
                # flow j begins inside i's or-group tail: prepend i's head to j
                x=dataflows[i].replace(current_flow_end,'')
                newflow=x+dataflows[j]
                if not newflow in newflows:
                    tmp=[i,newflow]
                    newflows.append(tmp)
    # --- second chaining pass over the newly created flows ---
    updateflow=[]
    for i in range(0,len(newflows)):
        pos=newflows[i][0]
        flow=newflows[i][1]
        for j in range(pos+1,len(dataflows)):
            current_flow_end=flow.split('-->')[-1]
            next_flow_head=dataflows[j].split('-->')[0]
            s1=current_flow_end+'|'
            s2='|'+current_flow_end
            s3=next_flow_head+'|'
            s4='|'+next_flow_head
            if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
                y=dataflows[j].replace(next_flow_head,'',1)
                newflow=flow+y
                if not newflow in updateflow:
                    updateflow.append(newflow)
            elif s3 in current_flow_end or s4 in current_flow_end:
                x=flow.replace(current_flow_end,'')
                newflow=x+dataflows[j]
                if not newflow in updateflow:
                    updateflow.append(newflow)
    # NOTE(review): updateflow is built but never merged back — possible bug
    for i in range(0,len(newflows)):
        flow=newflows[i][1]
        dataflows.append(flow)
    retflow=[]
    for flow in dataflows:
        if 'unknown_api' in flow:
            retflow.append(flow)
    if caller=='__all__':
        return dataflows
    else:
        return retflow
def get_current_dataflow(current_context,caller):
    """Extract '-->'-separated dataflow chains for `caller` from source text.

    Scans every line of `current_context`, turning assignments, for-loops and
    call expressions into dataflow strings via the module-level helpers
    (recheck/check/delop/delfor/del_call/finalcheck), then joins chains whose
    endpoints match.  Returns every chain when caller=='__all__', otherwise
    only the chains containing 'unknown_api'.
    """
    dataflows=[]
    lines=current_context.split('\n')
    #process_bar = ShowProcess(len(lines), 'Start to deal with the file')
    for line in lines:
        # Only consider lines mentioning the caller (unless scanning everything).
        if (not caller in line) and (caller!='__all__') :
            continue
        if line.strip()=='':
            continue
        #print('NOTE!',line)
        tpline=line.strip()
        line=line.strip()
        # Reject lines that cannot yield a dataflow: comments, definitions,
        # closing brackets, unbalanced brackets, lambdas, numeric assignments.
        if line.startswith('#') or line.startswith('def ') or line.startswith('class '):
            continue
        elif line.endswith('(') or line.endswith('[') or line.endswith('{'):
            line=line[:-1]
        elif line.startswith(')') or line.startswith('}') or line.startswith(']'):
            continue
        elif line.count('(') != line.count(')') or line.count('[') != line.count(']') or line.count('{') != line.count('}'):
            continue
        elif 'lambda' in line:
            continue
        elif re.match('.*=\s*[0-9\.]+',line.strip()):
            continue
        line2=re.sub(' ','',line)
        # Pure literal assignments (strings, numbers, None/True/False, empty
        # containers) and comparisons carry no API information — skip them.
        if re.match('.*=\'.*\'.*',line2) or re.match('.*=\".*\".*',line2) or re.match('.*=[0-9\.]+.*',line2) or re.match('.*=None.*',line2) or re.match('.*=True.*',line2) or re.match('.*=False.*',line2) or re.match('.*=\{\}.*',line2) or re.match('.*=\(\).*',line2) or re.match('.*=\[\].*',line2) or "==" in line2 or line2.endswith('='):
            #print('yes!')
            continue
        line=re.sub('#.*','',line)
        if '=' in line:
            # Assignment expression: normalise, extract the lhs=rhs pair and
            # turn it into an operator dataflow via delop().
            #print(line)
            #print('yes!')
            line=recheck(line)
            if line.endswith('='):
                continue
            if line.endswith(',') or line.endswith(':') or line.endswith('+') or line.endswith('-') or line.endswith('*') or line.endswith('/'):
                line=line[:-1].strip()
            #print(line)
            ops=re.findall('[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}]+\s*=\s*[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}\*\/\-\%\*\,\=\s\+]+',line)
            #print(ops)
            if len(ops)>0:
                s=ops[0]
                s=recheck(s)
                rs=s.split('=')[1]
                ps=re.findall('[\,\-\+\*\/\%]+',rs)
                # A right-hand side with no operators but several spaces is
                # unlikely to be an expression we can model — ignore it.
                if len(ps)==0 and rs.count(' ')>1:
                    #print('ignored\n')
                    continue
                elif s.endswith(')') and s.count(')')-s.count('(')==1:
                    s=s[:-1]
                elif s.endswith(', )'):
                    s=s[:-3]+')'
                s=re.sub('\)\,.*$','',s)
                s=check(s)
                if s.count('(') != s.count(')') or s.count('[') != s.count(']') or s.count('{') != s.count('}'):
                    #print('ignored\n')
                    continue
                else:
                    #s=re.sub('\)\,.*$','',s)
                    #print(s)
                    s=re.sub(' ','',s)
                    delop(s)
                    #print(file)
                    #print(s,final,'\n')
                    #print('OP DATAFLOW:')
                    #print(final,'\n')
                    # NOTE(review): `final` is a module-level global written
                    # by delop() — presumably it holds delop's result; confirm.
                    tf=final
                    tf=finalcheck(tf)
                    if tf!='' and not tf in dataflows:
                        dataflows.append(tf)
                    #print(tf)
                    #with open('tmp_dataflow/op_expr.txt','a+') as fo:
                        #fo.write(file+'#'+str(num)+": "+tpline+'\n'+s+'\n'+final+'\n\n')
        elif re.match('.*for\s.*\sin\s.*',line):
            # for-loop expression: delfor() extracts the loop dataflow.
            line=recheck(line)
            #print('FOR_EXPR')
            #print(file,tpline)
            fors=delfor(line)
            #print('FOR DATAFLOW:')
            #print(str(fors),'\n')
            tff=str(fors)
            tff=finalcheck(tff)
            if tff!='' and not tff in dataflows:
                dataflows.append(tff)
            #print(tff)
            #with open('tmp_dataflow/for_expr.txt','a+') as ff:
                #ff.write(file+'#'+str(num)+": "+tpline+'\n'+str(fors)+'\n\n')
        elif re.match('.*[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\).*',line) and not line.startswith('def ') and not line.startswith('class '):
            # Bare call expression: del_call() extracts the call dataflow.
            #print(file)
            #print(line,'\n')
            #line=recheck(line)
            #print(line)
            cas=del_call(line)
            #print('CALL DATAFLOW:')
            #print(cas,'\n')
            cas=finalcheck(cas)
            if cas!='' and not cas in dataflows:
                dataflows.append(cas)
            #print(cas)
            #callflow.append(ls2.strip())
            #with open('tmp_dataflow/call_expr.txt','a+') as fc:
                #fc.write(file+'#'+str(num)+'\n'+line+'\n')
        #process_bar.show_process()
    # ---- Join pass 1: concatenate chains whose end matches another's head. ----
    newflows=[]
    # NOTE(review): `oldflows` and `used` are dead — neither is read below.
    oldflows=dataflows
    lens=5*len(dataflows)
    used=[0]*lens
    for i in range(0,len(dataflows)):
        #flag=0
        current_flow_end=dataflows[i].split('-->')[-1]
        current_flow_head=dataflows[i].split('-->')[0]
        if current_flow_end==current_flow_head:
            continue
        for j in range(i,len(dataflows)):
            #print(j,len(dataflows))
            current_flow_end=dataflows[i].split('-->')[-1]
            next_flow_head=dataflows[j].split('-->')[0]
            # '|' separates alternatives inside a segment; s1..s4 test for a
            # match against any alternative of the end/head segments.
            s1=current_flow_end+'|'
            s2='|'+current_flow_end
            s3=next_flow_head+'|'
            s4='|'+next_flow_head
            if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
                y=dataflows[j].replace(next_flow_head,'',1)
                #y=re.sub(next_flow_head,'',dataflows[j])
                newflow=dataflows[i]+y
                #print('yes1!')
                #print(i,current_flow_end,next_flow_head,s1,s2)
                #print(next_flow_head)
                #print(dataflows[i])
                #print(dataflows[j])
                #print(y)
                #print(newflow)
                # NOTE(review): newflows holds [index, flow] pairs, so the
                # string `newflow` is never "in" it — this dedup check is
                # always True and duplicates can accumulate.
                if not newflow in newflows:
                    tmp=[i,newflow]
                    newflows.append(tmp)
                #if not newflow in dataflows:
                    #dataflows.append(newflow)
                    #print(newflow)
                #dataflows[i]=newflow
                #print('yes!')
                #print(dataflows[i],' , ',dataflows[j])
                #print(newflow)
                #i=i-1
                #used[j]=1
                #del dataflows[j]
                #j=j-1
                #flag=1
            elif s3 in current_flow_end or s4 in current_flow_end:
                #x=re.sub(current_flow_end,'',dataflows[i])
                x=dataflows[i].replace(current_flow_end,'')
                #print('flow_end:',current_flow_end)
                #print('xxxx',x)
                newflow=x+dataflows[j]
                #dataflows[i]=newflow
                #print('yes2!')
                #print(dataflows[i])
                #print(dataflows[j])
                #print(x)
                #print(newflow)
                if not newflow in newflows:
                    tmp=[i,newflow]
                    newflows.append(tmp)
                #if not newflow in dataflows:
                    #dataflows.append(newflow)
                    #print(newflow)
                #dataflows[i]=newflow
                #print('yes2!')
                #print(dataflows[i],' , ',dataflows[j])
                #print(newflow)
                #i=i-1
                #used[j]=1
                #del dataflows[j]
                #j=j-1
                #flag=1
        '''
        if flag==0 and used[i]==0:
            if not dataflows[i] in newflows:
                newflows.append(dataflows[i])
        if flag==1:
            i=i-1
        '''
    #print('\n')
    # ---- Join pass 2: extend the joined flows against later chains. ----
    # NOTE(review): `updateflow` is collected but never merged into
    # `dataflows` — pass 2's results are currently discarded; confirm intent.
    updateflow=[]
    for i in range(0,len(newflows)):
        #flag=0
        pos=newflows[i][0]
        flow=newflows[i][1]
        for j in range(pos+1,len(dataflows)):
            #print(j,len(dataflows))
            current_flow_end=flow.split('-->')[-1]
            next_flow_head=dataflows[j].split('-->')[0]
            s1=current_flow_end+'|'
            s2='|'+current_flow_end
            s3=next_flow_head+'|'
            s4='|'+next_flow_head
            if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
                y=dataflows[j].replace(next_flow_head,'',1)
                #y=re.sub(next_flow_head,'',dataflows[j])
                newflow=flow+y
                if not newflow in updateflow:
                    #print('yes!',newflow)
                    updateflow.append(newflow)
            elif s3 in current_flow_end or s4 in current_flow_end:
                #x=re.sub(current_flow_end,'',dataflows[i])
                x=flow.replace(current_flow_end,'')
                #print('flow_end:',current_flow_end)
                #print('xxxx',x)
                newflow=x+dataflows[j]
                if not newflow in updateflow:
                    #print('yes!',newflow)
                    updateflow.append(newflow)
    # Append the pass-1 joins to the result set.
    for i in range(0,len(newflows)):
        flow=newflows[i][1]
        dataflows.append(flow)
    #process_bar.show_process()
    retflow=[]
    for flow in dataflows:
        if 'unknown_api' in flow:
            retflow.append(flow)
    if caller=='__all__':
        return dataflows
    else:
        return retflow
def lcs(X, Y):
    """Return the length of the longest common subsequence of X and Y.

    Classic bottom-up dynamic programming over a (len(X)+1) x (len(Y)+1)
    table; cell [r][c] holds the LCS length of X[:r] and Y[:c].
    """
    rows, cols = len(X), len(Y)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            if X[r - 1] == Y[c - 1]:
                # Characters match: extend the diagonal subsequence.
                table[r][c] = table[r - 1][c - 1] + 1
            else:
                # No match: best of dropping a character from either string.
                table[r][c] = max(table[r - 1][c], table[r][c - 1])
    return table[rows][cols]
# end of function lcs
def get_sim_score(api,token,d):
    """Token similarity: 2*LCS(api, token) / (d * (len(api) + len(token))).

    `d` is a distance weight — larger distances shrink the score.
    """
    common = float(lcs(api, token))
    denom = float(d) * float(len(api) + len(token))
    return float(common * 2.0 / denom)
def get_tosim_score(api,maxflow):
    """Average distance-weighted similarity of `api` to the tokens of a flow.

    When `maxflow` contains several space-separated flows, the first one
    containing 'unknown_api' is used; the 'unknown_api' segment itself is
    excluded from scoring and its position weights the other segments.

    Returns a float (sum of pairwise similarity scores / segment count).
    """
    # Fix: default mfx to maxflow so it is always bound.  Previously, a
    # space-containing maxflow with no 'unknown_api' component left mfx
    # undefined and raised UnboundLocalError.
    mfx = maxflow
    if ' ' in maxflow:
        flows = maxflow.split(' ')
        for flow in flows:
            if 'unknown_api' in flow:
                mfx = flow
                break
    ls = mfx.split('-->')
    # apindex: position of the 'unknown_api' segment (len(ls) if absent).
    apindex = len(ls)
    for k in range(0, len(ls)):
        if 'unknown_api' in ls[k]:
            apindex = k
    tosim = 0.0
    for i in range(0, len(ls)):
        if i != apindex:
            # Segments farther from the unknown slot get smaller weight.
            sim_score = get_sim_score(api, ls[i], abs(apindex - i))
            tosim += sim_score
    tosim = float(tosim / float(len(ls)))
    #print(tosim)
    return tosim
def standard(scsk):
    """Normalize a score dict: 10**x per value, z-score, then sigmoid.

    Returns a new dict with values in (0, 1).  If normalization cannot be
    performed (empty input, or zero variance causing division by zero),
    the caller's dict is returned unchanged.
    """
    # Fix: work on a copy.  The original aliased the input (scs = scsk), so
    # the pow(10, ·) step mutated the caller's dict and the `except` path
    # returned a half-transformed dict instead of the original scores.
    scs = dict(scsk)
    data = []
    for k in scs.keys():
        scs[k] = pow(10, scs[k])
        data.append(scs[k])
    lenth = len(data)
    if lenth == 0:
        return scsk
    total = sum(data)
    ave = float(total) / lenth
    # Population standard deviation of the exponentiated scores.
    tempsum = sum([pow(data[i] - ave, 2) for i in range(lenth)])
    tempsum = pow(float(tempsum) / lenth, 0.5)
    try:
        for k in scs.keys():
            scs[k] = (scs[k] - ave) / tempsum
            scs[k] = 1 / (1 + np.exp(-scs[k]))  # sigmoid squashes into (0, 1)
    except Exception:
        # e.g. ZeroDivisionError when all scores are identical (tempsum == 0)
        return scsk
    return scs
def get_ngram_scores(flows,apis,callee):
    """Score each candidate api with the SRILM n-gram language model.

    Writes `flows` (one sentence per line) to output/test.txt, runs the
    SRILM `ngram` tool against trainfile.lm to produce output/<callee>.ppl,
    then pulls each api's `logprob=` value out of the -debug 2 output.
    Apis never found in the .ppl file score 0.0.
    """
    s=''
    #print(apis)
    #print(flows)
    ngramscore={}
    for flow in flows:
        s=s+flow+'\n'
    with open('output/test.txt','w+') as f:
        f.write(s)
    #print(s)
    #os.chdir('dataflow/')
    # NOTE(review): `callee` is interpolated into a shell command (here and
    # in the `rm` below); a value containing shell metacharacters would be
    # injected — sanitize upstream or switch to subprocess with a list.
    os.system('srilm-1.7.2/lm/bin/i686-m64/ngram -ppl output/test.txt -order 4 -lm trainfile.lm -debug 2 > output/'+callee+'.ppl')
    with open('output/'+callee+'.ppl',encoding='ISO-8859-1') as f:
        lines=f.readlines()
        for key in apis:
            flag=0  # 1 once `key` is found as a whole token on some line
            for i in range(0,len(lines)):
                kname=lines[i].strip().split(' ')
                for item in kname:
                    if item==key:
                        flag=1
                        break
                if flag==1:
                    #print(lines[i])
                    # The logprob line follows the sentence line in -debug 2
                    # output; skip forward until it appears.
                    j=i+1
                    while 'logprob=' not in lines[j]:
                        j=j+1
                    score=re.findall('logprob=\s[0-9\-\.]+',lines[j])
                    # Slice off the 'logprob= ' prefix (9 chars) of the match.
                    ngramscore[key]=float(score[0][9:])
                    break
            if flag==0:
                ngramscore[key]=0.0
    #ngramscore=standard(ngramscore)
    #print(ngramscore)
    #ngramscore=sorted(ngramscore.items(), key=lambda x: x[1], reverse=True)
    #print(ngramscore)
    os.system('rm output/'+callee+'.ppl')
    #os.chdir('../')
    return ngramscore
def get_ngram_score(apis,current_dataflow,baseflag,basetype,callee):
    """Build a candidate flow sentence per api and score them all.

    baseflag==1: receiver has a builtin type, so each sentence is simply
    '<basetype> <api>'.  Otherwise every extracted dataflow is instantiated
    with each api ('unknown_api' substituted) and '-->' turned into spaces.
    Returns the dict produced by get_ngram_scores().
    """
    flows = []
    if baseflag == 1:
        for candidate in apis:
            # Skip dunder names, ALL_CAPS constants and the bare underscore.
            if candidate.startswith('__') or re.match('[A-Z0-9_]+$', candidate) or candidate.strip() == '_':
                continue
            flows.append(basetype + ' ' + candidate)
    else:
        for flow in current_dataflow:
            for candidate in apis:
                if candidate.startswith('__') or re.match('[A-Z0-9_]+$', candidate) or candidate.strip() == '_':
                    continue
                instantiated = re.sub('unknown_api', candidate, flow)
                flows.append(re.sub('-->', ' ', instantiated))
    return get_ngram_scores(flows, apis, callee)
def get_api_scores(apis,maxflow,current_dataflow,ft,callee):
    """Rank candidate apis for a call site and report where `callee` lands.

    Builds two independent rankings:
      * n-gram scores of candidate dataflows (SRILM language model), and
      * normalized token-similarity (LCS-based) scores against `maxflow`.

    `ft` is the receiver's inferred type; builtin types (including
    List[...] / Dict[...] generics) switch the n-gram query to basetype mode.

    Returns (drank, nrank): 1-based rank of `callee` in each top-20 list,
    or 21 when it is outside the top 20.
    """
    basetypes=['int','str','float','list','dict','set','tuple','buffer','frozenset','complex','bool','unicode','bytes','bytearray']
    basetype=''
    baseflag=0
    for bt in basetypes:
        if bt==ft:
            basetype=bt
    if re.match('List\[.*\]',ft):
        basetype='list'
        ft='list'
    elif re.match('Dict\[.*\]',ft):
        basetype='dict'
        ft='dict'
    if basetype!='':
        baseflag=1
    dataflow_ngram_scores=get_ngram_score(apis,current_dataflow,baseflag,ft,callee)
    tosim_scores={}
    for api in apis:
        # Skip dunder names, ALL_CAPS constants and the bare underscore.
        if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
            continue
        tosim_scores[api]=get_tosim_score(api,maxflow)
    tosim_scores=standard(tosim_scores)
    dataflow_ngram_scores=sorted(dataflow_ngram_scores.items(), key=lambda x: x[1], reverse=True)
    tosim_scores = sorted(tosim_scores.items(),key = lambda x:x[1],reverse = True)
    print("NGRAM-SCORE: ",dataflow_ngram_scores[:20])
    print("SIMILAR-SCORE: ",tosim_scores[:20])
    drank=21
    nrank=21
    k=min(20,len(dataflow_ngram_scores))
    # Fix: scan each ranking with its own bound.  The n-gram dict keeps an
    # entry for every api while the similarity dict filters some out, so
    # indexing both lists with the single n-gram bound could raise IndexError.
    for i in range(0,k):
        if dataflow_ngram_scores[i][0]==callee:
            drank=i+1
    for i in range(0,min(k,len(tosim_scores))):
        if tosim_scores[i][0]==callee:
            nrank=i+1
    print(drank,nrank)
    return drank,nrank
def get_dataflow_scores(apis,maxflow,current_dataflow,ft,callee):
    """Return the n-gram scores dict for the candidate apis.

    Detects whether `ft` names a builtin receiver type (List[...] and
    Dict[...] generics count as list/dict) and, if so, queries the language
    model in basetype mode.
    """
    builtin_types=['int','str','float','list','dict','set','tuple','buffer','frozenset','complex','bool','unicode','bytes','bytearray']
    basetype = ''
    for candidate in builtin_types:
        if candidate == ft:
            basetype = candidate
    if re.match('List\[.*\]',ft):
        basetype = 'list'
        ft = 'list'
    elif re.match('Dict\[.*\]',ft):
        basetype = 'dict'
        ft = 'dict'
    baseflag = 1 if basetype != '' else 0
    return get_ngram_score(apis, current_dataflow, baseflag, ft, callee)
def get_tosim_scores(apis,maxflow,current_dataflow,ft,callee):
    """Map each eligible api to its token-similarity score against maxflow.

    Dunder names, ALL_CAPS constants and the bare underscore are excluded.
    (current_dataflow, ft and callee are unused here; the signature mirrors
    get_dataflow_scores for interchangeable use.)
    """
    scores = {}
    for candidate in apis:
        ineligible = (candidate.startswith('__')
                      or re.match('[A-Z0-9_]+$', candidate)
                      or candidate.strip() == '_')
        if ineligible:
            continue
        scores[candidate] = get_tosim_score(candidate, maxflow)
    #tosim_scores=standard(tosim_scores)
    return scores
| [
"sys.stdout.flush",
"re.compile",
"re.match",
"numpy.exp",
"re.sub",
"re.findall",
"os.system",
"sys.stdout.write"
] | [((728, 755), 're.sub', 're.sub', (['"""return """', '""""""', 'line'], {}), "('return ', '', line)\n", (734, 755), False, 'import re\n'), ((760, 790), 're.sub', 're.sub', (['"""\\\\[\'.*\'\\\\]"""', '""""""', 'line'], {}), '("\\\\[\'.*\'\\\\]", \'\', line)\n', (766, 790), False, 'import re\n'), ((795, 825), 're.sub', 're.sub', (['"""\\\\[".*"\\\\]"""', '""""""', 'line'], {}), '(\'\\\\[".*"\\\\]\', \'\', line)\n', (801, 825), False, 'import re\n'), ((830, 860), 're.sub', 're.sub', (['"""\\\\(\'.*\'\\\\)"""', '""""""', 'line'], {}), '("\\\\(\'.*\'\\\\)", \'\', line)\n', (836, 860), False, 'import re\n'), ((865, 895), 're.sub', 're.sub', (['"""\\\\(".*"\\\\)"""', '""""""', 'line'], {}), '(\'\\\\(".*"\\\\)\', \'\', line)\n', (871, 895), False, 'import re\n'), ((900, 944), 're.sub', 're.sub', (['"""\\\\[[0-9\\\\.\\\\-\\\\s\\\\:]+\\\\]"""', '""""""', 'line'], {}), "('\\\\[[0-9\\\\.\\\\-\\\\s\\\\:]+\\\\]', '', line)\n", (906, 944), False, 'import re\n'), ((943, 987), 're.sub', 're.sub', (['"""\\\\([0-9\\\\.\\\\-\\\\s\\\\:]+\\\\)"""', '""""""', 'line'], {}), "('\\\\([0-9\\\\.\\\\-\\\\s\\\\:]+\\\\)', '', line)\n", (949, 987), False, 'import re\n'), ((986, 1030), 're.sub', 're.sub', (['"""\\\\{[0-9\\\\.\\\\-\\\\s\\\\:]+\\\\}"""', '""""""', 'line'], {}), "('\\\\{[0-9\\\\.\\\\-\\\\s\\\\:]+\\\\}', '', line)\n", (992, 1030), False, 'import re\n'), ((1029, 1068), 're.sub', 're.sub', (['"""\\\\[.*[\\\\+\\\\:]+.*\\\\]"""', '""""""', 'line'], {}), "('\\\\[.*[\\\\+\\\\:]+.*\\\\]', '', line)\n", (1035, 1068), False, 'import re\n'), ((1069, 1096), 're.sub', 're.sub', (['"""\\\\+\\\\="""', '"""="""', 'line'], {}), "('\\\\+\\\\=', '=', line)\n", (1075, 1096), False, 'import re\n'), ((1126, 1159), 're.sub', 're.sub', (['"""r\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("r\'.*\'\\\\,*\\\\s*", \'\', line)\n', (1132, 1159), False, 'import re\n'), ((1164, 1197), 're.sub', 're.sub', (['"""b\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("b\'.*\'\\\\,*\\\\s*", \'\', line)\n', (1170, 
1197), False, 'import re\n'), ((1202, 1236), 're.sub', 're.sub', (['"""rb\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("rb\'.*\'\\\\,*\\\\s*", \'\', line)\n', (1208, 1236), False, 'import re\n'), ((1241, 1274), 're.sub', 're.sub', (['"""f\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("f\'.*\'\\\\,*\\\\s*", \'\', line)\n', (1247, 1274), False, 'import re\n'), ((1279, 1311), 're.sub', 're.sub', (['"""\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("\'.*\'\\\\,*\\\\s*", \'\', line)\n', (1285, 1311), False, 'import re\n'), ((1316, 1348), 're.sub', 're.sub', (['"""".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'".*"\\\\,*\\\\s*\', \'\', line)\n', (1322, 1348), False, 'import re\n'), ((1353, 1386), 're.sub', 're.sub', (['"""r".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'r".*"\\\\,*\\\\s*\', \'\', line)\n', (1359, 1386), False, 'import re\n'), ((1391, 1424), 're.sub', 're.sub', (['"""b".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'b".*"\\\\,*\\\\s*\', \'\', line)\n', (1397, 1424), False, 'import re\n'), ((1429, 1463), 're.sub', 're.sub', (['"""rb".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'rb".*"\\\\,*\\\\s*\', \'\', line)\n', (1435, 1463), False, 'import re\n'), ((1468, 1501), 're.sub', 're.sub', (['"""f".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'f".*"\\\\,*\\\\s*\', \'\', line)\n', (1474, 1501), False, 'import re\n'), ((1506, 1532), 're.sub', 're.sub', (['"""\\\\(\\\\)"""', '""""""', 'line'], {}), "('\\\\(\\\\)', '', line)\n", (1512, 1532), False, 'import re\n'), ((1535, 1561), 're.sub', 're.sub', (['"""\\\\{\\\\}"""', '""""""', 'line'], {}), "('\\\\{\\\\}', '', line)\n", (1541, 1561), False, 'import re\n'), ((1564, 1590), 're.sub', 're.sub', (['"""\\\\[\\\\]"""', '""""""', 'line'], {}), "('\\\\[\\\\]', '', line)\n", (1570, 1590), False, 'import re\n'), ((6008, 6026), 're.sub', 're.sub', (['""" """', '""""""', 'x'], {}), "(' ', '', x)\n", (6014, 6026), False, 'import re\n'), ((6246, 6265), 're.sub', 're.sub', (['""" """', '""""""', 'ty'], 
{}), "(' ', '', ty)\n", (6252, 6265), False, 'import re\n'), ((6407, 6427), 're.sub', 're.sub', (['""","""', '"""|"""', 'ty'], {}), "(',', '|', ty)\n", (6413, 6427), False, 'import re\n'), ((6506, 6530), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 'ty'], {}), "('\\\\.', '-->', ty)\n", (6512, 6530), False, 'import re\n'), ((8365, 8436), 're.findall', 're.findall', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\)"""', 'line'], {}), '(\'[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\)\', line)\n', (8375, 8436), False, 'import re\n'), ((8511, 8541), 're.sub', 're.sub', (['"""\\\\(\'.*\'\\\\)"""', '""""""', 'call'], {}), '("\\\\(\'.*\'\\\\)", \'\', call)\n', (8517, 8541), False, 'import re\n'), ((8546, 8576), 're.sub', 're.sub', (['"""\\\\(".*"\\\\)"""', '""""""', 'call'], {}), '(\'\\\\(".*"\\\\)\', \'\', call)\n', (8552, 8576), False, 'import re\n'), ((8581, 8611), 're.sub', 're.sub', (['"""\\\\[\'.*\'\\\\]"""', '""""""', 'call'], {}), '("\\\\[\'.*\'\\\\]", \'\', call)\n', (8587, 8611), False, 'import re\n'), ((8616, 8646), 're.sub', 're.sub', (['"""\\\\[".*"\\\\]"""', '""""""', 'call'], {}), '(\'\\\\[".*"\\\\]\', \'\', call)\n', (8622, 8646), False, 'import re\n'), ((8651, 8677), 're.sub', 're.sub', (['"""\\\\(\\\\)"""', '""""""', 'call'], {}), "('\\\\(\\\\)', '', call)\n", (8657, 8677), False, 'import re\n'), ((8680, 8712), 're.sub', 're.sub', (['"""\\\\([0-9]+\\\\)"""', '""""""', 'call'], {}), "('\\\\([0-9]+\\\\)', '', call)\n", (8686, 8712), False, 'import re\n'), ((8715, 8751), 're.sub', 're.sub', (['"""\\\\[[0-9:\\\\-]+\\\\]"""', '""""""', 'call'], {}), "('\\\\[[0-9:\\\\-]+\\\\]', '', call)\n", (8721, 8751), False, 'import re\n'), ((8772, 8793), 're.sub', 're.sub', (['""" """', '""""""', 'call'], {}), "(' ', '', call)\n", (8778, 8793), False, 'import re\n'), ((10671, 10777), 're.findall', 're.findall', 
(['"""for\\\\s[_a-zA-Z0-9\\\\.\\\\,\\\\s]+\\\\sin\\\\s[_a-zA-Z0-9\\\\,\\\\.\\\\[\\\\]\\\\(\\\\)\\\\{\\\\}\\\\s]+"""', 'line'], {}), "(\n 'for\\\\s[_a-zA-Z0-9\\\\.\\\\,\\\\s]+\\\\sin\\\\s[_a-zA-Z0-9\\\\,\\\\.\\\\[\\\\]\\\\(\\\\)\\\\{\\\\}\\\\s]+'\n , line)\n", (10681, 10777), False, 'import re\n'), ((11337, 11355), 're.sub', 're.sub', (['""" """', '""""""', 'y'], {}), "(' ', '', y)\n", (11343, 11355), False, 'import re\n'), ((11357, 11375), 're.sub', 're.sub', (['""" """', '""""""', 'x'], {}), "(' ', '', x)\n", (11363, 11375), False, 'import re\n'), ((11377, 11400), 're.sub', 're.sub', (['"""\\\\(\\\\)"""', '""""""', 'x'], {}), "('\\\\(\\\\)', '', x)\n", (11383, 11400), False, 'import re\n'), ((11400, 11423), 're.sub', 're.sub', (['"""\\\\(\\\\)"""', '""""""', 'y'], {}), "('\\\\(\\\\)', '', y)\n", (11406, 11423), False, 'import re\n'), ((11423, 11450), 're.sub', 're.sub', (['"""\\\\[\'.*\'\\\\]"""', '""""""', 'y'], {}), '("\\\\[\'.*\'\\\\]", \'\', y)\n', (11429, 11450), False, 'import re\n'), ((11452, 11479), 're.sub', 're.sub', (['"""\\\\[".*"\\\\]"""', '""""""', 'y'], {}), '(\'\\\\[".*"\\\\]\', \'\', y)\n', (11458, 11479), False, 'import re\n'), ((11481, 11508), 're.sub', 're.sub', (['"""\\\\(\'.*\'\\\\)"""', '""""""', 'y'], {}), '("\\\\(\'.*\'\\\\)", \'\', y)\n', (11487, 11508), False, 'import re\n'), ((11510, 11537), 're.sub', 're.sub', (['"""\\\\(".*"\\\\)"""', '""""""', 'y'], {}), '(\'\\\\(".*"\\\\)\', \'\', y)\n', (11516, 11537), False, 'import re\n'), ((11539, 11569), 're.sub', 're.sub', (['"""\\\\[[0-9:]+\\\\]"""', '""""""', 'y'], {}), "('\\\\[[0-9:]+\\\\]', '', y)\n", (11545, 11569), False, 'import re\n'), ((11569, 11599), 're.sub', 're.sub', (['"""\\\\([0-9:]+\\\\)"""', '""""""', 'y'], {}), "('\\\\([0-9:]+\\\\)', '', y)\n", (11575, 11599), False, 'import re\n'), ((11599, 11635), 're.sub', 're.sub', (['"""\\\\[.*[\\\\+\\\\:]+.*\\\\]"""', '""""""', 'y'], {}), "('\\\\[.*[\\\\+\\\\:]+.*\\\\]', '', y)\n", (11605, 11635), False, 'import re\n'), ((11633, 11656), 
're.sub', 're.sub', (['"""\\\\+\\\\="""', '""""""', 'y'], {}), "('\\\\+\\\\=', '', y)\n", (11639, 11656), False, 'import re\n'), ((11656, 11681), 're.sub', 're.sub', (['"""r\'.*\'\\\\,"""', '""""""', 'y'], {}), '("r\'.*\'\\\\,", \'\', y)\n', (11662, 11681), False, 'import re\n'), ((11684, 11711), 're.sub', 're.sub', (['"""\\\\[\'.*\'\\\\]"""', '""""""', 'x'], {}), '("\\\\[\'.*\'\\\\]", \'\', x)\n', (11690, 11711), False, 'import re\n'), ((11713, 11740), 're.sub', 're.sub', (['"""\\\\[".*"\\\\]"""', '""""""', 'x'], {}), '(\'\\\\[".*"\\\\]\', \'\', x)\n', (11719, 11740), False, 'import re\n'), ((11742, 11769), 're.sub', 're.sub', (['"""\\\\(\'.*\'\\\\)"""', '""""""', 'x'], {}), '("\\\\(\'.*\'\\\\)", \'\', x)\n', (11748, 11769), False, 'import re\n'), ((11771, 11798), 're.sub', 're.sub', (['"""\\\\(".*"\\\\)"""', '""""""', 'x'], {}), '(\'\\\\(".*"\\\\)\', \'\', x)\n', (11777, 11798), False, 'import re\n'), ((11800, 11830), 're.sub', 're.sub', (['"""\\\\[[0-9:]+\\\\]"""', '""""""', 'x'], {}), "('\\\\[[0-9:]+\\\\]', '', x)\n", (11806, 11830), False, 'import re\n'), ((11830, 11860), 're.sub', 're.sub', (['"""\\\\([0-9:]+\\\\)"""', '""""""', 'x'], {}), "('\\\\([0-9:]+\\\\)', '', x)\n", (11836, 11860), False, 'import re\n'), ((11860, 11896), 're.sub', 're.sub', (['"""\\\\[.*[\\\\+\\\\:]+.*\\\\]"""', '""""""', 'x'], {}), "('\\\\[.*[\\\\+\\\\:]+.*\\\\]', '', x)\n", (11866, 11896), False, 'import re\n'), ((11894, 11917), 're.sub', 're.sub', (['"""\\\\+\\\\="""', '""""""', 'x'], {}), "('\\\\+\\\\=', '', x)\n", (11900, 11917), False, 'import re\n'), ((11917, 11942), 're.sub', 're.sub', (['"""r\'.*\'\\\\,"""', '""""""', 'x'], {}), '("r\'.*\'\\\\,", \'\', x)\n', (11923, 11942), False, 'import re\n'), ((12394, 12417), 're.sub', 're.sub', (['"""\\\\*\\\\*"""', '""""""', 's'], {}), "('\\\\*\\\\*', '', s)\n", (12400, 12417), False, 'import re\n'), ((12417, 12445), 're.sub', 're.sub', (['"""\\\\*args"""', '"""args"""', 's'], {}), "('\\\\*args', 'args', s)\n", (12423, 12445), False, 
'import re\n'), ((12446, 12475), 're.sub', 're.sub', (['"""[\\\\+\\\\/\\\\*]"""', '"""|"""', 's'], {}), "('[\\\\+\\\\/\\\\*]', '|', s)\n", (12452, 12475), False, 'import re\n'), ((12474, 12493), 're.sub', 're.sub', (['"""\n"""', '""""""', 's'], {}), "('\\n', '', s)\n", (12480, 12493), False, 'import re\n'), ((13460, 13487), 're.sub', 're.sub', (['"""return """', '""""""', 'line'], {}), "('return ', '', line)\n", (13466, 13487), False, 'import re\n'), ((13492, 13520), 're.sub', 're.sub', (['"""\\\\[.*\\\\]"""', '""""""', 'line'], {}), "('\\\\[.*\\\\]', '', line)\n", (13498, 13520), False, 'import re\n'), ((13523, 13551), 're.sub', 're.sub', (['"""\\\\(.*\\\\)"""', '""""""', 'line'], {}), "('\\\\(.*\\\\)', '', line)\n", (13529, 13551), False, 'import re\n'), ((13554, 13582), 're.sub', 're.sub', (['"""\\\\{.*\\\\}"""', '""""""', 'line'], {}), "('\\\\{.*\\\\}', '', line)\n", (13560, 13582), False, 'import re\n'), ((13585, 13612), 're.sub', 're.sub', (['"""\\\\+\\\\="""', '"""="""', 'line'], {}), "('\\\\+\\\\=', '=', line)\n", (13591, 13612), False, 'import re\n'), ((13642, 13675), 're.sub', 're.sub', (['"""r\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("r\'.*\'\\\\,*\\\\s*", \'\', line)\n', (13648, 13675), False, 'import re\n'), ((13680, 13713), 're.sub', 're.sub', (['"""b\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("b\'.*\'\\\\,*\\\\s*", \'\', line)\n', (13686, 13713), False, 'import re\n'), ((13718, 13752), 're.sub', 're.sub', (['"""rb\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("rb\'.*\'\\\\,*\\\\s*", \'\', line)\n', (13724, 13752), False, 'import re\n'), ((13757, 13790), 're.sub', 're.sub', (['"""f\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("f\'.*\'\\\\,*\\\\s*", \'\', line)\n', (13763, 13790), False, 'import re\n'), ((13795, 13827), 're.sub', 're.sub', (['"""\'.*\'\\\\,*\\\\s*"""', '""""""', 'line'], {}), '("\'.*\'\\\\,*\\\\s*", \'\', line)\n', (13801, 13827), False, 'import re\n'), ((13832, 13864), 're.sub', 're.sub', 
(['"""".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'".*"\\\\,*\\\\s*\', \'\', line)\n', (13838, 13864), False, 'import re\n'), ((13869, 13902), 're.sub', 're.sub', (['"""r".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'r".*"\\\\,*\\\\s*\', \'\', line)\n', (13875, 13902), False, 'import re\n'), ((13907, 13940), 're.sub', 're.sub', (['"""b".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'b".*"\\\\,*\\\\s*\', \'\', line)\n', (13913, 13940), False, 'import re\n'), ((13945, 13979), 're.sub', 're.sub', (['"""rb".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'rb".*"\\\\,*\\\\s*\', \'\', line)\n', (13951, 13979), False, 'import re\n'), ((13984, 14017), 're.sub', 're.sub', (['"""f".*"\\\\,*\\\\s*"""', '""""""', 'line'], {}), '(\'f".*"\\\\,*\\\\s*\', \'\', line)\n', (13990, 14017), False, 'import re\n'), ((29429, 29571), 'os.system', 'os.system', (["(\n 'srilm-1.7.2/lm/bin/i686-m64/ngram -ppl output/test.txt -order 4 -lm trainfile.lm -debug 2 > output/'\n + callee + '.ppl')"], {}), "(\n 'srilm-1.7.2/lm/bin/i686-m64/ngram -ppl output/test.txt -order 4 -lm trainfile.lm -debug 2 > output/'\n + callee + '.ppl')\n", (29438, 29571), False, 'import os\n'), ((30194, 30235), 'os.system', 'os.system', (["('rm output/' + callee + '.ppl')"], {}), "('rm output/' + callee + '.ppl')\n", (30203, 30235), False, 'import os\n'), ((31383, 31411), 're.match', 're.match', (['"""List\\\\[.*\\\\]"""', 'ft'], {}), "('List\\\\[.*\\\\]', ft)\n", (31391, 31411), False, 'import re\n'), ((33154, 33182), 're.match', 're.match', (['"""List\\\\[.*\\\\]"""', 'ft'], {}), "('List\\\\[.*\\\\]', ft)\n", (33162, 33182), False, 'import re\n'), ((6029, 6081), 're.match', 're.match', (['"""\\\\(.*,.*\\\\)\\\\,[a-zA-Z0-9_\'"\\\\(\\\\)|]+"""', 'x'], {}), '(\'\\\\(.*,.*\\\\)\\\\,[a-zA-Z0-9_\\\'"\\\\(\\\\)|]+\', x)\n', (6037, 6081), False, 'import re\n'), ((6080, 6132), 're.match', 're.match', (['"""[a-zA-Z0-9_\'"\\\\(\\\\)|]+\\\\,\\\\(.*,.*\\\\)"""', 'x'], {}), 
'(\'[a-zA-Z0-9_\\\'"\\\\(\\\\)|]+\\\\,\\\\(.*,.*\\\\)\', x)\n', (6088, 6132), False, 'import re\n'), ((6131, 6171), 're.match', 're.match', (['"""\\\\(.*,.*\\\\)\\\\,\\\\(.*,.*\\\\)"""', 'x'], {}), "('\\\\(.*,.*\\\\)\\\\,\\\\(.*,.*\\\\)', x)\n", (6139, 6171), False, 'import re\n'), ((6171, 6197), 're.sub', 're.sub', (['"""[\\\\(\\\\)]+"""', '""""""', 'x'], {}), "('[\\\\(\\\\)]+', '', x)\n", (6177, 6197), False, 'import re\n'), ((7013, 7032), 're.sub', 're.sub', (['""" """', '""""""', 'ty'], {}), "(' ', '', ty)\n", (7019, 7032), False, 'import re\n'), ((9061, 9088), 're.match', 're.match', (['"""\\\\(.*,.*\\\\)"""', 'ty'], {}), "('\\\\(.*,.*\\\\)', ty)\n", (9069, 9088), False, 'import re\n'), ((13221, 13250), 'sys.stdout.write', 'sys.stdout.write', (['process_bar'], {}), '(process_bar)\n', (13237, 13250), False, 'import sys, time\n'), ((13259, 13277), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13275, 13277), False, 'import sys, time\n'), ((14682, 14703), 're.sub', 're.sub', (['""" """', '""""""', 'line'], {}), "(' ', '', line)\n", (14688, 14703), False, 'import re\n'), ((15000, 15023), 're.sub', 're.sub', (['"""#.*"""', '""""""', 'line'], {}), "('#.*', '', line)\n", (15006, 15023), False, 'import re\n'), ((21410, 21431), 're.sub', 're.sub', (['""" """', '""""""', 'line'], {}), "(' ', '', line)\n", (21416, 21431), False, 'import re\n'), ((21799, 21822), 're.sub', 're.sub', (['"""#.*"""', '""""""', 'line'], {}), "('#.*', '', line)\n", (21805, 21822), False, 'import re\n'), ((31468, 31496), 're.match', 're.match', (['"""Dict\\\\[.*\\\\]"""', 'ft'], {}), "('Dict\\\\[.*\\\\]', ft)\n", (31476, 31496), False, 'import re\n'), ((33239, 33267), 're.match', 're.match', (['"""Dict\\\\[.*\\\\]"""', 'ft'], {}), "('Dict\\\\[.*\\\\]', ft)\n", (33247, 33267), False, 'import re\n'), ((4604, 4653), 're.match', 're.match', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\\|]+\\\\(.*\\\\)"""', 'ty'], {}), "('[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\\|]+\\\\(.*\\\\)', ty)\n", (4612, 4653), False, 
'import re\n'), ((7073, 7093), 're.sub', 're.sub', (['""","""', '"""|"""', 'ty'], {}), "(',', '|', ty)\n", (7079, 7093), False, 'import re\n'), ((14707, 14735), 're.match', 're.match', (['""".*=\'.*\'.*"""', 'line2'], {}), '(".*=\'.*\'.*", line2)\n', (14715, 14735), False, 'import re\n'), ((14740, 14768), 're.match', 're.match', (['""".*=".*".*"""', 'line2'], {}), '(\'.*=".*".*\', line2)\n', (14748, 14768), False, 'import re\n'), ((14773, 14806), 're.match', 're.match', (['""".*=[0-9\\\\.]+.*"""', 'line2'], {}), "('.*=[0-9\\\\.]+.*', line2)\n", (14781, 14806), False, 'import re\n'), ((14809, 14837), 're.match', 're.match', (['""".*=None.*"""', 'line2'], {}), "('.*=None.*', line2)\n", (14817, 14837), False, 'import re\n'), ((14840, 14868), 're.match', 're.match', (['""".*=True.*"""', 'line2'], {}), "('.*=True.*', line2)\n", (14848, 14868), False, 'import re\n'), ((14871, 14900), 're.match', 're.match', (['""".*=False.*"""', 'line2'], {}), "('.*=False.*', line2)\n", (14879, 14900), False, 'import re\n'), ((15146, 15171), 're.compile', 're.compile', (['""".*[a-zA-Z]$"""'], {}), "('.*[a-zA-Z]$')\n", (15156, 15171), False, 'import re\n'), ((15221, 15361), 're.findall', 're.findall', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]"\'\\\\(\\\\)\\\\{\\\\}]+\\\\s*=\\\\s*[_a-zA-Z0-9\\\\.\\\\[\\\\]"\'\\\\(\\\\)\\\\{\\\\}\\\\*\\\\/\\\\-\\\\%\\\\*\\\\,\\\\=\\\\s\\\\+]+"""', 'line'], {}), '(\n \'[_a-zA-Z0-9\\\\.\\\\[\\\\]"\\\'\\\\(\\\\)\\\\{\\\\}]+\\\\s*=\\\\s*[_a-zA-Z0-9\\\\.\\\\[\\\\]"\\\'\\\\(\\\\)\\\\{\\\\}\\\\*\\\\/\\\\-\\\\%\\\\*\\\\,\\\\=\\\\s\\\\+]+\'\n , line)\n', (15231, 15361), False, 'import re\n'), ((15383, 15419), 're.sub', 're.sub', (['"""[\\\\+\\\\-\\\\/\\\\*]+"""', '"""|"""', 'line'], {}), "('[\\\\+\\\\-\\\\/\\\\*]+', '|', line)\n", (15389, 15419), False, 'import re\n'), ((15491, 15514), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 'x'], {}), "('\\\\.', '-->', x)\n", (15497, 15514), False, 'import re\n'), ((15517, 15540), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 
'y'], {}), "('\\\\.', '-->', y)\n", (15523, 15540), False, 'import re\n'), ((15576, 15617), 're.findall', 're.findall', (['"""[\\\\(\\\\{\\\\)\\\\}\\\\[\\\\]\'"]"""', 'tf'], {}), '(\'[\\\\(\\\\{\\\\)\\\\}\\\\[\\\\]\\\'"]\', tf)\n', (15586, 15617), False, 'import re\n'), ((15732, 15770), 're.match', 're.match', (['""".*for\\\\s.*\\\\sin\\\\s.*"""', 'line'], {}), "('.*for\\\\s.*\\\\sin\\\\s.*', line)\n", (15740, 15770), False, 'import re\n'), ((21435, 21463), 're.match', 're.match', (['""".*=\'.*\'.*"""', 'line2'], {}), '(".*=\'.*\'.*", line2)\n', (21443, 21463), False, 'import re\n'), ((21468, 21496), 're.match', 're.match', (['""".*=".*".*"""', 'line2'], {}), '(\'.*=".*".*\', line2)\n', (21476, 21496), False, 'import re\n'), ((21501, 21534), 're.match', 're.match', (['""".*=[0-9\\\\.]+.*"""', 'line2'], {}), "('.*=[0-9\\\\.]+.*', line2)\n", (21509, 21534), False, 'import re\n'), ((21537, 21565), 're.match', 're.match', (['""".*=None.*"""', 'line2'], {}), "('.*=None.*', line2)\n", (21545, 21565), False, 'import re\n'), ((21568, 21596), 're.match', 're.match', (['""".*=True.*"""', 'line2'], {}), "('.*=True.*', line2)\n", (21576, 21596), False, 'import re\n'), ((21599, 21628), 're.match', 're.match', (['""".*=False.*"""', 'line2'], {}), "('.*=False.*', line2)\n", (21607, 21628), False, 'import re\n'), ((21631, 21661), 're.match', 're.match', (['""".*=\\\\{\\\\}.*"""', 'line2'], {}), "('.*=\\\\{\\\\}.*', line2)\n", (21639, 21661), False, 'import re\n'), ((21662, 21692), 're.match', 're.match', (['""".*=\\\\(\\\\).*"""', 'line2'], {}), "('.*=\\\\(\\\\).*', line2)\n", (21670, 21692), False, 'import re\n'), ((21693, 21723), 're.match', 're.match', (['""".*=\\\\[\\\\].*"""', 'line2'], {}), "('.*=\\\\[\\\\].*', line2)\n", (21701, 21723), False, 'import re\n'), ((22121, 22261), 're.findall', 're.findall', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]"\'\\\\(\\\\)\\\\{\\\\}]+\\\\s*=\\\\s*[_a-zA-Z0-9\\\\.\\\\[\\\\]"\'\\\\(\\\\)\\\\{\\\\}\\\\*\\\\/\\\\-\\\\%\\\\*\\\\,\\\\=\\\\s\\\\+]+"""', 
'line'], {}), '(\n \'[_a-zA-Z0-9\\\\.\\\\[\\\\]"\\\'\\\\(\\\\)\\\\{\\\\}]+\\\\s*=\\\\s*[_a-zA-Z0-9\\\\.\\\\[\\\\]"\\\'\\\\(\\\\)\\\\{\\\\}\\\\*\\\\/\\\\-\\\\%\\\\*\\\\,\\\\=\\\\s\\\\+]+\'\n , line)\n', (22131, 22261), False, 'import re\n'), ((23187, 23225), 're.match', 're.match', (['""".*for\\\\s.*\\\\sin\\\\s.*"""', 'line'], {}), "('.*for\\\\s.*\\\\sin\\\\s.*', line)\n", (23195, 23225), False, 'import re\n'), ((31777, 31805), 're.match', 're.match', (['"""[A-Z0-9_]+$"""', 'api'], {}), "('[A-Z0-9_]+$', api)\n", (31785, 31805), False, 'import re\n'), ((33588, 33616), 're.match', 're.match', (['"""[A-Z0-9_]+$"""', 'api'], {}), "('[A-Z0-9_]+$', api)\n", (33596, 33616), False, 'import re\n'), ((4843, 4863), 're.sub', 're.sub', (['""","""', '"""|"""', 'ty'], {}), "(',', '|', ty)\n", (4849, 4863), False, 'import re\n'), ((4962, 5014), 're.match', 're.match', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\\|]+\\\\(.*=.*\\\\)"""', 'ty'], {}), "('[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\\|]+\\\\(.*=.*\\\\)', ty)\n", (4970, 5014), False, 'import re\n'), ((7283, 7307), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 'ty'], {}), "('\\\\.', '-->', ty)\n", (7289, 7307), False, 'import re\n'), ((9451, 9526), 're.match', 're.match', (['""".*[a-zA-Z0-9_]+\\\\(.*[a-zA-Z0-9_\'"\\\\(\\\\)\\\\|\\\\-\\\\>\\\\:]+\\\\).*"""', 'ty'], {}), '(\'.*[a-zA-Z0-9_]+\\\\(.*[a-zA-Z0-9_\\\'"\\\\(\\\\)\\\\|\\\\-\\\\>\\\\:]+\\\\).*\', ty)\n', (9459, 9526), False, 'import re\n'), ((14625, 14665), 're.match', 're.match', (['""".*=\\\\s*[0-9\\\\.\\\\:\\\\-]+"""', 'line'], {}), "('.*=\\\\s*[0-9\\\\.\\\\:\\\\-]+', line)\n", (14633, 14665), False, 'import re\n'), ((15996, 16038), 're.findall', 're.findall', (['"""[\\\\(\\\\{\\\\)\\\\}\\\\[\\\\]\'"]"""', 'tff'], {}), '(\'[\\\\(\\\\{\\\\)\\\\}\\\\[\\\\]\\\'"]\', tff)\n', (16006, 16038), False, 'import re\n'), ((22321, 22360), 're.findall', 're.findall', (['"""[\\\\,\\\\-\\\\+\\\\*\\\\/\\\\%]+"""', 'rs'], {}), "('[\\\\,\\\\-\\\\+\\\\*\\\\/\\\\%]+', rs)\n", (22331, 22360), False, 
'import re\n'), ((22557, 22583), 're.sub', 're.sub', (['"""\\\\)\\\\,.*$"""', '""""""', 's'], {}), "('\\\\)\\\\,.*$', '', s)\n", (22563, 22583), False, 'import re\n'), ((29912, 29959), 're.findall', 're.findall', (['"""logprob=\\\\s[0-9\\\\-\\\\.]+"""', 'lines[j]'], {}), "('logprob=\\\\s[0-9\\\\-\\\\.]+', lines[j])\n", (29922, 29959), False, 'import re\n'), ((30421, 30449), 're.match', 're.match', (['"""[A-Z0-9_]+$"""', 'api'], {}), "('[A-Z0-9_]+$', api)\n", (30429, 30449), False, 'import re\n'), ((30790, 30822), 're.sub', 're.sub', (['"""unknown_api"""', 'api', 'flow'], {}), "('unknown_api', api, flow)\n", (30796, 30822), False, 'import re\n'), ((30849, 30874), 're.sub', 're.sub', (['"""-->"""', '""" """', 'flow1'], {}), "('-->', ' ', flow1)\n", (30855, 30874), False, 'import re\n'), ((7227, 7260), 're.match', 're.match', (['""".*\\\\(.*\\\\..*\\\\).*"""', 'ty'], {}), "('.*\\\\(.*\\\\..*\\\\).*', ty)\n", (7235, 7260), False, 'import re\n'), ((7415, 7511), 're.match', 're.match', (['""".*[a-zA-Z0-9_]+\\\\(.*[a-zA-Z0-9_\'"\\\\(\\\\)\\\\|\\\\-\\\\>\\\\:\\\\[\\\\]\\\\,\\\\.]+\\\\).*"""', 'ty'], {}), '(\n \'.*[a-zA-Z0-9_]+\\\\(.*[a-zA-Z0-9_\\\'"\\\\(\\\\)\\\\|\\\\-\\\\>\\\\:\\\\[\\\\]\\\\,\\\\.]+\\\\).*\',\n ty)\n', (7423, 7511), False, 'import re\n'), ((7495, 7538), 're.match', 're.match', (['""".*\\\\(.*[a-zA-Z0-9_]+.*\\\\).*"""', 'ty'], {}), "('.*\\\\(.*[a-zA-Z0-9_]+.*\\\\).*', ty)\n", (7503, 7538), False, 'import re\n'), ((7543, 7567), 're.sub', 're.sub', (['"""\\\\(\\\\)"""', '""""""', 'ty'], {}), "('\\\\(\\\\)', '', ty)\n", (7549, 7567), False, 'import re\n'), ((7570, 7600), 're.sub', 're.sub', (['"""\\\\(\\\\[\\\\]\\\\)"""', '""""""', 'ty'], {}), "('\\\\(\\\\[\\\\]\\\\)', '', ty)\n", (7576, 7600), False, 'import re\n'), ((16296, 16369), 're.match', 're.match', (['""".*[_a-zA-Z0-9\\\\.\\\\[\\\\]\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\).*"""', 'line'], {}), '(\'.*[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\).*\', line)\n', 
(16304, 16369), False, 'import re\n'), ((16606, 16677), 're.findall', 're.findall', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\)"""', 'line'], {}), '(\'[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\)\', line)\n', (16616, 16677), False, 'import re\n'), ((16769, 16802), 're.sub', 're.sub', (['"""[\\\\+\\\\-\\\\/]+"""', '"""|"""', 'line'], {}), "('[\\\\+\\\\-\\\\/]+', '|', line)\n", (16775, 16802), False, 'import re\n'), ((16835, 16861), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 'line'], {}), "('\\\\.', '-->', line)\n", (16841, 16861), False, 'import re\n'), ((16882, 16924), 're.findall', 're.findall', (['"""[\\\\(\\\\{\\\\)\\\\}\\\\[\\\\]\'"]"""', 'cas'], {}), '(\'[\\\\(\\\\{\\\\)\\\\}\\\\[\\\\]\\\'"]\', cas)\n', (16892, 16924), False, 'import re\n'), ((22805, 22823), 're.sub', 're.sub', (['""" """', '""""""', 's'], {}), "(' ', '', s)\n", (22811, 22823), False, 'import re\n'), ((23615, 23688), 're.match', 're.match', (['""".*[_a-zA-Z0-9\\\\.\\\\[\\\\]\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\).*"""', 'line'], {}), '(\'.*[_a-zA-Z0-9\\\\.\\\\[\\\\]\\\'"\\\\(\\\\)\\\\{\\\\}\\\\,\\\\:]+\\\\(.*\\\\).*\', line)\n', (23623, 23688), False, 'import re\n'), ((29149, 29164), 'numpy.exp', 'np.exp', (['(-scs[k])'], {}), '(-scs[k])\n', (29155, 29164), True, 'import numpy as np\n'), ((30717, 30745), 're.match', 're.match', (['"""[A-Z0-9_]+$"""', 'api'], {}), "('[A-Z0-9_]+$', api)\n", (30725, 30745), False, 'import re\n'), ((5296, 5342), 're.match', 're.match', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]]+\\\\(.*\\\\)"""', 'ty'], {}), "('[_a-zA-Z0-9\\\\.\\\\[\\\\]]+\\\\(.*\\\\)', ty)\n", (5304, 5342), False, 'import re\n'), ((8019, 8043), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 'ty'], {}), "('\\\\.', '-->', ty)\n", (8025, 8043), False, 'import re\n'), ((8151, 8199), 're.match', 're.match', (['"""[a-zA-Z0-9_]+\\\\[[a-zA-Z0-9_]+\\\\]"""', 'ty'], {}), "('[a-zA-Z0-9_]+\\\\[[a-zA-Z0-9_]+\\\\]', ty)\n", (8159, 
8199), False, 'import re\n'), ((5471, 5517), 're.match', 're.match', (['"""[_a-zA-Z0-9\\\\.\\\\[\\\\]]+\\\\[.*\\\\]"""', 'ty'], {}), "('[_a-zA-Z0-9\\\\.\\\\[\\\\]]+\\\\[.*\\\\]', ty)\n", (5479, 5517), False, 'import re\n'), ((7606, 7702), 're.match', 're.match', (['""".*[a-zA-Z0-9_]+\\\\(.*[a-zA-Z0-9_\'"\\\\(\\\\)\\\\|\\\\-\\\\>\\\\:\\\\[\\\\]\\\\,\\\\.]+\\\\).*"""', 'ty'], {}), '(\n \'.*[a-zA-Z0-9_]+\\\\(.*[a-zA-Z0-9_\\\'"\\\\(\\\\)\\\\|\\\\-\\\\>\\\\:\\\\[\\\\]\\\\,\\\\.]+\\\\).*\',\n ty)\n', (7614, 7702), False, 'import re\n'), ((7686, 7729), 're.match', 're.match', (['""".*\\\\(.*[a-zA-Z0-9_]+.*\\\\).*"""', 'ty'], {}), "('.*\\\\(.*[a-zA-Z0-9_]+.*\\\\).*', ty)\n", (7694, 7729), False, 'import re\n'), ((9887, 9917), 're.match', 're.match', (['""".*\\\\[\'.*\'\\\\].*"""', 'ty'], {}), '(".*\\\\[\'.*\'\\\\].*", ty)\n', (9895, 9917), False, 'import re\n'), ((9920, 9950), 're.match', 're.match', (['""".*\\\\[".*"\\\\].*"""', 'ty'], {}), '(\'.*\\\\[".*"\\\\].*\', ty)\n', (9928, 9950), False, 'import re\n'), ((9953, 9986), 're.match', 're.match', (['""".*\\\\[[0-9:]+\\\\].*"""', 'ty'], {}), "('.*\\\\[[0-9:]+\\\\].*', ty)\n", (9961, 9986), False, 'import re\n'), ((10014, 10040), 're.sub', 're.sub', (['"""\\\\[.*\\\\]"""', '""""""', 'ty'], {}), "('\\\\[.*\\\\]', '', ty)\n", (10020, 10040), False, 'import re\n'), ((5660, 5684), 're.sub', 're.sub', (['"""\\\\."""', '"""-->"""', 'ty'], {}), "('\\\\.', '-->', ty)\n", (5666, 5684), False, 'import re\n')] |
import os
from statistics import mean
import multiprocessing as mp
import numpy as np
import datetime
from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
# Synthetic 1x300x300x3 uint8 frame (all ones) used as detector input by the benchmarks below.
my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
# Class-id -> label-name map; the absolute path assumes the frigate container layout.
labels = load_labels('/labelmap.txt')
######
# Minimal same process runner
######
# object_detector = ObjectDetector()
# tensor_input = np.expand_dims(np.full((300,300,3), 0, np.uint8), axis=0)
# start = datetime.datetime.now().timestamp()
# frame_times = []
# for x in range(0, 1000):
# start_frame = datetime.datetime.now().timestamp()
# tensor_input[:] = my_frame
# detections = object_detector.detect_raw(tensor_input)
# parsed_detections = []
# for d in detections:
# if d[1] < 0.4:
# break
# parsed_detections.append((
# labels[int(d[0])],
# float(d[1]),
# (d[2], d[3], d[4], d[5])
# ))
# frame_times.append(datetime.datetime.now().timestamp()-start_frame)
# duration = datetime.datetime.now().timestamp()-start
# print(f"Processed for {duration:.2f} seconds.")
# print(f"Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######
# Separate process runner
######
def start(id, num_detections, detection_queue):
    """Benchmark worker: run `num_detections` detections of the shared dummy
    frame through the EdgeTPU process and print per-worker timing stats."""
    detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue)
    begin = datetime.datetime.now().timestamp()
    frame_times = []
    for _ in range(0, num_detections):
        frame_begin = datetime.datetime.now().timestamp()
        detector.detect(my_frame)
        frame_times.append(datetime.datetime.now().timestamp() - frame_begin)
    duration = datetime.datetime.now().timestamp() - begin
    print(f"{id} - Processed for {duration:.2f} seconds.")
    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
# Single shared EdgeTPU worker; every camera process funnels requests through its queue.
edgetpu_process = EdgeTPUProcess()
# start(1, 1000, edgetpu_process.detect_lock, edgetpu_process.detect_ready, edgetpu_process.frame_ready)
####
# Multiple camera processes
####
camera_processes = []
for x in range(0, 10):
    # each worker issues 100 detections through the shared detection queue
    camera_process = mp.Process(target=start, args=(x, 100, edgetpu_process.detection_queue))
    camera_process.daemon = True
    camera_processes.append(camera_process)
# NOTE: rebinds the module-level name `start` (the function above) to a timestamp.
start = datetime.datetime.now().timestamp()
for p in camera_processes:
    p.start()
for p in camera_processes:
    # wait for every worker to finish before measuring total wall-clock time
    p.join()
duration = datetime.datetime.now().timestamp()-start
print(f"Total - Processed for {duration:.2f} seconds.") | [
"statistics.mean",
"frigate.edgetpu.EdgeTPUProcess",
"multiprocessing.Process",
"datetime.datetime.now",
"numpy.full",
"frigate.edgetpu.load_labels"
] | [((275, 303), 'frigate.edgetpu.load_labels', 'load_labels', (['"""/labelmap.txt"""'], {}), "('/labelmap.txt')\n", (286, 303), False, 'from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels\n'), ((1848, 1864), 'frigate.edgetpu.EdgeTPUProcess', 'EdgeTPUProcess', ([], {}), '()\n', (1862, 1864), False, 'from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels\n'), ((223, 258), 'numpy.full', 'np.full', (['(300, 300, 3)', '(1)', 'np.uint8'], {}), '((300, 300, 3), 1, np.uint8)\n', (230, 258), True, 'import numpy as np\n'), ((2074, 2146), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'start', 'args': '(x, 100, edgetpu_process.detection_queue)'}), '(target=start, args=(x, 100, edgetpu_process.detection_queue))\n', (2084, 2146), True, 'import multiprocessing as mp\n'), ((2229, 2252), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2250, 2252), False, 'import datetime\n'), ((1366, 1389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1387, 1389), False, 'import datetime\n'), ((2356, 2379), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2377, 2379), False, 'import datetime\n'), ((1477, 1500), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1498, 1500), False, 'import datetime\n'), ((1649, 1672), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1670, 1672), False, 'import datetime\n'), ((1797, 1814), 'statistics.mean', 'mean', (['frame_times'], {}), '(frame_times)\n', (1801, 1814), False, 'from statistics import mean\n'), ((1586, 1609), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1607, 1609), False, 'import datetime\n')] |
import numpy as np
import scipy
import scipy.stats as stats
import torch
from sklearn.metrics import roc_auc_score
from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection
from netquery.encoders import *
from netquery.aggregators import MeanAggregator
from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention
# from netquery.graph import _reverse_relation
from netquery.module import *
from netquery.SpatialRelationEncoder import *
import cPickle as pickle
import logging
import random
import time
import math
"""
Misc utility functions..
"""
def detect_cuda_device(device):
    """Validate a requested compute device and return a usable one.

    Falls back to the string "cpu" when CUDA is unavailable. For an explicit
    "cuda:N" request, verifies N is a valid device index and raises otherwise.
    Returns either the string "cpu" / the original string, or a torch.device.
    """
    if not torch.cuda.is_available():
        # No GPU present: silently fall back to CPU regardless of the request.
        return "cpu"
    if device == "cpu":
        return device
    if "cuda" in device:
        if device == "cuda":
            print("Using cuda!!!")
        elif "cuda:" in device:
            requested = int(device.replace("cuda:", ""))
            num_cuda = torch.cuda.device_count()
            if not (requested < num_cuda and requested >= 0):
                raise Exception("The cuda device number {} is not available!!!".format(device))
        device = torch.device(device)
    return device
def cudify(feature_modules, node_maps=None, device = "cuda"):
    '''
    Build a feature-lookup closure that moves its index tensor to `device`.
    Args:
        feature_modules: dict node type -> embedding module, each of shape
            [num_ent_by_type + 2, embed_dim]
        node_maps: optional dict node type -> {global node id -> local node id};
            when given, global ids are translated before the lookup
        device: target device for the index tensor (e.g. "cuda" or "cpu")
    Return:
        features(nodes, mode): looks up embeddings for the given global node
            ids `nodes` of node type `mode` (local id + 1 is the row index)
    '''
    if node_maps is None:
        def features(nodes, mode):
            index = torch.autograd.Variable(torch.LongTensor(nodes)+1).to(device)
            return feature_modules[mode](index)
    else:
        def features(nodes, mode):
            local_ids = [node_maps[mode][n] for n in nodes]
            index = torch.autograd.Variable(torch.LongTensor(local_ids)+1).to(device)
            return feature_modules[mode](index)
    return features
def _get_perc_scores(scores, lengths):
'''
percentile rank score: Given a query, one positive target cos score p, x negative target, and their cos score [n1, n2, ..., nx],
See the rank of p in [n1, n2, ..., nx]
There are N queries, compute percentiel rank (APR) score for each query
Args:
scores: 1st N corespond to cos score for each positive query-target
scores[N:] correspond to cos score for each negative query-target which append in order, the number is sum(lengths)
lengths: a list of N int, each indicate the negative sample size for this query
Return:
perc_scores: a list of percentile rank score per query, APR are the average of all these score
'''
perc_scores = []
cum_sum = 0
neg_scores = scores[len(lengths):]
for i, length in enumerate(lengths):
# score[i]: the cos score for positive query-target
# neg_scores[cum_sum:cum_sum+length]: the list of cos score for negative query-target
perc_scores.append(stats.percentileofscore(neg_scores[cum_sum:cum_sum+length], scores[i]))
cum_sum += length
return perc_scores
def entity_embeding_lookup(features, node_list, mode_list):
    """Look up one embedding per (node, mode) pair and return them column-wise.

    Each features([node], mode) call yields a [1, embed_dim] tensor; the
    result is transposed to shape [embed_dim, batch_size].
    """
    per_node = [features([node], mode_list[i]) for i, node in enumerate(node_list)]
    stacked = torch.stack(per_node)  # [batch_size, 1, embed_dim]
    return stacked.squeeze(1).t()
def eval_auc_queries(test_queries, enc_dec, batch_size=1000, hard_negatives=False, seed=0):
    '''
    Given a list of queries, run enc_dec and compute AUC against sampled
    negative targets.

    Args:
        test_queries: dict, formula template -> list of query objects
        enc_dec: model whose forward(formula, queries, targets) returns scores
        batch_size: maximum number of queries scored per forward pass
        hard_negatives: sample from query.hard_neg_samples instead of
            query.neg_samples
        seed: seed for the random module so negative sampling is reproducible
    Return:
        overall_auc: AUC over all queries of all formulas
        formula_aucs: dict, (formula.query_type, formula.rels) -> AUC
    '''
    predictions = []
    labels = []
    formula_aucs = {}
    random.seed(seed)
    for formula in test_queries:
        formula_labels = []       # ground-truth labels (1 = positive pair)
        formula_predictions = []  # model scores aligned with formula_labels
        formula_queries = test_queries[formula]
        offset = 0
        # score this formula's queries in mini-batches
        while offset < len(formula_queries):
            max_index = min(offset+batch_size, len(formula_queries))
            batch_queries = formula_queries[offset:max_index]
            # one negative target is sampled per query.
            # NOTE: xrange (Python-2-only) replaced by range for Py2/Py3 compatibility;
            # iteration behavior is identical.
            lengths = [1 for j in range(offset, max_index)]
            if hard_negatives:
                negatives = [random.choice(formula_queries[j].hard_neg_samples) for j in range(offset, max_index)]
            else:
                negatives = [random.choice(formula_queries[j].neg_samples) for j in range(offset, max_index)]
            offset += batch_size
            formula_labels.extend([1 for _ in range(len(lengths))])
            formula_labels.extend([0 for _ in range(len(negatives))])
            # positives first, then each query repeated once per negative sample
            batch_scores = enc_dec.forward(formula,
                batch_queries+[b for i, b in enumerate(batch_queries) for _ in range(lengths[i])],
                [q.target_node for q in batch_queries] + negatives)
            batch_scores = batch_scores.data.tolist()
            formula_predictions.extend(batch_scores)
        formula_key = (formula.query_type, formula.rels)
        # nan_to_num guards roc_auc_score against NaN scores
        formula_aucs[formula_key] = roc_auc_score(formula_labels, np.nan_to_num(formula_predictions))
        labels.extend(formula_labels)
        predictions.extend(formula_predictions)
    overall_auc = roc_auc_score(labels, np.nan_to_num(predictions))
    return overall_auc, formula_aucs
def eval_auc_queries_spa_sem_lift(test_queries, enc_dec, batch_size=1000, hard_negatives=False, seed=0, do_spa_sem_lift = False):
    '''
    Given a list of queries, run enc_dec and compute AUC against sampled
    negative targets, optionally in spatial-semantic-lifting mode.

    Args:
        test_queries: dict, formula template -> list of query objects
        enc_dec: model whose forward(formula, queries, targets, do_spa_sem_lift)
            returns scores
        batch_size: maximum number of queries scored per forward pass
        hard_negatives: sample from query.hard_neg_samples instead of
            query.neg_samples
        seed: seed for the random module so negative sampling is reproducible
        do_spa_sem_lift: forwarded to enc_dec.forward
    Return:
        overall_auc: AUC over all queries of all formulas
        formula_aucs: dict, (formula.query_type, formula.rels) -> AUC
    '''
    predictions = []
    labels = []
    formula_aucs = {}
    random.seed(seed)
    for formula in test_queries:
        formula_labels = []       # ground-truth labels (1 = positive pair)
        formula_predictions = []  # model scores aligned with formula_labels
        formula_queries = test_queries[formula]
        offset = 0
        # score this formula's queries in mini-batches
        while offset < len(formula_queries):
            max_index = min(offset+batch_size, len(formula_queries))
            batch_queries = formula_queries[offset:max_index]
            # one negative target is sampled per query.
            # NOTE: xrange (Python-2-only) replaced by range for Py2/Py3 compatibility;
            # iteration behavior is identical.
            lengths = [1 for j in range(offset, max_index)]
            if hard_negatives:
                negatives = [random.choice(formula_queries[j].hard_neg_samples) for j in range(offset, max_index)]
            else:
                negatives = [random.choice(formula_queries[j].neg_samples) for j in range(offset, max_index)]
            offset += batch_size
            formula_labels.extend([1 for _ in range(len(lengths))])
            formula_labels.extend([0 for _ in range(len(negatives))])
            # positives first, then each query repeated once per negative sample
            batch_scores = enc_dec.forward(formula,
                batch_queries+[b for i, b in enumerate(batch_queries) for _ in range(lengths[i])],
                [q.target_node for q in batch_queries] + negatives,
                do_spa_sem_lift = do_spa_sem_lift)
            batch_scores = batch_scores.data.tolist()
            formula_predictions.extend(batch_scores)
        formula_key = (formula.query_type, formula.rels)
        # nan_to_num guards roc_auc_score against NaN scores
        formula_aucs[formula_key] = roc_auc_score(formula_labels, np.nan_to_num(formula_predictions))
        labels.extend(formula_labels)
        predictions.extend(formula_predictions)
    overall_auc = roc_auc_score(labels, np.nan_to_num(predictions))
    return overall_auc, formula_aucs
def eval_perc_queries(test_queries, enc_dec, batch_size=1000, hard_negatives=False, eval_detail_log = False):
    '''
    Given a list of queries, run enc_dec and compute the average percentile
    rank (APR) of each positive target among that query's negative samples.
    Args:
        test_queries: a dict()
            key: formula template
            value: list of query objects
        enc_dec: model whose forward(formula, queries, targets) returns scores
        batch_size: maximum number of queries scored per forward pass
        hard_negatives: rank against query.hard_neg_samples instead of
            query.neg_samples
        eval_detail_log: also return the per-query percentile scores
    Return:
        perc_scores: average percentile rank (APR) score over all test_queries
        fm2query_prec (only if eval_detail_log): a dict()
            key: (formula.query_type, formula.rels)
            value: a list, each item is [query.serialize(), prec]
                query.serialize(): (query_graph, neg_samples, hard_neg_samples)
                prec: percentile score for that query
    '''
    if eval_detail_log:
        fm2query_prec = {}
    perc_scores = []
    for formula in test_queries:
        formula_queries = test_queries[formula]
        if eval_detail_log:
            # save the prec score for each query in each formula
            formula_key = (formula.query_type, formula.rels)
            fm2query_prec[formula_key] = []
        offset = 0
        while offset < len(formula_queries):
            max_index = min(offset+batch_size, len(formula_queries))
            batch_queries = formula_queries[offset:max_index]
            # lengths[i] = number of negatives for query i; negatives is the
            # concatenation of every query's negative samples, in query order
            if hard_negatives:
                lengths = [len(formula_queries[j].hard_neg_samples) for j in range(offset, max_index)]
                negatives = [n for j in range(offset, max_index) for n in formula_queries[j].hard_neg_samples]
            else:
                lengths = [len(formula_queries[j].neg_samples) for j in range(offset, max_index)]
                negatives = [n for j in range(offset, max_index) for n in formula_queries[j].neg_samples]
            offset += batch_size
            # the 1st batch_queries is the positive query-target
            # the 2nd is the negative query-target: each query is repeated
            # lengths[i] times so it aligns with its own negatives
            batch_scores = enc_dec.forward(formula,
                        batch_queries+[b for i, b in enumerate(batch_queries) for _ in range(lengths[i])],
                        [q.target_node for q in batch_queries] + negatives)
            batch_scores = batch_scores.data.tolist()
            # batch_perc_scores:
            #   a list of percentile rank score per query, APR are the average of all these score
            batch_perc_scores = _get_perc_scores(batch_scores, lengths)
            perc_scores.extend(batch_perc_scores)
            if eval_detail_log:
                assert len(batch_queries) == len(batch_perc_scores)
                for i, prec in enumerate(batch_perc_scores):
                    query = batch_queries[i]
                    assert query.query_graph is not None
                    q_s = query.serialize()
                    fm2query_prec[formula_key].append([q_s, prec])
    if eval_detail_log:
        return np.mean(perc_scores), fm2query_prec
    else:
        return np.mean(perc_scores)
def eval_perc_queries_spa_sem_lift(test_queries, enc_dec, batch_size=1000,
            hard_negatives=False, eval_detail_log = False, do_spa_sem_lift = False):
    '''
    Given a list of queries, run enc_dec and compute the average percentile
    rank (APR) of each positive target among that query's negative samples,
    optionally in spatial-semantic-lifting mode.
    Args:
        test_queries: a dict()
            key: formula template
            value: list of query objects
        enc_dec: model whose forward(formula, queries, targets, do_spa_sem_lift)
            returns scores
        batch_size: maximum number of queries scored per forward pass
        hard_negatives: rank against query.hard_neg_samples instead of
            query.neg_samples
        eval_detail_log: also return the per-query percentile scores
        do_spa_sem_lift: forwarded to enc_dec.forward
    Return:
        perc_scores: average percentile rank (APR) score over all test_queries
        fm2query_prec (only if eval_detail_log): a dict()
            key: (formula.query_type, formula.rels)
            value: a list, each item is [query.serialize(), prec]
                query.serialize(): (query_graph, neg_samples, hard_neg_samples)
                prec: percentile score for that query
    '''
    if eval_detail_log:
        fm2query_prec = {}
    perc_scores = []
    for formula in test_queries:
        formula_queries = test_queries[formula]
        if eval_detail_log:
            # save the prec score for each query in each formula
            formula_key = (formula.query_type, formula.rels)
            fm2query_prec[formula_key] = []
        offset = 0
        while offset < len(formula_queries):
            max_index = min(offset+batch_size, len(formula_queries))
            batch_queries = formula_queries[offset:max_index]
            # lengths[i] = number of negatives for query i; negatives is the
            # concatenation of every query's negative samples, in query order
            if hard_negatives:
                lengths = [len(formula_queries[j].hard_neg_samples) for j in range(offset, max_index)]
                negatives = [n for j in range(offset, max_index) for n in formula_queries[j].hard_neg_samples]
            else:
                lengths = [len(formula_queries[j].neg_samples) for j in range(offset, max_index)]
                negatives = [n for j in range(offset, max_index) for n in formula_queries[j].neg_samples]
            offset += batch_size
            # the 1st batch_queries is the positive query-target
            # the 2nd is the negative query-target: each query is repeated
            # lengths[i] times so it aligns with its own negatives
            batch_scores = enc_dec.forward(formula,
                        batch_queries+[b for i, b in enumerate(batch_queries) for _ in range(lengths[i])],
                        [q.target_node for q in batch_queries] + negatives,
                        do_spa_sem_lift = do_spa_sem_lift)
            batch_scores = batch_scores.data.tolist()
            # batch_perc_scores:
            #   a list of percentile rank score per query, APR are the average of all these score
            batch_perc_scores = _get_perc_scores(batch_scores, lengths)
            perc_scores.extend(batch_perc_scores)
            if eval_detail_log:
                assert len(batch_queries) == len(batch_perc_scores)
                for i, prec in enumerate(batch_perc_scores):
                    query = batch_queries[i]
                    assert query.query_graph is not None
                    q_s = query.serialize()
                    fm2query_prec[formula_key].append([q_s, prec])
    if eval_detail_log:
        return np.mean(perc_scores), fm2query_prec
    else:
        return np.mean(perc_scores)
def get_pos_encoder(geo_info,
            spa_enc_type,
            id2geo,
            id2extent,
            spa_enc,
            graph,
            spa_enc_embed_norm = True,
            device = "cpu"):
    """Build the position encoder matching the geo_info flag.

    "geo"/"proj" use a point-location encoder; "projbbox"/"projbboxmerge" use
    an extent (bounding-box) encoder. Any other value raises an Exception.
    """
    if geo_info in ("geo", "proj"):
        return PositionEncoder(spa_enc_type, id2geo, spa_enc, graph,
                spa_enc_embed_norm = spa_enc_embed_norm, device = device)
    if geo_info in ("projbbox", "projbboxmerge"):
        return ExtentPositionEncoder(spa_enc_type, id2geo, id2extent, spa_enc, graph,
                spa_enc_embed_norm = spa_enc_embed_norm, device = device)
    raise Exception("Unknown geo_info parameters!")
def get_encoder(depth, graph, out_dims, feature_modules,
        geo_info,
        spa_enc_type = "no",
        spa_enc_embed_norm = True,
        id2geo = None,
        id2extent = None,
        spa_enc = None,
        enc_agg_type = "add",
        task = "qa",
        device = "cpu"):
    '''
    Construct the GraphSAGE style node embedding encoder
    Args:
        depth: the depth of the graph node embedding encoder, num of GraphSAGE aggregaters
            (0 = plain embedding lookup, 1-3 = stacked GraphSAGE layers)
        graph: a Graph() object
        out_dims: a dict() from node type to embed_dim
        feature_modules: a dict of embedding matrix by node type, each embed matrix shape: [num_ent_by_type + 2, embed_dim]
        geo_info: which kind of geometry to position-encode (see get_pos_encoder)
        spa_enc_type: the type of place encoding method; "no" disables position encoding
        spa_enc_embed_norm: whether to do position embedding normlization is pos_enc
        id2geo/id2extent: geometry lookups forwarded to the position encoder
        spa_enc: the space encoder (required when spa_enc_type != "no")
        enc_agg_type: how feature and position embeddings are combined ("add"/"concat")
        task: "qa" -> NodeEncoder, "spa_sem_lift" -> NodeAndLocationEncoder
        device: cpu or cuda or cuda:0 or cuda:1
    Return:
        enc: a encoder whose forward(nodes, mode) will return node embedding metrix of shape [embed_dim, num_ent]
    Raises:
        Exception: if depth is outside [0, 3], or if a place encoder is
            requested with depth > 0 (only implemented for depth 0)
    '''
    if depth < 0 or depth > 3:
        raise Exception("Depth must be between 0 and 3 (inclusive)")
    if depth == 0:
        if graph.features is not None and feature_modules is not None:
            # 0 layer, directly embedding lookup
            feat_enc = DirectEncoder(graph.features, feature_modules)
        else:
            feat_enc = None
        if spa_enc_type == "no":
            pos_enc = None
        else:
            assert spa_enc is not None
            pos_enc = get_pos_encoder(geo_info = geo_info,
                            spa_enc_type = spa_enc_type,
                            id2geo = id2geo,
                            id2extent = id2extent,
                            spa_enc = spa_enc,
                            graph = graph,
                            spa_enc_embed_norm = spa_enc_embed_norm,
                            device = device)
            # pos_enc = PositionEncoder(spa_enc_type, id2geo, spa_enc, graph,
            #         spa_enc_embed_norm = spa_enc_embed_norm, device = device)
        # combine feature and position encoders into the task-specific encoder
        if task == "qa":
            enc = NodeEncoder(feat_enc, pos_enc, agg_type = enc_agg_type)
        elif task == "spa_sem_lift":
            enc = NodeAndLocationEncoder(feat_enc, pos_enc,
                            out_dims = out_dims, agg_type = enc_agg_type)
    # elif spa_enc_type == "simple":
    #     enc = SimpleSpatialEncoder(graph.features, feature_modules, out_dims, id2geo)
    else:
        if spa_enc_type != "no":
            raise Exception("The place encoding is implemented for depth-0 encoder")
        # 1 GraphSAGE mean aggregator
        aggregator1 = MeanAggregator(graph.features)
        # enc1: a GraphSage Layer, forward() will output [embed_dim, num_ent]
        enc1 = Encoder(graph.features,
                graph.feature_dims,
                out_dims,
                graph.relations,
                graph.adj_lists, feature_modules=feature_modules,
                aggregator=aggregator1,
                device = device)
        enc = enc1
        if depth >= 2:
            # 2 GraphSAGE mean aggregator: aggregates over enc1's outputs
            aggregator2 = MeanAggregator(lambda nodes, mode : enc1(nodes, mode).t().squeeze())
            enc2 = Encoder(lambda nodes, mode : enc1(nodes, mode).t().squeeze(),
                    enc1.out_dims,
                    out_dims,
                    graph.relations,
                    graph.adj_lists, base_model=enc1,
                    aggregator=aggregator2,
                    device = device)
            enc = enc2
            if depth >= 3:
                # 3 GraphSAGE mean aggregator: third stacked layer
                aggregator3 = MeanAggregator(lambda nodes, mode : enc2(nodes, mode).t().squeeze())
                enc3 = Encoder(lambda nodes, mode : enc1(nodes, mode).t().squeeze(),
                        enc2.out_dims,
                        out_dims,
                        graph.relations,
                        graph.adj_lists, base_model=enc2,
                        aggregator=aggregator3,
                        device = device)
                enc = enc3
    return enc
def get_metapath_decoder(graph, out_dims, decoder, feat_dims, spa_embed_dim, enc_agg_type):
    '''
    Build the metapath decoder (the geometric projection operator).
    Args:
        graph: a Graph() object
        out_dims: a dict() mapping node type -> embed_dim
        decoder: flag selecting the projection operator type
        feat_dims: a dict() mapping node type -> feature embed dim
        spa_embed_dim: spatial embedding dimension (block-diagonal decoders only)
        enc_agg_type: encoder aggregation type; must be "concat" for the
            block-diagonal decoders
    Raises:
        Exception: if the decoder flag is not recognized.
    '''
    if decoder == "bilinear":
        return BilinearMetapathDecoder(graph.relations, out_dims)
    if decoder == "transe":
        return TransEMetapathDecoder(graph.relations, out_dims)
    if decoder == "bilinear-diag":
        return BilinearDiagMetapathDecoder(graph.relations, out_dims)
    if decoder in ("bilinear_blockdiag", "blockdiag_p2fmat"):
        # block-diagonal decoders need concatenated feature + space embeddings
        assert enc_agg_type == "concat"
        assert feat_dims[list(feat_dims.keys())[0]] > 0 and spa_embed_dim > 0
        dec_cls = BilinearBlockDiagMetapathDecoder if decoder == "bilinear_blockdiag" else BilinearBlockDiagPos2FeatMatMetapathDecoder
        return dec_cls(graph.relations,
                dims = out_dims,
                feat_dims = feat_dims,
                spa_embed_dim = spa_embed_dim)
    raise Exception("Metapath decoder not recognized.")
def get_intersection_decoder(graph, out_dims, decoder, use_relu = True):
    '''
    Build the intersection decoder (the geometric intersection operator).
    Args:
        graph: a Graph() object
        out_dims: a dict() mapping node type -> embed_dim
        decoder: flag selecting the aggregation ("mean"/"min") and variant
            (""/"_nopostm"/"_simple")
        use_relu: whether the SetIntersection variants apply a ReLU
    Raises:
        Exception: if the decoder flag is not recognized.
    '''
    agg_funcs = {"mean": torch.mean, "min": torch.min}
    if decoder in ("mean", "min"):
        return SetIntersection(out_dims, out_dims, use_relu = use_relu, use_post_mat = True, agg_func=agg_funcs[decoder])
    if decoder in ("mean_nopostm", "min_nopostm"):
        base = decoder.split("_")[0]
        return SetIntersection(out_dims, out_dims, use_relu = use_relu, use_post_mat = False, agg_func=agg_funcs[base])
    if decoder in ("mean_simple", "min_simple"):
        base = decoder.split("_")[0]
        return SimpleSetIntersection(agg_func=agg_funcs[base])
    raise Exception("Intersection decoder not recognized.")
def get_intersection_attention(out_dims, inter_decoder_atten_type, inter_decoder_atten_num=0, inter_decoder_atten_act="leakyrelu", inter_decoder_atten_f_act='sigmoid'):
    '''
    Build the attention module that sits on top of the intersection operator.

    Returns None when inter_decoder_atten_num == 0 (attention disabled).
    The type string is "concat" or "dotproduct" optionally followed by
    "_scaled" (dotproduct only), "_norm" (layernorm) and "_postm"
    (post-matrix), e.g. "dotproduct_scaled_norm_postm".
    Raises:
        Exception: if the attention type string is not recognized.
    '''
    if inter_decoder_atten_num == 0:
        return None
    # suffix -> (layernorm, use_post_mat)
    flag_table = {
        "": (False, False),
        "_norm": (True, False),
        "_postm": (False, True),
        "_norm_postm": (True, True),
    }
    if isinstance(inter_decoder_atten_type, str) and inter_decoder_atten_type.startswith("concat"):
        suffix = inter_decoder_atten_type[len("concat"):]
        if suffix in flag_table:
            layernorm, use_post_mat = flag_table[suffix]
            return IntersectConcatAttention(out_dims, out_dims, inter_decoder_atten_num,
                    activation = inter_decoder_atten_act,
                    f_activation = inter_decoder_atten_f_act,
                    layernorm = layernorm, use_post_mat = use_post_mat)
    elif isinstance(inter_decoder_atten_type, str) and inter_decoder_atten_type.startswith("dotproduct"):
        rest = inter_decoder_atten_type[len("dotproduct"):]
        scaled = rest.startswith("_scaled")
        suffix = rest[len("_scaled"):] if scaled else rest
        if suffix in flag_table:
            layernorm, use_post_mat = flag_table[suffix]
            return IntersectDotProductAttention(out_dims, out_dims, inter_decoder_atten_num,
                    dotproduct_scaled = scaled, layernorm = layernorm, use_post_mat = use_post_mat)
    raise Exception("intersection attention type not recognized.")
def setup_logging(log_file, console=True, filemode='w'):
    """Configure the root logger to write INFO-level records to log_file.

    When console is True, a StreamHandler with the same format is attached so
    log output also appears on the terminal. Returns the logging module so
    callers can write `log = setup_logging(...)`.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        filename=log_file,
                        filemode=filemode)
    if console:
        stream_handler = logging.StreamHandler()
        # mirror the file log on the console at the same level and format
        stream_handler.setLevel(logging.INFO)
        stream_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        # attach to the root logger
        logging.getLogger('').addHandler(stream_handler)
    return logging
def sample_entity_by_metapath(graph, batch_size, neighbor_size, iterator):
    '''
    Args:
        graph: Graph() object
        batch_size: the maximum number of entities for each mini-batch
        neighbor_size: the number of triple templates need to be sampled whose head type is the sampled entity type
        iterator: unused here; kept for interface compatibility
    Return:
        mode: a node type
        nodes: a list of node ids with the node type mode
        neg_nodes: a list of node ids as the negative samples
        hard_neg_nodes: negatives that match some (but not all) sampled templates
        neighbor_templates: a list of (reversed) triple templates whose domain type is mode
        tail_nodes: one list of sampled tail node ids per triple template
    '''
    start = time.time()
    # 1. randomly sample a node type.
    # BUG FIX: dict views are not indexable in Python 3, so
    # random.choice(dict.keys()) raises TypeError; materialize a list first.
    mode = random.choice(list(graph.flat_adj_lists.keys()))
    nodes = set()
    while len(nodes) == 0:
        # 2. sample K=neighbor_size triple template for the given node type
        templates = graph.relations[mode]
        if len(templates) < neighbor_size:
            neighbor_templates = templates
        else:
            neighbor_templates = random.sample(templates, neighbor_size)
        neighbor_templates = [(mode, to_r[1], to_r[0]) for to_r in neighbor_templates]
        # 3. keep only nodes that satisfy ALL sampled triple templates;
        #    nodes_union collects those satisfying ANY of them.
        nodes_union = set()
        for i, rel in enumerate(neighbor_templates):
            if i == 0:
                nodes = set(graph.adj_lists[rel].keys())
                nodes_union = set(graph.adj_lists[rel].keys())
            else:
                nodes = nodes.intersection(set(graph.adj_lists[rel].keys()))
                nodes_union = nodes_union.union(set(graph.adj_lists[rel].keys()))
            if len(nodes) == 0:
                break
        # hard negatives: in the union but not the intersection
        hard_neg_nodes = list(nodes_union - nodes)
    # 4. get negative nodes
    if len(nodes) > batch_size:
        nodes = set(random.sample(list(nodes), batch_size))
    neg_nodes = list(graph.full_sets[mode] - nodes)
    nodes = list(nodes)
    # resample negatives so that len(neg_nodes) == len(nodes)
    if len(neg_nodes) > len(nodes):
        neg_nodes = list(np.random.choice(neg_nodes, size=len(nodes), replace=False))
    else:
        neg_nodes = list(np.random.choice(neg_nodes, size=len(nodes), replace=True))
    # 5. random sample tail node for each triple template
    # tail_nodes: [len(neighbor_templates), len(nodes)], ideally [neighbor_size, batch_size]
    tail_nodes = []
    for i, rel in enumerate(neighbor_templates):
        t_nodes = []
        for n in nodes:
            t_nodes.append(random.choice(list(graph.adj_lists[rel][n])))
        tail_nodes.append(t_nodes)
    # 6. FOR NOW, get a fake hard negative sampling
    if len(hard_neg_nodes) > len(nodes):
        hard_neg_nodes = list(np.random.choice(hard_neg_nodes, size=len(nodes), replace=False))
    elif len(hard_neg_nodes) == 0:
        hard_neg_nodes = neg_nodes
    else:
        hard_neg_nodes = list(np.random.choice(hard_neg_nodes, size=len(nodes), replace=True))
    # 7. reverse the relation in neighbor_templates
    neighbor_templates = [graph._reverse_relation(rel) for rel in neighbor_templates]
    assert len(nodes) == len(neg_nodes) == len(hard_neg_nodes)
    assert len(neighbor_templates) == len(tail_nodes)
    # NOTE(review): the prints below look like leftover debug output — consider
    # switching them to logging or removing once validated.
    print("mode: {}".format(mode))
    print(nodes)
    print(neg_nodes)
    print(hard_neg_nodes)
    print(neighbor_templates)
    print(tail_nodes)
    print("The total time: {}".format(time.time()-start))
    return mode, nodes, neg_nodes, hard_neg_nodes, neighbor_templates, tail_nodes
########################
'''
This is for space encoding
'''
def get_ffn(args, input_dim, f_act, context_str = ""):
    """Build the MultiLayerFeedForwardNN projecting input_dim -> args.spa_embed_dim.

    Args:
        args: argparse namespace providing use_layn, skip_connection,
            spa_embed_dim, num_hidden_layer, dropout, hidden_dim.
        input_dim: size of the raw feature vector fed into the FFN.
        f_act: activation function identifier.
        context_str: label identifying the calling encoder (for debugging).
    """
    # "T"/"F" string flags from the CLI are mapped to booleans.
    use_layn = (args.use_layn == "T")
    skip_connection = (args.skip_connection == "T")
    return MultiLayerFeedForwardNN(
        input_dim=input_dim,
        output_dim=args.spa_embed_dim,
        num_hidden_layers=args.num_hidden_layer,
        dropout_rate=args.dropout,
        hidden_dim=args.hidden_dim,
        activation=f_act,
        use_layernormalize=use_layn,
        skip_connection=skip_connection,
        context_str=context_str)
# def get_spatial_context():
# extent = (-180, 180, -90, 90)
# return extent
def get_spatial_context(id2geo, geo_info = "geo", percision = 100):
    '''
    Return the spatial extent (x_min, x_max, y_min, y_max) of the geo-entities.

    percision: grid size the extent is rounded outward to; 0 means no rounding
    '''
    if geo_info == "geo":
        # Fixed lon/lat extent covering the whole globe.
        return (-180, 180, -90, 90)
    if geo_info in ("proj", "projbbox", "projbboxmerge"):
        # Seed the running min/max from the first entity, then fold in the rest.
        seed = id2geo[list(id2geo.keys())[0]]
        x_min = x_max = seed[0]
        y_min = y_max = seed[1]
        for coord in id2geo.values():
            x_min = min(x_min, coord[0])
            x_max = max(x_max, coord[0])
            y_min = min(y_min, coord[1])
            y_max = max(y_max, coord[1])
        if percision > 0:
            # Round outward so the extent fully contains every point.
            x_min = math.floor(x_min / percision) * percision
            x_max = math.ceil(x_max / percision) * percision
            y_min = math.floor(y_min / percision) * percision
            y_max = math.ceil(y_max / percision) * percision
        return (x_min, x_max, y_min, y_max)
    raise Exception("geo_info Unknown!")
def get_spa_encoder(args, geo_info, spa_enc_type, id2geo, spa_embed_dim, coord_dim = 2,
                    anchor_sample_method = "fromid2geo",
                    num_rbf_anchor_pts = 100,
                    rbf_kernal_size = 10e2,
                    frequency_num = 16,
                    max_radius = 10000,
                    min_radius = 1,
                    f_act = "sigmoid",
                    freq_init = "geometric",
                    use_postmat = "T",
                    device = "cpu"):
    '''
    Factory that builds the requested space (location) encoder.

    Args:
        args: the argparser Object, the attribute we use
            use_layn
            skip_connection
            spa_embed_dim
            num_hidden_layer
            dropout
            hidden_dim
        spa_enc_type: the type of space encoder
        id2geo: a dict(): node id -> [longitude, latitude]
        spa_embed_dim: the output space embedding
        coord_dim: dimensionality of each input coordinate
    '''
    # Convert "T"/"F" string flags to booleans.
    if args.use_layn == "T":
        use_layn = True
    else:
        use_layn = False
    if use_postmat == "T":
        use_post_mat = True
    else:
        use_post_mat = False
    # Dispatch on the encoder type. Most branches first build the
    # feed-forward projection (ffn) whose input_dim matches the raw
    # feature size produced by that encoder, then the encoder itself.
    if spa_enc_type == "gridcell":
        # sin/cos over `frequency_num` scales for 2 coords -> 4*frequency_num features.
        ffn = get_ffn(args,
                      input_dim=int(4 * frequency_num),
                      f_act = f_act,
                      context_str = "GridCellSpatialRelationEncoder")
        spa_enc = GridCellSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            frequency_num = frequency_num,
            max_radius = max_radius,
            min_radius = min_radius,
            freq_init = freq_init,
            ffn=ffn,
            device=device)
    elif spa_enc_type == "gridcellnonorm":
        # Same as gridcell but without normalization inside the encoder.
        ffn = get_ffn(args,
                      input_dim=int(4 * frequency_num),
                      f_act = f_act,
                      context_str = "GridNoNormCellSpatialRelationEncoder")
        spa_enc = GridNoNormCellSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            frequency_num = frequency_num,
            max_radius = max_radius,
            min_radius = min_radius,
            freq_init = freq_init,
            ffn=ffn,
            device=device)
    elif spa_enc_type == "hexagridcell":
        # Hexagon variant builds its own projection internally (no ffn arg).
        spa_enc = HexagonGridCellSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            frequency_num = frequency_num,
            max_radius = max_radius,
            dropout = args.dropout,
            f_act= f_act,
            device=device)
    elif spa_enc_type == "theory":
        # Theory encoder uses 3 basis directions -> 6*frequency_num features.
        ffn = get_ffn(args,
                      input_dim=int(6 * frequency_num),
                      f_act = f_act,
                      context_str = "TheoryGridCellSpatialRelationEncoder")
        spa_enc = TheoryGridCellSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            frequency_num = frequency_num,
            max_radius = max_radius,
            min_radius = min_radius,
            freq_init = freq_init,
            ffn=ffn,
            device=device)
    elif spa_enc_type == "theorydiag":
        spa_enc = TheoryDiagGridCellSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            frequency_num = frequency_num,
            max_radius = max_radius,
            min_radius = min_radius,
            dropout = args.dropout,
            f_act= f_act,
            freq_init = freq_init,
            use_layn = use_layn,
            use_post_mat = use_post_mat,
            device=device)
    elif spa_enc_type == "naive":
        # Directly projects raw (x, y) coordinates normalized by the extent.
        extent = get_spatial_context(id2geo, geo_info = geo_info)
        ffn = get_ffn(args,
                      input_dim=2,
                      f_act = f_act,
                      context_str = "NaiveSpatialRelationEncoder")
        spa_enc = NaiveSpatialRelationEncoder(
            spa_embed_dim,
            extent = extent,
            coord_dim = coord_dim,
            ffn = ffn,
            device=device)
    # elif spa_enc_type == "polar":
    #     ffn = get_ffn(args,
    #                   input_dim=2,
    #                   f_act = f_act,
    #                   context_str = "PolarCoordSpatialRelationEncoder")
    #     spa_enc = PolarCoordSpatialRelationEncoder(spa_embed_dim, coord_dim = coord_dim, ffn = ffn)
    # elif spa_enc_type == "polardist":
    #     ffn = get_ffn(args,
    #                   input_dim=1,
    #                   f_act = f_act,
    #                   context_str = "PolarDistCoordSpatialRelationEncoder")
    #     spa_enc = PolarDistCoordSpatialRelationEncoder(spa_embed_dim, coord_dim = coord_dim, ffn = ffn)
    # elif spa_enc_type == "polargrid":
    #     ffn = get_ffn(args,
    #                   input_dim=int(2 * frequency_num),
    #                   f_act = f_act,
    #                   context_str = "PolarGridCoordSpatialRelationEncoder")
    #     spa_enc = PolarGridCoordSpatialRelationEncoder(
    #         spa_embed_dim,
    #         coord_dim = coord_dim,
    #         frequency_num = frequency_num,
    #         max_radius = max_radius,
    #         min_radius = min_radius,
    #         freq_init = freq_init,
    #         ffn=ffn)
    elif spa_enc_type == "rbf":
        # Radial basis functions around sampled anchor points; one feature per anchor.
        extent = get_spatial_context(id2geo, geo_info = geo_info)
        ffn = get_ffn(args,
                      input_dim=num_rbf_anchor_pts,
                      f_act = f_act,
                      context_str = "RBFSpatialRelationEncoder")
        spa_enc = RBFSpatialRelationEncoder(
            id2geo = id2geo,
            spa_embed_dim = spa_embed_dim,
            coord_dim = coord_dim,
            anchor_sample_method = anchor_sample_method,
            num_rbf_anchor_pts = num_rbf_anchor_pts,
            rbf_kernal_size = rbf_kernal_size,
            rbf_kernal_size_ratio = 0, # we just use 0, because this is only used for global pos enc
            extent = extent,
            ffn=ffn,
            device=device)
    # elif spa_enc_type == "distrbf":
    #     spa_enc = DistRBFSpatialRelationEncoder(
    #         spa_embed_dim, coord_dim = coord_dim,
    #         num_rbf_anchor_pts = num_rbf_anchor_pts, rbf_kernal_size = rbf_kernal_size, max_radius = max_radius,
    #         dropout = dropout, f_act = f_act)
    elif spa_enc_type == "gridlookup":
        # Tile the extent into a grid of size min_radius and learn a lookup embedding per tile.
        ffn = get_ffn(args,
                      input_dim=spa_embed_dim,
                      f_act = f_act,
                      context_str = "GridLookupSpatialRelationEncoder")
        extent = get_spatial_context(id2geo, geo_info = geo_info)
        spa_enc = GridLookupSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            interval = min_radius,
            extent = extent,
            ffn = ffn,
            device=device)
    elif spa_enc_type == "gridlookupnoffn":
        # Same lookup encoder but without the feed-forward projection.
        extent = get_spatial_context(id2geo, geo_info = geo_info)
        spa_enc = GridLookupSpatialRelationEncoder(
            spa_embed_dim,
            coord_dim = coord_dim,
            interval = min_radius,
            extent = extent,
            ffn = None,
            device=device)
    # elif spa_enc_type == "polargridlookup":
    #     assert model_type == "relative"
    #     ffn = get_ffn(args,
    #                   input_dim=args.spa_embed_dim,
    #                   f_act = f_act,
    #                   context_str = "PolarGridLookupSpatialRelationEncoder")
    #     spa_enc = PolarGridLookupSpatialRelationEncoder(
    #         spa_embed_dim,
    #         coord_dim = coord_dim,
    #         max_radius = max_radius,
    #         frequency_num = frequency_num,
    #         ffn = ffn)
    elif spa_enc_type == "aodha":
        extent = get_spatial_context(id2geo, geo_info = geo_info)
        spa_enc = AodhaSpatialRelationEncoder(
            spa_embed_dim,
            extent = extent,
            coord_dim = coord_dim,
            num_hidden_layers = args.num_hidden_layer,
            hidden_dim = args.hidden_dim,
            use_post_mat=use_post_mat,
            f_act=f_act,
            device=device)
    elif spa_enc_type == "none":
        # No spatial encoding at all; callers must pass spa_embed_dim == 0.
        assert spa_embed_dim == 0
        spa_enc = None
    else:
        raise Exception("Space encoder function no support!")
    return spa_enc
"logging.getLogger",
"logging.StreamHandler",
"math.floor",
"netquery.decoders.BilinearDiagMetapathDecoder",
"torch.LongTensor",
"torch.cuda.device_count",
"netquery.decoders.BilinearBlockDiagMetapathDecoder",
"torch.cuda.is_available",
"numpy.mean",
"netquery.attention.IntersectDotProductAttentio... | [((4537, 4554), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4548, 4554), False, 'import random\n'), ((7060, 7077), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7071, 7077), False, 'import random\n'), ((26140, 26278), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'filename': 'log_file', 'filemode': 'filemode'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s', filename=log_file,\n filemode=filemode)\n", (26159, 26278), False, 'import logging\n'), ((27409, 27420), 'time.time', 'time.time', ([], {}), '()\n', (27418, 27420), False, 'import time\n'), ((784, 809), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (807, 809), False, 'import torch\n'), ((6322, 6348), 'numpy.nan_to_num', 'np.nan_to_num', (['predictions'], {}), '(predictions)\n', (6335, 6348), True, 'import numpy as np\n'), ((8900, 8926), 'numpy.nan_to_num', 'np.nan_to_num', (['predictions'], {}), '(predictions)\n', (8913, 8926), True, 'import numpy as np\n'), ((11954, 11974), 'numpy.mean', 'np.mean', (['perc_scores'], {}), '(perc_scores)\n', (11961, 11974), True, 'import numpy as np\n'), ((15065, 15085), 'numpy.mean', 'np.mean', (['perc_scores'], {}), '(perc_scores)\n', (15072, 15085), True, 'import numpy as np\n'), ((18611, 18641), 'netquery.aggregators.MeanAggregator', 'MeanAggregator', (['graph.features'], {}), '(graph.features)\n', (18625, 18641), False, 'from netquery.aggregators import MeanAggregator\n'), ((20561, 20611), 'netquery.decoders.BilinearMetapathDecoder', 'BilinearMetapathDecoder', (['graph.relations', 'out_dims'], {}), '(graph.relations, out_dims)\n', (20584, 20611), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, 
SetIntersection, SimpleSetIntersection\n'), ((21998, 22096), 'netquery.decoders.SetIntersection', 'SetIntersection', (['out_dims', 'out_dims'], {'use_relu': 'use_relu', 'use_post_mat': '(True)', 'agg_func': 'torch.mean'}), '(out_dims, out_dims, use_relu=use_relu, use_post_mat=True,\n agg_func=torch.mean)\n', (22013, 22096), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((26376, 26399), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (26397, 26399), False, 'import logging\n'), ((26558, 26620), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (26575, 26620), False, 'import logging\n'), ((3513, 3585), 'scipy.stats.percentileofscore', 'stats.percentileofscore', (['neg_scores[cum_sum:cum_sum + length]', 'scores[i]'], {}), '(neg_scores[cum_sum:cum_sum + length], scores[i])\n', (3536, 3585), True, 'import scipy.stats as stats\n'), ((6160, 6194), 'numpy.nan_to_num', 'np.nan_to_num', (['formula_predictions'], {}), '(formula_predictions)\n', (6173, 6194), True, 'import numpy as np\n'), ((8738, 8772), 'numpy.nan_to_num', 'np.nan_to_num', (['formula_predictions'], {}), '(formula_predictions)\n', (8751, 8772), True, 'import numpy as np\n'), ((11893, 11913), 'numpy.mean', 'np.mean', (['perc_scores'], {}), '(perc_scores)\n', (11900, 11913), True, 'import numpy as np\n'), ((15004, 15024), 'numpy.mean', 'np.mean', (['perc_scores'], {}), '(perc_scores)\n', (15011, 15024), True, 'import numpy as np\n'), ((20656, 20704), 'netquery.decoders.TransEMetapathDecoder', 'TransEMetapathDecoder', (['graph.relations', 'out_dims'], {}), '(graph.relations, out_dims)\n', (20677, 20704), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, 
BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((22147, 22246), 'netquery.decoders.SetIntersection', 'SetIntersection', (['out_dims', 'out_dims'], {'use_relu': 'use_relu', 'use_post_mat': '(False)', 'agg_func': 'torch.mean'}), '(out_dims, out_dims, use_relu=use_relu, use_post_mat=False,\n agg_func=torch.mean)\n', (22162, 22246), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((23190, 23381), 'netquery.attention.IntersectConcatAttention', 'IntersectConcatAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'activation': 'inter_decoder_atten_act', 'f_activation': 'inter_decoder_atten_f_act', 'layernorm': '(False)', 'use_post_mat': '(False)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n activation=inter_decoder_atten_act, f_activation=\n inter_decoder_atten_f_act, layernorm=False, use_post_mat=False)\n', (23214, 23381), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((27808, 27847), 'random.sample', 'random.sample', (['templates', 'neighbor_size'], {}), '(templates, neighbor_size)\n', (27821, 27847), False, 'import random\n'), ((20756, 20810), 'netquery.decoders.BilinearDiagMetapathDecoder', 'BilinearDiagMetapathDecoder', (['graph.relations', 'out_dims'], {}), '(graph.relations, out_dims)\n', (20783, 20810), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((22296, 22338), 'netquery.decoders.SimpleSetIntersection', 'SimpleSetIntersection', ([], {'agg_func': 'torch.mean'}), '(agg_func=torch.mean)\n', (22317, 22338), False, 
'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((23456, 23646), 'netquery.attention.IntersectConcatAttention', 'IntersectConcatAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'activation': 'inter_decoder_atten_act', 'f_activation': 'inter_decoder_atten_f_act', 'layernorm': '(True)', 'use_post_mat': '(False)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n activation=inter_decoder_atten_act, f_activation=\n inter_decoder_atten_f_act, layernorm=True, use_post_mat=False)\n', (23480, 23646), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((26760, 26781), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (26777, 26781), False, 'import logging\n'), ((30116, 30127), 'time.time', 'time.time', ([], {}), '()\n', (30125, 30127), False, 'import time\n'), ((5246, 5296), 'random.choice', 'random.choice', (['formula_queries[j].hard_neg_samples'], {}), '(formula_queries[j].hard_neg_samples)\n', (5259, 5296), False, 'import random\n'), ((5444, 5489), 'random.choice', 'random.choice', (['formula_queries[j].neg_samples'], {}), '(formula_queries[j].neg_samples)\n', (5457, 5489), False, 'import random\n'), ((7769, 7819), 'random.choice', 'random.choice', (['formula_queries[j].hard_neg_samples'], {}), '(formula_queries[j].hard_neg_samples)\n', (7782, 7819), False, 'import random\n'), ((7967, 8012), 'random.choice', 'random.choice', (['formula_queries[j].neg_samples'], {}), '(formula_queries[j].neg_samples)\n', (7980, 8012), False, 'import random\n'), ((20985, 21104), 'netquery.decoders.BilinearBlockDiagMetapathDecoder', 'BilinearBlockDiagMetapathDecoder', (['graph.relations'], {'dims': 'out_dims', 'feat_dims': 'feat_dims', 'spa_embed_dim': 'spa_embed_dim'}), '(graph.relations, dims=out_dims, feat_dims=\n 
feat_dims, spa_embed_dim=spa_embed_dim)\n', (21017, 21104), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((22380, 22477), 'netquery.decoders.SetIntersection', 'SetIntersection', (['out_dims', 'out_dims'], {'use_relu': 'use_relu', 'use_post_mat': '(True)', 'agg_func': 'torch.min'}), '(out_dims, out_dims, use_relu=use_relu, use_post_mat=True,\n agg_func=torch.min)\n', (22395, 22477), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((23722, 23912), 'netquery.attention.IntersectConcatAttention', 'IntersectConcatAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'activation': 'inter_decoder_atten_act', 'f_activation': 'inter_decoder_atten_f_act', 'layernorm': '(False)', 'use_post_mat': '(True)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n activation=inter_decoder_atten_act, f_activation=\n inter_decoder_atten_f_act, layernorm=False, use_post_mat=True)\n', (23746, 23912), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((32267, 32296), 'math.floor', 'math.floor', (['(x_min / percision)'], {}), '(x_min / percision)\n', (32277, 32296), False, 'import math\n'), ((32329, 32357), 'math.ceil', 'math.ceil', (['(x_max / percision)'], {}), '(x_max / percision)\n', (32338, 32357), False, 'import math\n'), ((32390, 32419), 'math.floor', 'math.floor', (['(y_min / percision)'], {}), '(y_min / percision)\n', (32400, 32419), False, 'import math\n'), ((32452, 32480), 'math.ceil', 'math.ceil', (['(y_max / percision)'], {}), '(y_max / percision)\n', (32461, 32480), False, 'import math\n'), ((1127, 1152), 'torch.cuda.device_count', 
'torch.cuda.device_count', ([], {}), '()\n', (1150, 1152), False, 'import torch\n'), ((1365, 1385), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1377, 1385), False, 'import torch\n'), ((21341, 21470), 'netquery.decoders.BilinearBlockDiagPos2FeatMatMetapathDecoder', 'BilinearBlockDiagPos2FeatMatMetapathDecoder', (['graph.relations'], {'dims': 'out_dims', 'feat_dims': 'feat_dims', 'spa_embed_dim': 'spa_embed_dim'}), '(graph.relations, dims=out_dims,\n feat_dims=feat_dims, spa_embed_dim=spa_embed_dim)\n', (21384, 21470), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((22527, 22625), 'netquery.decoders.SetIntersection', 'SetIntersection', (['out_dims', 'out_dims'], {'use_relu': 'use_relu', 'use_post_mat': '(False)', 'agg_func': 'torch.min'}), '(out_dims, out_dims, use_relu=use_relu, use_post_mat=False,\n agg_func=torch.min)\n', (22542, 22625), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((23993, 24182), 'netquery.attention.IntersectConcatAttention', 'IntersectConcatAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'activation': 'inter_decoder_atten_act', 'f_activation': 'inter_decoder_atten_f_act', 'layernorm': '(True)', 'use_post_mat': '(True)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n activation=inter_decoder_atten_act, f_activation=\n inter_decoder_atten_f_act, layernorm=True, use_post_mat=True)\n', (24017, 24182), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((22674, 22715), 'netquery.decoders.SimpleSetIntersection', 'SimpleSetIntersection', ([], {'agg_func': 'torch.min'}), 
'(agg_func=torch.min)\n', (22695, 22715), False, 'from netquery.decoders import BilinearMetapathDecoder, TransEMetapathDecoder, BilinearDiagMetapathDecoder, BilinearBlockDiagMetapathDecoder, BilinearBlockDiagPos2FeatMatMetapathDecoder, SetIntersection, SimpleSetIntersection\n'), ((24263, 24401), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(True)', 'layernorm': '(False)', 'use_post_mat': '(False)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=True, layernorm=False, use_post_mat=False)\n', (24291, 24401), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((2248, 2271), 'torch.LongTensor', 'torch.LongTensor', (['nodes'], {}), '(nodes)\n', (2264, 2271), False, 'import torch\n'), ((2396, 2449), 'torch.LongTensor', 'torch.LongTensor', (['[node_maps[mode][n] for n in nodes]'], {}), '([node_maps[mode][n] for n in nodes])\n', (2412, 2449), False, 'import torch\n'), ((24478, 24617), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(False)', 'layernorm': '(False)', 'use_post_mat': '(False)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=False, layernorm=False, use_post_mat=False)\n', (24506, 24617), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((24706, 24843), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(True)', 'layernorm': '(True)', 'use_post_mat': '(False)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=True, layernorm=True, use_post_mat=False)\n', (24734, 24843), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), 
((24925, 25063), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(False)', 'layernorm': '(True)', 'use_post_mat': '(False)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=False, layernorm=True, use_post_mat=False)\n', (24953, 25063), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((25153, 25290), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(True)', 'layernorm': '(False)', 'use_post_mat': '(True)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=True, layernorm=False, use_post_mat=True)\n', (25181, 25290), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((25373, 25511), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(False)', 'layernorm': '(False)', 'use_post_mat': '(True)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=False, layernorm=False, use_post_mat=True)\n', (25401, 25511), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((25606, 25742), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 'inter_decoder_atten_num'], {'dotproduct_scaled': '(True)', 'layernorm': '(True)', 'use_post_mat': '(True)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=True, layernorm=True, use_post_mat=True)\n', (25634, 25742), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n'), ((25830, 25967), 'netquery.attention.IntersectDotProductAttention', 'IntersectDotProductAttention', (['out_dims', 'out_dims', 
'inter_decoder_atten_num'], {'dotproduct_scaled': '(False)', 'layernorm': '(True)', 'use_post_mat': '(True)'}), '(out_dims, out_dims, inter_decoder_atten_num,\n dotproduct_scaled=False, layernorm=True, use_post_mat=True)\n', (25858, 25967), False, 'from netquery.attention import IntersectConcatAttention, IntersectDotProductAttention\n')] |
import os
import sys
# Class names (from coco.names) that this detector treats as trash.
TRASH = [
    'bottle',
    'cup',
    'fork',
    'knife',
    'spoon',  # BUG FIX: missing comma here silently fused 'spoon' and 'banana'
    'banana',  # into the single string 'spoonbanana', so neither class matched.
    'apple',
    'sandwich',
    'orange',
    'broccoli',
    'carrot',
    'hot dog',
    'pizza',
    'donut',
    'cake'
]
# Locations of the YOLO model directory and the default input image.
YOLO_PATH = os.path.join(sys.path[0], 'yc')
IMAGE_PATH = os.path.join(sys.path[0], 'input.jpg')
# Minimum detection confidence and non-maxima-suppression overlap threshold.
DEFAULT_CONFIDENCE = 0.5
DEFAULT_THRESHOLD = 0.3
import numpy as np
import argparse
import time
import cv2
from model_def import load_model
from keras import backend as K
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, img_to_array, array_to_img, load_img
from slidingBox import boxCoordinates
from PIL import Image,ImageDraw
img_width, img_height = 256, 256
# Keras backends differ on channel ordering; build the input shape to match.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = load_model(input_shape, "4.h5")
graph = tf.get_default_graph()
labelsPath = os.path.join(YOLO_PATH, 'coco.names')
LABELS = open(labelsPath).read().strip().splitlines()
# random list of colors for class labels (seeded for reproducible colors)
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype='uint8')
weightsPath = os.path.join(YOLO_PATH, 'yolov3.weights')
configPath = os.path.join(YOLO_PATH, 'yolov3.cfg')
# load YOLO network definition and weights
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
def process_image(image):
    """Run YOLO detection on a BGR image and draw boxes for trash classes.

    Args:
        image: numpy BGR image (as produced by cv2.imread).

    Returns:
        The same image array with rectangles and labels drawn on detections
        whose class name is in TRASH.
    """
    (H, W) = image.shape[:2]
    # determine only the *output* layer names that we need from YOLO.
    # BUG FIX: getUnconnectedOutLayers() returns [[i], ...] on older OpenCV
    # but a flat [i, ...] on OpenCV >= 4.5.4, where indexing i[0] raises.
    # Flattening handles both layouts.
    ln = net.getLayerNames()
    ln = [ln[int(i) - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]
    # construct blob from image (scale to [0,1], 416x416, BGR->RGB swap)
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # show timing information on YOLO
    print('took {:.6f} seconds'.format(end - start))
    boxes = []
    confidences = []
    classIDs = []
    for output in layerOutputs:
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence < DEFAULT_CONFIDENCE:
                continue
            # scale the bounding box coordinates back to image size;
            # YOLO reports center (x, y) plus width/height, all normalized
            box = detection[0:4] * np.array([W, H, W, H])
            (centerX, centerY, width, height) = box.astype('int')
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            classIDs.append(classID)
    # apply non-maxima suppression to suppress weak, overlapping bounding
    # boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, DEFAULT_CONFIDENCE, DEFAULT_THRESHOLD)
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            label = LABELS[classIDs[i]]
            # only draw boxes for the classes we consider trash
            if label not in TRASH:
                continue
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = LABELS[classIDs[i]]
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, color, 2)
    return image
# Take a PIL image
def process_image_keras(image):
    """Slide a window over a PIL image, classify each crop, and outline hits.

    Args:
        image: PIL.Image to scan; boxes are drawn on it in place.

    Returns:
        The same PIL image with a 3-pixel black outline around every window
        whose classifier score is >= 0.92.
    """
    width, height = image.size
    boxSize = width / 5
    boxCoords = boxCoordinates(width, height, boxSize)
    # Hoisted out of the loop: previously a new ImageDraw was created for
    # every detection, and none existed at all when nothing was detected.
    draw = ImageDraw.Draw(image)
    subImageVals = []
    rectangleCoord = []
    for i in range(len(boxCoords)):
        boxX = boxCoords[i][0]
        boxY = boxCoords[i][1]
        # Crop the window and resize it to the classifier's 256x256 input.
        subImage = image.crop((boxX, boxY, boxX + boxSize, boxY + boxSize))
        subImage = subImage.resize((256, 256), Image.ANTIALIAS)
        foundTrash = process_subimage(subImage)
        if foundTrash >= 0.92:
            rectangleCoord.append((boxX, boxY))
            subImageVals.append((foundTrash, boxX, boxY))
    # Draw a thick outline by stacking `line_width` concentric rectangles.
    # (renamed from `width`, which shadowed the image width above)
    line_width = 3
    for j in range(len(rectangleCoord)):
        for k in range(line_width):
            draw.rectangle(((rectangleCoord[j][0] + k, rectangleCoord[j][1] + k),
                            (rectangleCoord[j][0] + boxSize - k, rectangleCoord[j][1] + boxSize - k)),
                           outline='black')
    return image
def process_subimage(image):
    """Classify a 256x256 PIL crop with the global Keras model.

    Args:
        image: PIL.Image already resized to 256x256.

    Returns:
        float: the model's score for the single output unit.
    """
    with graph.as_default():
        x = img_to_array(image)  # Numpy array with shape (256, 256, 3)
        x = x.reshape((1,) + x.shape)  # add the batch dimension
        # Removed an unused ImageDataGenerator().flow(x) — the model
        # predicts directly on the raw array and the generator was never read.
        result = model.predict(x)
    # print(result)
    return result[0][0]
| [
"cv2.dnn.blobFromImage",
"keras.preprocessing.image.img_to_array",
"cv2.rectangle",
"keras.backend.image_data_format",
"os.path.join",
"numpy.argmax",
"model_def.load_model",
"cv2.putText",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"PIL.ImageDraw.Draw",
"numpy.random.seed"... | [((251, 282), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""yc"""'], {}), "(sys.path[0], 'yc')\n", (263, 282), False, 'import os\n'), ((296, 334), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""input.jpg"""'], {}), "(sys.path[0], 'input.jpg')\n", (308, 334), False, 'import os\n'), ((905, 936), 'model_def.load_model', 'load_model', (['input_shape', '"""4.h5"""'], {}), "(input_shape, '4.h5')\n", (915, 936), False, 'from model_def import load_model\n'), ((945, 967), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (965, 967), True, 'import tensorflow as tf\n'), ((983, 1020), 'os.path.join', 'os.path.join', (['YOLO_PATH', '"""coco.names"""'], {}), "(YOLO_PATH, 'coco.names')\n", (995, 1020), False, 'import os\n'), ((1117, 1135), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1131, 1135), True, 'import numpy as np\n'), ((1224, 1265), 'os.path.join', 'os.path.join', (['YOLO_PATH', '"""yolov3.weights"""'], {}), "(YOLO_PATH, 'yolov3.weights')\n", (1236, 1265), False, 'import os\n'), ((1279, 1316), 'os.path.join', 'os.path.join', (['YOLO_PATH', '"""yolov3.cfg"""'], {}), "(YOLO_PATH, 'yolov3.cfg')\n", (1291, 1316), False, 'import os\n'), ((1341, 1392), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['configPath', 'weightsPath'], {}), '(configPath, weightsPath)\n', (1367, 1392), False, 'import cv2\n'), ((757, 778), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (776, 778), True, 'from keras import backend as K\n'), ((1656, 1732), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (1677, 1732), False, 'import cv2\n'), ((1768, 1779), 'time.time', 'time.time', ([], {}), '()\n', (1777, 1779), False, 'import time\n'), ((1825, 1836), 'time.time', 'time.time', ([], {}), '()\n', (1834, 1836), False, 'import time\n'), 
((2865, 2940), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', 'DEFAULT_CONFIDENCE', 'DEFAULT_THRESHOLD'], {}), '(boxes, confidences, DEFAULT_CONFIDENCE, DEFAULT_THRESHOLD)\n', (2881, 2940), False, 'import cv2\n'), ((3842, 3880), 'slidingBox.boxCoordinates', 'boxCoordinates', (['width', 'height', 'boxSize'], {}), '(width, height, boxSize)\n', (3856, 3880), False, 'from slidingBox import boxCoordinates\n'), ((5202, 5221), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (5214, 5221), False, 'from keras.preprocessing.image import ImageDataGenerator, img_to_array, array_to_img, load_img\n'), ((2223, 2240), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (2232, 2240), True, 'import numpy as np\n'), ((3473, 3527), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(image, (x, y), (x + w, y + h), color, 2)\n', (3486, 3527), False, 'import cv2\n'), ((3579, 3656), 'cv2.putText', 'cv2.putText', (['image', 'text', '(x, y - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n', (3590, 3656), False, 'import cv2\n'), ((4516, 4537), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (4530, 4537), False, 'from PIL import Image, ImageDraw\n'), ((2446, 2468), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (2454, 2468), True, 'import numpy as np\n'), ((5328, 5348), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (5346, 5348), False, 'from keras.preprocessing.image import ImageDataGenerator, img_to_array, array_to_img, load_img\n')] |
"""
Basic Unit testing for helper_functions.py
"""
import random
import pandas as pd
import numpy as np
import pytest
from lambdata import helper_functions
# Shared fixture: a 100-row, 4-column frame of random ints in [0, 100),
# used by the tests below.
df = pd.DataFrame(
    np.random.randint(0, 100, size=(100, 4)),
    columns=list('ABCD'))
def test_null_count():
    """null_count() on a WrangledDataFrame should yield a numpy integer."""
    wrangled = helper_functions.WrangledDataFrame(df)
    count = wrangled.null_count()
    assert isinstance(count, np.int64)
# def test_null_count_matches():
# wrangled_df = WrangledDataFrame(df)
# with pytest.raises(InsufficientAmount):
# wrangled_df.null_count()
| [
"numpy.random.randint",
"lambdata.helper_functions.WrangledDataFrame"
] | [((182, 222), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': '(100, 4)'}), '(0, 100, size=(100, 4))\n', (199, 222), True, 'import numpy as np\n'), ((339, 377), 'lambdata.helper_functions.WrangledDataFrame', 'helper_functions.WrangledDataFrame', (['df'], {}), '(df)\n', (373, 377), False, 'from lambdata import helper_functions\n')] |
#!/usr/bin/env python
import numpy as np
from pars import Inp_Pars
class Forward_Rates(object):
    """
    Simulate one realization of future (possibly transformed) interest rates
    under a previously fitted financial model.

    Parameters
    ----------
    X_0 : float
        Current rate (or transformed rate), used as the starting point.
    fit : list of float
        Fitted parameters of the chosen model.
    model : str
        Which model to simulate: 'Brownian' or 'Vasicek'.
    random_array : np.array
        Pre-drawn random shocks; its length implicitly fixes the number
        of simulation steps.

    Returns
    -------
    run() returns a list holding the simulated path, starting at X_0.
    """
    def __init__(self, X_0, fit, model, random_array):
        self.X_0 = X_0
        self.fit = fit
        self.model = model
        self.random_array = random_array
        self.model_func = None
        self.X_forward = None

    def retrieve_function(self):
        if self.model == 'Brownian':
            # Multiplicative (geometric-Brownian-style) update per step.
            mu, sigma = self.fit[0], self.fit[1]
            growth = np.exp((mu - sigma**2./2.)*Inp_Pars.dt
                             + sigma*self.random_array)
            path = [self.X_0]
            path.extend(self.X_0 * np.cumprod(growth))
            self.X_forward = path
        if self.model == 'Vasicek':
            # Mean-reverting additive update per step.
            theta1, theta2, theta3 = self.fit[0], self.fit[1], self.fit[2]
            path = [self.X_0]
            for i, shock in enumerate(self.random_array):
                prev = path[i]
                path.append(prev
                            + theta1*(theta2 - prev)*Inp_Pars.dt
                            + theta3*shock)
            self.X_forward = path

    def run(self):
        self.retrieve_function()
        return self.X_forward
| [
"numpy.exp",
"numpy.cumprod"
] | [((1199, 1274), 'numpy.exp', 'np.exp', (['((mu - sigma ** 2.0 / 2.0) * Inp_Pars.dt + sigma * self.random_array)'], {}), '((mu - sigma ** 2.0 / 2.0) * Inp_Pars.dt + sigma * self.random_array)\n', (1205, 1274), True, 'import numpy as np\n'), ((1349, 1365), 'numpy.cumprod', 'np.cumprod', (['step'], {}), '(step)\n', (1359, 1365), True, 'import numpy as np\n')] |
import copy
import pickle
import random
import os
import torch
import torch.nn.functional as F
import torch.distributed as dist
import numpy as np
from torch.autograd import Variable
from gym.spaces import Discrete, Box
import ped_env
import rl
from rl.utils.miscellaneous import str_key
def set_dict(target_dict, value, *args):
    """Store `value` in `target_dict` under the composite key built from args.

    Does nothing when target_dict is None.
    """
    if target_dict is None:
        return
    key = str_key(*args)
    target_dict[key] = value
def get_dict(target_dict, *args):
    """Look up the composite key built from args.

    Returns None when target_dict is None, and 0 for missing keys.
    """
    if target_dict is None:
        return
    key = str_key(*args)
    return target_dict.get(key, 0)
def uniform_random_pi(A, s = None, Q = None, a = None):
    """Probability of any single action under a uniform random policy over A.

    The s/Q/a parameters exist only to match the policy-function signature;
    they are not used. Returns 0.0 for an empty action set.
    """
    count = len(A)
    return 1.0 / count if count else 0.0
def sample(A):
    """Return one element of A, chosen uniformly at random."""
    chosen = random.choice(A)
    return chosen
def greedy_pi(A, s, Q, a):
    """Probability that action `a` is chosen by a greedy policy over A in state s.

    All actions tied for the maximum Q-value share the probability mass
    uniformly; non-maximal actions get 0.
    """
    best_value = -float('inf')
    best_actions = []
    for candidate in A:
        value = get_dict(Q, s, candidate)
        if value > best_value:
            best_value = value
            best_actions = [candidate]
        elif value == best_value:
            best_actions.append(candidate)
    if not best_actions:
        return 0.0
    return 1.0 / len(best_actions) if a in best_actions else 0.0
def epsilon_greedy_pi(A, s, Q, a, epsilon = 0.1):
    """Probability of action `a` under an epsilon-greedy policy.

    With probability (1 - epsilon) the greedy distribution over A is used,
    and with probability epsilon an action is drawn uniformly from A.
    Returns 0.0 for an empty action set.

    Fix: removed a dead local (`n = int(1.0 / greedy_p)`) that was computed
    but never used.
    """
    m = len(A)
    if m == 0:
        return 0.0
    greedy_p = greedy_pi(A, s, Q, a)
    if greedy_p == 0:
        # `a` is not a greedy action: only the uniform exploration term applies.
        return epsilon / m
    return (1 - epsilon) * greedy_p + epsilon / m
def back_specified_dimension(space) -> int:
    """Return the flattened dimensionality of a gym space.

    A Discrete space counts as one dimension; a Box space contributes the
    product of its shape. Uses isinstance() instead of an exact `type(...) is`
    check so that subclasses of Discrete/Box are accepted as well, and
    np.prod instead of a manual accumulation loop.

    Raises
    ------
    Exception
        If the space is neither Discrete nor Box.
    """
    if isinstance(space, Discrete):
        return 1
    if isinstance(space, Box):
        return int(np.prod(space.shape))
    raise Exception("目前只能处理Discete与Box类型的空间!")
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
    """ Gradient averaging.

    Sums every parameter's gradient across all processes via all_reduce
    (SUM) and divides by the world size in-place, leaving the averaged
    gradient in param.grad.data.

    Assumes torch.distributed has been initialized and that each parameter
    already carries a gradient.
    NOTE(review): `dist.reduce_op` is the legacy alias of `dist.ReduceOp`
    in newer PyTorch releases, and passing `group=0` (rather than a process
    group handle or omitting it) is unusual — verify against the PyTorch
    version this project targets.
    """
    size = float(dist.get_world_size())
    for param in model.parameters():
        dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
        param.grad.data /= size
def onehot_from_int(x, action_dim: int):
    """Return a float one-hot tensor of length action_dim with 1.0 at index x."""
    # Float entries keep the result usable in gradient computations.
    values = [1.0 if i == x else 0.0 for i in range(action_dim)]
    return torch.tensor(values)
def onehot_from_logits(logits, eps=0.0):
    """
    Convert a batch of logits into one-hot actions, epsilon-greedily.

    Each row gets the one-hot of its per-row maximum with probability
    (1 - eps), or a uniformly random one-hot action with probability eps.
    """
    row_max = logits.max(1, keepdim=True)[0]
    argmax_acs = (logits == row_max).float()
    if eps == 0.0:
        # Pure greedy: no random actions required.
        return argmax_acs
    batch = logits.shape[0]
    n_actions = logits.shape[1]
    rand_idx = np.random.choice(range(n_actions), size=batch)
    rand_acs = Variable(torch.eye(n_actions)[[rand_idx]], requires_grad=False)
    # Mix greedy and random rows according to per-row coin flips.
    rows = []
    for i, r in enumerate(torch.rand(batch)):
        rows.append(argmax_acs[i] if r > eps else rand_acs[i])
    return torch.stack(rows)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
    """Draw a Gumbel(0, 1) sample of the given shape (no gradient tracking)."""
    uniform = Variable(tens_type(*shape).uniform_(), requires_grad=False)
    # Inverse-CDF transform of a uniform draw; eps guards against log(0).
    return -torch.log(eps - torch.log(uniform + eps))
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, temperature):
    """Draw one sample from the Gumbel-Softmax distribution over `logits`."""
    noise = sample_gumbel(logits.shape, tens_type=type(logits.data))
    perturbed = logits + noise
    return F.softmax(perturbed / temperature, dim=1)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, temperature=1.0, hard=False):
    """Sample from the Gumbel-Softmax distribution, optionally discretized.

    Args:
        logits: [batch_size, n_class] unnormalized log-probabilities.
        temperature: non-negative scalar controlling softmax sharpness.
        hard: if True, return a one-hot sample while letting gradients flow
            through the soft sample (straight-through estimator).

    Returns:
        [batch_size, n_class] sample; one-hot when hard=True, otherwise a
        probability distribution summing to 1 across classes.
    """
    soft_sample = gumbel_softmax_sample(logits.cpu(), temperature)
    if not hard:
        return soft_sample
    hard_sample = onehot_from_logits(soft_sample)
    # Straight-through trick: forward uses the hard sample, backward the soft one.
    return (hard_sample - soft_sample).detach() + soft_sample
def flatten_data(data, dim, device, ifBatch=False):
    """Flatten observation data into a float tensor on `device`.

    Returns shape (dim,) by default, or (batch, dim) when ifBatch is True.
    Lists are first converted to numpy arrays.
    """
    if type(data) is list:
        data = np.array(data)
    tensor = torch.from_numpy(data).float().to(device)
    if not ifBatch:
        return tensor.reshape(dim)
    return tensor.reshape(tensor.shape[0], dim)
def process_experience_data(trans_pieces, to_tensor = False, device = None):
    """Unpack a batch of transitions into (s0, a0, r1, done, s1) arrays.

    When to_tensor is True, states/actions become float tensors on `device`,
    rewards become a float tensor (CPU), and `done` becomes a tensor.
    """
    s0_list, a0_list, r_list, d_list, s1_list = [], [], [], [], []
    for t in trans_pieces:
        s0_list.append(t.s0)
        a0_list.append(t.a0)
        r_list.append(t.reward)
        d_list.append(t.is_done)
        s1_list.append(t.s1)
    states_0 = np.vstack(s0_list)
    actions_0 = np.array(a0_list)
    reward_1 = np.array(r_list)
    is_done = np.array(d_list)
    states_1 = np.vstack(s1_list)
    if to_tensor:
        states_0 = torch.from_numpy(states_0).float().to(device)
        states_1 = torch.from_numpy(states_1).float().to(device)
        actions_0 = torch.from_numpy(actions_0).float().to(device)
        reward_1 = torch.from_numpy(reward_1).float()
        is_done = torch.from_numpy(is_done)
    return states_0, actions_0, reward_1, is_done, states_1
def process_maddpg_experience_data(trans_pieces, state_dims, agent_count, device = None):
    """Unpack multi-agent transitions into MADDPG training tensors.

    Builds per-agent flattened state tensors for s0/s1, the concatenated
    critic inputs, the joint action tensor, and the reward tensor.

    Parameters
    ----------
    trans_pieces : batch of transitions with fields s0, a0, reward, is_done,
        s1 — each of s0/a0/s1 is assumed indexable per agent (one entry per
        agent); TODO confirm against the replay buffer's transition layout.
    state_dims : per-agent flattened state sizes (passed to flatten_data).
    agent_count : number of agents.
    device : torch device forwarded to flatten_data / tensor moves.

    Returns
    -------
    (per-agent s0 tensors, joint action tensor, reward tensor, is_done array,
     per-agent s1 tensors, s0 critic input, s1 critic input)
    """
    s0 = np.array([x.s0 for x in trans_pieces])
    a0 = np.array([x.a0 for x in trans_pieces])
    r1 = np.array([x.reward for x in trans_pieces])
    is_done = np.array([x.is_done for x in trans_pieces])
    s1 = np.array([x.s1 for x in trans_pieces])
    # Regroup from per-transition rows to per-agent stacks.
    s0 = [np.stack(s0[:, j], axis=0) for j in range(agent_count)]
    s1 = [np.stack(s1[:, j], axis=0) for j in range(agent_count)]
    s0_temp_in = [flatten_data(s0[j], state_dims[j], device, ifBatch=True)
                  for j in range(agent_count)]
    s1_temp_in = [flatten_data(s1[j], state_dims[j], device, ifBatch=True)
                  for j in range(agent_count)]
    # Critic sees every agent's (flattened) state concatenated along dim 1.
    s0_critic_in = torch.cat([s0_temp_in[j] for j in range(agent_count)], dim=1)
    s1_critic_in = torch.cat([s1_temp_in[j] for j in range(agent_count)], dim=1)
    # Flatten each transition's per-agent actions into one joint action row.
    a0 = torch.from_numpy(
        np.stack([np.concatenate(a0[j, :]) for j in range(a0.shape[0])], axis=0).astype(float)) \
        .float().to(device)
    r1 = torch.tensor(r1).float().to(device)
    return s0_temp_in, a0, r1, is_done, s1_temp_in, s0_critic_in, s1_critic_in
def print_train_string(experience, trans=500):
    """Print the average reward over the most recent transitions.

    NOTE(review): `trans if trans > experience.len else experience.len`
    always selects the *larger* of the two values, i.e. it may request more
    transitions than the buffer holds; clamping with min() looks like the
    intent — confirm how last_n_trans handles oversized requests before
    changing it.
    """
    rewards = []
    last_trans = experience.last_n_trans(trans if trans > experience.len else experience.len)
    if last_trans is None:
        print("trans is none!!!")
        return
    rewards.append(np.mean([x.reward for x in last_trans]))
    print("average rewards in last {} trans:{}".format(trans, rewards))
    print("{}".format(experience.__str__()))
def loss_callback(agent, loss):
    """Record actor/critic losses on the agent, push the running means to
    TensorBoard every episode, and print them every `log_frequent` episodes."""
    agent.loss_recoder.append(list(loss))
    history = len(agent.loss_recoder)
    if history > 0:  # logged on every episode
        recent = np.array(agent.loss_recoder)
        mean_critic = np.mean(recent[-agent.log_frequent:, 0])
        mean_actor = np.mean(recent[-agent.log_frequent:, 1])
        agent.writer.add_scalar('loss/actor', mean_actor, agent.total_steps_in_train)
        agent.writer.add_scalar('loss/critic', mean_critic, agent.total_steps_in_train)
    if agent.total_episodes_in_train % agent.log_frequent == 0 \
            and history > 0:
        recent = np.array(agent.loss_recoder)
        mean_critic = np.mean(recent[-agent.log_frequent:, 0])
        mean_actor = np.mean(recent[-agent.log_frequent:, 1])
        print("Critic mean Loss:{},Actor mean Loss:{}"
              .format(mean_critic, mean_actor))
def model_based_loss_callback(agent, loss):
    """Like loss_callback, but additionally records and logs the model loss
    carried in the third entry of `loss`."""
    agent.loss_recoder.append(list(loss))
    history = len(agent.loss_recoder)
    if history > 0:  # logged on every episode
        recent = np.array(agent.loss_recoder)
        mean_critic = np.mean(recent[-agent.log_frequent:, 0])
        mean_actor = np.mean(recent[-agent.log_frequent:, 1])
        mean_model = np.mean(recent[-agent.log_frequent:, 2])
        agent.writer.add_scalar('loss/actor', mean_actor, agent.total_steps_in_train)
        agent.writer.add_scalar('loss/critic', mean_critic, agent.total_steps_in_train)
        agent.writer.add_scalar('loss/model', mean_model, agent.total_steps_in_train)
    if agent.total_episodes_in_train % agent.log_frequent == 0 \
            and history > 0:
        recent = np.array(agent.loss_recoder)
        mean_critic = np.mean(recent[-agent.log_frequent:, 0])
        mean_actor = np.mean(recent[-agent.log_frequent:, 1])
        print("Critic mean Loss:{},Actor mean Loss:{}"
              .format(mean_critic, mean_actor))
def save_callback(agent, episode_num: int):
    """Every `log_frequent` episodes, checkpoint each agent's actor and critic
    networks (and any info-handler state) into the agent's log directory."""
    save_dir = agent.log_dir
    if episode_num % agent.log_frequent != 0:
        return
    print("save network!......")
    for i in range(agent.env.agent_count):
        agent.save(save_dir, "Actor{}".format(i), agent.agents[i].actor, episode_num)
        agent.save(save_dir, "Critic{}".format(i), agent.agents[i].critic, episode_num)
    if agent.info_callback_ != None:
        agent.info_handler.save(save_dir)
def early_stop_callback(self, rewards, episode):
    """Early-stopping rule for the planner-driven PedsMoveEnv setting.

    Stops once every agent's reward exceeds -40 (taken as evidence that a
    good policy has been learned); never stops for other environments.
    """
    # Short-circuit keeps the person_handler access safe for other env types.
    if isinstance(self.env, ped_env.envs.PedsMoveEnv) \
            and isinstance(self.env.person_handler, ped_env.classes.PedsRLHandlerWithPlanner):
        return min(rewards) > -40
    return False
def info_callback(info, handler, reset=False):
    """Forward environment info to the handler.

    Calls handler.reset(info) when reset is True, otherwise handler.step(info).
    Rewritten as a plain if/else: the original used a conditional *expression*
    purely for its side effects, which obscures intent.
    """
    if reset:
        handler.reset(info)
    else:
        handler.step(info)
def setup_seed(seed):
    """Seed every RNG in play (python `random`, numpy, torch CPU + all GPUs)
    and force deterministic cuDNN kernels, for reproducible RL runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def load_experience(file, lock=None):
    """Unpickle an experience buffer from `file` (a path), optionally holding
    `lock` while reading.

    Fix: the original opened the file and never closed it, leaking the file
    handle; a `with` block now guarantees the handle is released.
    """
    def inner_func(path):
        with open(path, "rb") as fh:
            return pickle.load(fh)
    if lock:
        with lock:
            print("带有锁加载机制!")
            return inner_func(file)
    else:
        return inner_func(file)
| [
"torch.from_numpy",
"numpy.array",
"torch.nn.functional.softmax",
"numpy.mean",
"torch.eye",
"numpy.stack",
"numpy.vstack",
"numpy.random.seed",
"numpy.concatenate",
"torch.distributed.get_world_size",
"random.choice",
"pickle.load",
"torch.distributed.all_reduce",
"torch.cuda.manual_seed_... | [((788, 804), 'random.choice', 'random.choice', (['A'], {}), '(A)\n', (801, 804), False, 'import random\n'), ((3819, 3852), 'torch.nn.functional.softmax', 'F.softmax', (['(y / temperature)'], {'dim': '(1)'}), '(y / temperature, dim=1)\n', (3828, 3852), True, 'import torch.nn.functional as F\n'), ((5075, 5114), 'numpy.vstack', 'np.vstack', (['[x.s0 for x in trans_pieces]'], {}), '([x.s0 for x in trans_pieces])\n', (5084, 5114), True, 'import numpy as np\n'), ((5131, 5169), 'numpy.array', 'np.array', (['[x.a0 for x in trans_pieces]'], {}), '([x.a0 for x in trans_pieces])\n', (5139, 5169), True, 'import numpy as np\n'), ((5185, 5227), 'numpy.array', 'np.array', (['[x.reward for x in trans_pieces]'], {}), '([x.reward for x in trans_pieces])\n', (5193, 5227), True, 'import numpy as np\n'), ((5242, 5285), 'numpy.array', 'np.array', (['[x.is_done for x in trans_pieces]'], {}), '([x.is_done for x in trans_pieces])\n', (5250, 5285), True, 'import numpy as np\n'), ((5301, 5338), 'numpy.vstack', 'np.vstack', (['(x.s1 for x in trans_pieces)'], {}), '(x.s1 for x in trans_pieces)\n', (5310, 5338), True, 'import numpy as np\n'), ((5809, 5847), 'numpy.array', 'np.array', (['[x.s0 for x in trans_pieces]'], {}), '([x.s0 for x in trans_pieces])\n', (5817, 5847), True, 'import numpy as np\n'), ((5857, 5895), 'numpy.array', 'np.array', (['[x.a0 for x in trans_pieces]'], {}), '([x.a0 for x in trans_pieces])\n', (5865, 5895), True, 'import numpy as np\n'), ((5905, 5947), 'numpy.array', 'np.array', (['[x.reward for x in trans_pieces]'], {}), '([x.reward for x in trans_pieces])\n', (5913, 5947), True, 'import numpy as np\n'), ((5962, 6005), 'numpy.array', 'np.array', (['[x.is_done for x in trans_pieces]'], {}), '([x.is_done for x in trans_pieces])\n', (5970, 6005), True, 'import numpy as np\n'), ((6015, 6053), 'numpy.array', 'np.array', (['[x.s1 for x in trans_pieces]'], {}), '([x.s1 for x in trans_pieces])\n', (6023, 6053), True, 'import numpy as np\n'), 
((10201, 10224), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (10218, 10224), False, 'import torch\n'), ((10229, 10261), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (10255, 10261), False, 'import torch\n'), ((10266, 10286), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10280, 10286), True, 'import numpy as np\n'), ((10291, 10308), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10302, 10308), False, 'import random\n'), ((393, 407), 'rl.utils.miscellaneous.str_key', 'str_key', (['*args'], {}), '(*args)\n', (400, 407), False, 'from rl.utils.miscellaneous import str_key\n'), ((567, 581), 'rl.utils.miscellaneous.str_key', 'str_key', (['*args'], {}), '(*args)\n', (574, 581), False, 'from rl.utils.miscellaneous import str_key\n'), ((2142, 2163), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2161, 2163), True, 'import torch.distributed as dist\n'), ((2210, 2274), 'torch.distributed.all_reduce', 'dist.all_reduce', (['param.grad.data'], {'op': 'dist.reduce_op.SUM', 'group': '(0)'}), '(param.grad.data, op=dist.reduce_op.SUM, group=0)\n', (2225, 2274), True, 'import torch.distributed as dist\n'), ((4778, 4792), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4786, 4792), True, 'import numpy as np\n'), ((5627, 5652), 'torch.from_numpy', 'torch.from_numpy', (['is_done'], {}), '(is_done)\n', (5643, 5652), False, 'import torch\n'), ((6065, 6091), 'numpy.stack', 'np.stack', (['s0[:, j]'], {'axis': '(0)'}), '(s0[:, j], axis=0)\n', (6073, 6091), True, 'import numpy as np\n'), ((6131, 6157), 'numpy.stack', 'np.stack', (['s1[:, j]'], {'axis': '(0)'}), '(s1[:, j], axis=0)\n', (6139, 6157), True, 'import numpy as np\n'), ((7129, 7168), 'numpy.mean', 'np.mean', (['[x.reward for x in last_trans]'], {}), '([x.reward for x in last_trans])\n', (7136, 7168), True, 'import numpy as np\n'), ((7429, 7457), 'numpy.array', 'np.array', 
(['agent.loss_recoder'], {}), '(agent.loss_recoder)\n', (7437, 7457), True, 'import numpy as np\n'), ((7485, 7522), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 0]'], {}), '(arr[-agent.log_frequent:, 0])\n', (7492, 7522), True, 'import numpy as np\n'), ((7549, 7586), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 1]'], {}), '(arr[-agent.log_frequent:, 1])\n', (7556, 7586), True, 'import numpy as np\n'), ((7896, 7924), 'numpy.array', 'np.array', (['agent.loss_recoder'], {}), '(agent.loss_recoder)\n', (7904, 7924), True, 'import numpy as np\n'), ((7952, 7989), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 0]'], {}), '(arr[-agent.log_frequent:, 0])\n', (7959, 7989), True, 'import numpy as np\n'), ((8016, 8053), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 1]'], {}), '(arr[-agent.log_frequent:, 1])\n', (8023, 8053), True, 'import numpy as np\n'), ((8323, 8351), 'numpy.array', 'np.array', (['agent.loss_recoder'], {}), '(agent.loss_recoder)\n', (8331, 8351), True, 'import numpy as np\n'), ((8379, 8416), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 0]'], {}), '(arr[-agent.log_frequent:, 0])\n', (8386, 8416), True, 'import numpy as np\n'), ((8443, 8480), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 1]'], {}), '(arr[-agent.log_frequent:, 1])\n', (8450, 8480), True, 'import numpy as np\n'), ((8507, 8544), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 2]'], {}), '(arr[-agent.log_frequent:, 2])\n', (8514, 8544), True, 'import numpy as np\n'), ((8945, 8973), 'numpy.array', 'np.array', (['agent.loss_recoder'], {}), '(agent.loss_recoder)\n', (8953, 8973), True, 'import numpy as np\n'), ((9001, 9038), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 0]'], {}), '(arr[-agent.log_frequent:, 0])\n', (9008, 9038), True, 'import numpy as np\n'), ((9065, 9102), 'numpy.mean', 'np.mean', (['arr[-agent.log_frequent:, 1]'], {}), '(arr[-agent.log_frequent:, 1])\n', (9072, 9102), True, 'import numpy as np\n'), ((10483, 10500), 
'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (10494, 10500), False, 'import pickle\n'), ((2856, 2882), 'torch.eye', 'torch.eye', (['logits.shape[1]'], {}), '(logits.shape[1])\n', (2865, 2882), False, 'import torch\n'), ((5574, 5600), 'torch.from_numpy', 'torch.from_numpy', (['reward_1'], {}), '(reward_1)\n', (5590, 5600), False, 'import torch\n'), ((3159, 3186), 'torch.rand', 'torch.rand', (['logits.shape[0]'], {}), '(logits.shape[0])\n', (3169, 3186), False, 'import torch\n'), ((3490, 3508), 'torch.log', 'torch.log', (['(U + eps)'], {}), '(U + eps)\n', (3499, 3508), False, 'import torch\n'), ((4804, 4826), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (4820, 4826), False, 'import torch\n'), ((6760, 6776), 'torch.tensor', 'torch.tensor', (['r1'], {}), '(r1)\n', (6772, 6776), False, 'import torch\n'), ((5377, 5403), 'torch.from_numpy', 'torch.from_numpy', (['states_0'], {}), '(states_0)\n', (5393, 5403), False, 'import torch\n'), ((5442, 5468), 'torch.from_numpy', 'torch.from_numpy', (['states_1'], {}), '(states_1)\n', (5458, 5468), False, 'import torch\n'), ((5508, 5535), 'torch.from_numpy', 'torch.from_numpy', (['actions_0'], {}), '(actions_0)\n', (5524, 5535), False, 'import torch\n'), ((6643, 6667), 'numpy.concatenate', 'np.concatenate', (['a0[j, :]'], {}), '(a0[j, :])\n', (6657, 6667), True, 'import numpy as np\n')] |
'''
Created on Jan 8, 2016
@author: <NAME>
'''
import caffe
from fast_rcnn.config import cfg
from roi_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
class PoseLossLayer(caffe.Layer):
"""
Pose loss layer that computes the biternion loss.
"""
def setup(self, bottom, top):
# check input pair
if len(bottom) != 3:
raise Exception("Need two inputs to compute distance.")
self.DEG2RAD_CONST = np.pi/180.0
# Pose weigths cls*n_bins
self.pose_weigths = [0.92, 1.06, 0.98, 1.04, 0.97, 1.07, 1.02, 1.06, \
0.89, 0.99, 0.99, 1.02, 0.74, 1.10, 1.10, 1.10, \
1.00, 1.08, 1.09, 1.01, 0.58, 0.90, 0.93, 0.87, \
0.90, 1.07, 1.04, 1.02, 0.81, 1.10, 1.02, 1.03, \
0.98, 1.07, 1.03, 1.06, 0.91, 1.10, 1.08, 1.03, \
0.95, 1.09, 1.09, 0.99, 0.91, 1.10, 1.09, 1.03]
def reshape(self, bottom, top):
# check input dimensions match
if bottom[0].count != (bottom[1].count*2):
raise Exception("Pose prediction does not match with pose labels dimensions.")
# To save inner products between pred and GT
self.inner_prod = np.zeros( bottom[0].data.shape[0] )
# Hold predicted modules
self.pred_mod = np.zeros( bottom[0].data.shape[0] )
# Hold polar labels
self.pol_labels = np.zeros( (bottom[0].data.shape[0], 2) )
# loss output is scalar
top[0].reshape(1)
def forward(self, bottom, top):
'''
Forward pass:
bottom[0]: predicted tuple (unnormalized)
bottom[1]: pose angle labels (degrees)
bottom[2]: class labels
'''
cls_labels = bottom[2].data.astype(np.int32) # Cast them to integer
# done= False
total_loss = 0
inds = np.where(cls_labels > 0)[0]
for ix in inds:
cls = cls_labels[ix]
# Cast labels into polar cordinates (cos x sin x)
rad_labels = bottom[1].data[ix,cls]*self.DEG2RAD_CONST
polar_labels = np.hstack( (np.cos(rad_labels), np.sin(rad_labels) ) ).reshape((1,2))
polar_pred = bottom[0].data[ix, cls*2:cls*2+2].reshape((2,1))
self.pol_labels[ix] = polar_labels
self.inner_prod[ix] = np.dot(polar_labels,polar_pred)
self.pred_mod[ix] = np.linalg.norm(polar_pred)
loss = 1 - self.inner_prod[ix]/self.pred_mod[ix]
total_loss += loss
# if not done:
# done = True
# print "GT: ", polar_labels
# print "Pred: ", polar_pred.ravel()/self.pred_mod[ix]
top[0].data[...] = total_loss/len(inds)
def backward(self, top, propagate_down, bottom):
# Reset gradients
bottom[0].diff[...] = np.zeros_like(bottom[0].diff)
# Get class labels
cls_labels = bottom[2].data.astype(np.int32) # Cast them to integer
# pose_labels = bottom[1].data # Poses
#
# pose_step = 360.0/4
# last_bin_angle = 360 - pose_step/2.0
inds = np.where(cls_labels > 0)[0]
for ix in inds:
cls = cls_labels[ix]
# First parameter
bottom[0].diff[ix, cls*2] += self.inner_prod[ix]*bottom[0].data[ix, cls*2] / (self.pred_mod[ix]**3) \
-self.pol_labels[ix, 0] / self.pred_mod[ix]
# Second parameter
bottom[0].diff[ix, cls*2 + 1] += self.inner_prod[ix]*bottom[0].data[ix, cls*2+1] / (self.pred_mod[ix]**3) \
-self.pol_labels[ix, 1] / self.pred_mod[ix]
# Weight loss
# wcls_ix = (cls-1)*4
# if pose_labels[ix, cls] > last_bin_angle:
# pose_ix = 0
# else:
# pose_ix = int( abs(pose_labels[ix, cls] - pose_step/2)/pose_step ) # Compute class + pose idx
#
# w = self.pose_weigths[wcls_ix + pose_ix]
# bottom[0].diff[ix, cls*2:cls*2 + 2] *= w
# Normalize output
bottom[0].diff[...] = bottom[0].diff[...] #/ bottom[0].num
class PoseEuclideanLossLayer(caffe.Layer):
"""
Pose loss layer that computes the biternion loss.
"""
def setup(self, bottom, top):
# check input pair
if len(bottom) != 3:
raise Exception("Need two inputs to compute distance.")
self.DEG2RAD_CONST = np.pi/180.0
# Pose weigths cls*n_bins
self.pose_weigths = [0.92, 1.06, 0.98, 1.04, 0.97, 1.07, 1.02, 1.06, \
0.89, 0.99, 0.99, 1.02, 0.74, 1.10, 1.10, 1.10, \
1.00, 1.08, 1.09, 1.01, 0.58, 0.90, 0.93, 0.87, \
0.90, 1.07, 1.04, 1.02, 0.81, 1.10, 1.02, 1.03, \
0.98, 1.07, 1.03, 1.06, 0.91, 1.10, 1.08, 1.03, \
0.95, 1.09, 1.09, 0.99, 0.91, 1.10, 1.09, 1.03]
def reshape(self, bottom, top):
# check input dimensions match
if bottom[0].count != (bottom[1].count*2):
raise Exception("Pose prediction does not match with pose labels dimensions.")
# Hold polar labels
self.pol_labels = np.zeros( (bottom[0].data.shape[0], 2) )
# Init diff
self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
# Count of fg objets
self.count = 1
# loss output is scalar
top[0].reshape(1)
def forward(self, bottom, top):
'''
Forward pass:
bottom[0]: predicted tuple (unnormalized)
bottom[1]: pose angle labels (degrees)
bottom[2]: class labels
'''
# Init diff
self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
cls_labels = bottom[2].data.astype(np.int32) # Cast them to integer
done= False
inds = np.where(cls_labels > 0)[0]
self.count = 0
# for ix in range( len(cls_labels) ):
for ix in inds:
cls = cls_labels[ix]
# Cast labels into polar cordinates (cos x sin x)
rad_labels = bottom[1].data[ix,cls]*self.DEG2RAD_CONST
polar_labels = np.hstack( (np.cos(rad_labels), np.sin(rad_labels) ) ).reshape((1,2))
polar_pred = bottom[0].data[ix, cls*2:cls*2+2].reshape((1,2))
# if not done:
# done = True
# print "GT: ", polar_labels
# print "Pred: ", polar_pred
self.count += 1
self.diff[ix, cls:cls+2] = polar_pred - polar_labels
self.count = max(self.count,1)
top[0].data[...] = np.sum(self.diff**2) / self.count / 2.
def backward(self, top, propagate_down, bottom):
# Normalize output
bottom[0].diff[...] = self.diff[...] / self.count | [
"numpy.where",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.zeros_like"
] | [((1360, 1393), 'numpy.zeros', 'np.zeros', (['bottom[0].data.shape[0]'], {}), '(bottom[0].data.shape[0])\n', (1368, 1393), True, 'import numpy as np\n'), ((1453, 1486), 'numpy.zeros', 'np.zeros', (['bottom[0].data.shape[0]'], {}), '(bottom[0].data.shape[0])\n', (1461, 1486), True, 'import numpy as np\n'), ((1543, 1581), 'numpy.zeros', 'np.zeros', (['(bottom[0].data.shape[0], 2)'], {}), '((bottom[0].data.shape[0], 2))\n', (1551, 1581), True, 'import numpy as np\n'), ((3067, 3096), 'numpy.zeros_like', 'np.zeros_like', (['bottom[0].diff'], {}), '(bottom[0].diff)\n', (3080, 3096), True, 'import numpy as np\n'), ((5568, 5606), 'numpy.zeros', 'np.zeros', (['(bottom[0].data.shape[0], 2)'], {}), '((bottom[0].data.shape[0], 2))\n', (5576, 5606), True, 'import numpy as np\n'), ((5658, 5705), 'numpy.zeros_like', 'np.zeros_like', (['bottom[0].data'], {'dtype': 'np.float32'}), '(bottom[0].data, dtype=np.float32)\n', (5671, 5705), True, 'import numpy as np\n'), ((6100, 6147), 'numpy.zeros_like', 'np.zeros_like', (['bottom[0].data'], {'dtype': 'np.float32'}), '(bottom[0].data, dtype=np.float32)\n', (6113, 6147), True, 'import numpy as np\n'), ((2016, 2040), 'numpy.where', 'np.where', (['(cls_labels > 0)'], {}), '(cls_labels > 0)\n', (2024, 2040), True, 'import numpy as np\n'), ((2491, 2523), 'numpy.dot', 'np.dot', (['polar_labels', 'polar_pred'], {}), '(polar_labels, polar_pred)\n', (2497, 2523), True, 'import numpy as np\n'), ((2555, 2581), 'numpy.linalg.norm', 'np.linalg.norm', (['polar_pred'], {}), '(polar_pred)\n', (2569, 2581), True, 'import numpy as np\n'), ((3374, 3398), 'numpy.where', 'np.where', (['(cls_labels > 0)'], {}), '(cls_labels > 0)\n', (3382, 3398), True, 'import numpy as np\n'), ((6271, 6295), 'numpy.where', 'np.where', (['(cls_labels > 0)'], {}), '(cls_labels > 0)\n', (6279, 6295), True, 'import numpy as np\n'), ((7066, 7088), 'numpy.sum', 'np.sum', (['(self.diff ** 2)'], {}), '(self.diff ** 2)\n', (7072, 7088), True, 'import numpy as np\n'), ((2269, 
2287), 'numpy.cos', 'np.cos', (['rad_labels'], {}), '(rad_labels)\n', (2275, 2287), True, 'import numpy as np\n'), ((2289, 2307), 'numpy.sin', 'np.sin', (['rad_labels'], {}), '(rad_labels)\n', (2295, 2307), True, 'import numpy as np\n'), ((6593, 6611), 'numpy.cos', 'np.cos', (['rad_labels'], {}), '(rad_labels)\n', (6599, 6611), True, 'import numpy as np\n'), ((6613, 6631), 'numpy.sin', 'np.sin', (['rad_labels'], {}), '(rad_labels)\n', (6619, 6631), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import cv2, os, lda
if __name__ == "__main__":
# set parameter for experiment
nTopics = 8
# create folder for saving result
if not os.path.exists("result"):
os.mkdir("result")
# create folder for showing fitting process
if not os.path.exists("visualization"):
os.mkdir("visualization")
# load image files (created by createImage.py)
data = np.zeros((1000,16),dtype=np.uint8)
for i in range(1000):
image = cv2.resize(cv2.imread("image/%d.jpg"%i,0),(4,4),interpolation=cv2.INTER_NEAREST)
data[i,:] = image.reshape((16)).astype(np.uint8)
# apply latent dirichlet allocation
model = lda.LDA()
model.setData(data)
model.solve(nTopics=nTopics)
# show topics obtained
for i in range(nTopics):
topic = model.qPhi[i,:]
topic = topic/topic.max()*255
topic = topic.reshape((4,4))
cv2.imwrite("result/%d.bmp"%i,cv2.resize(topic.astype(np.uint8),(200,200),interpolation=cv2.INTER_NEAREST))
| [
"os.path.exists",
"numpy.zeros",
"os.mkdir",
"lda.LDA",
"cv2.imread"
] | [((401, 437), 'numpy.zeros', 'np.zeros', (['(1000, 16)'], {'dtype': 'np.uint8'}), '((1000, 16), dtype=np.uint8)\n', (409, 437), True, 'import numpy as np\n'), ((651, 660), 'lda.LDA', 'lda.LDA', ([], {}), '()\n', (658, 660), False, 'import cv2, os, lda\n'), ((182, 206), 'os.path.exists', 'os.path.exists', (['"""result"""'], {}), "('result')\n", (196, 206), False, 'import cv2, os, lda\n'), ((210, 228), 'os.mkdir', 'os.mkdir', (['"""result"""'], {}), "('result')\n", (218, 228), False, 'import cv2, os, lda\n'), ((283, 314), 'os.path.exists', 'os.path.exists', (['"""visualization"""'], {}), "('visualization')\n", (297, 314), False, 'import cv2, os, lda\n'), ((318, 343), 'os.mkdir', 'os.mkdir', (['"""visualization"""'], {}), "('visualization')\n", (326, 343), False, 'import cv2, os, lda\n'), ((481, 514), 'cv2.imread', 'cv2.imread', (["('image/%d.jpg' % i)", '(0)'], {}), "('image/%d.jpg' % i, 0)\n", (491, 514), False, 'import cv2, os, lda\n')] |
# -*- coding: utf-8 -*-
""" This is the script to generate a Circle dataset. Credits to
https://github.com/hyounesy/TFPlaygroundPSA/blob/master/src/dataset.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
import os
import matplotlib
matplotlib.use('Agg') # needed to avoid cloud errors
import matplotlib.pyplot as plt
def data_circle(num_samples, noise):
"""
Generates the two circles dataset with the given number of samples and noise
:param num_samples: total number of samples
:param noise: noise percentage (0 .. 50)
:return: None
https://github.com/hyounesy/TFPlaygroundPSA/blob/master/src/dataset.py
"""
radius = 5
def get_circle_label(x, y, xc, yc):
return 1 if np.sqrt((x - xc) ** 2 + (y - yc) ** 2) < (
radius * 0.5) else 0
noise *= 0.01
points = np.zeros([num_samples, 2])
labels = np.zeros(num_samples).astype(int)
# Generate positive points inside the circle.
for i in range(num_samples // 2):
r = random.uniform(0, radius * 0.5)
angle = random.uniform(0, 2 * np.pi)
x = r * np.sin(angle)
y = r * np.cos(angle)
noise_x = random.uniform(-radius, radius) * noise
noise_y = random.uniform(-radius, radius) * noise
labels[i] = get_circle_label(x + noise_x, y + noise_y, 0, 0)
points[i] = (x, y)
# Generate negative points outside the circle.
for i in range(num_samples // 2, num_samples):
r = random.uniform(radius * 0.7, radius)
angle = random.uniform(0, 2 * np.pi)
x = r * np.sin(angle)
y = r * np.cos(angle)
noise_x = random.uniform(-radius, radius) * noise
noise_y = random.uniform(-radius, radius) * noise
labels[i] = get_circle_label(x + noise_x, y + noise_y, 0, 0)
points[i] = (x, y)
return points, labels
if __name__ == '__main__':
if os.path.exists('./data'):
print('Files available.')
points = np.load('./data/points.npy')
labels = np.load('./data/labels.npy')
else:
os.mkdir('./data')
points, labels = data_circle(1000, 10)
np.save('./data/points.npy', points)
np.save('./data/labels.npy', labels)
plt.scatter(points[labels == 0, 0], points[labels == 0, 1])
plt.scatter(points[labels == 1, 0], points[labels == 1, 1])
plt.savefig('./plot.png')
| [
"os.path.exists",
"random.uniform",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.use",
"numpy.zeros",
"os.mkdir",
"matplotlib.pyplot.scatter",
"numpy.cos",
"numpy.sin",
"numpy.load",
"numpy.save"
] | [((336, 357), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (350, 357), False, 'import matplotlib\n'), ((937, 963), 'numpy.zeros', 'np.zeros', (['[num_samples, 2]'], {}), '([num_samples, 2])\n', (945, 963), True, 'import numpy as np\n'), ((1992, 2016), 'os.path.exists', 'os.path.exists', (['"""./data"""'], {}), "('./data')\n", (2006, 2016), False, 'import os\n'), ((2324, 2383), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[labels == 0, 0]', 'points[labels == 0, 1]'], {}), '(points[labels == 0, 0], points[labels == 0, 1])\n', (2335, 2383), True, 'import matplotlib.pyplot as plt\n'), ((2388, 2447), 'matplotlib.pyplot.scatter', 'plt.scatter', (['points[labels == 1, 0]', 'points[labels == 1, 1]'], {}), '(points[labels == 1, 0], points[labels == 1, 1])\n', (2399, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2477), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plot.png"""'], {}), "('./plot.png')\n", (2463, 2477), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1142), 'random.uniform', 'random.uniform', (['(0)', '(radius * 0.5)'], {}), '(0, radius * 0.5)\n', (1125, 1142), False, 'import random\n'), ((1159, 1187), 'random.uniform', 'random.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (1173, 1187), False, 'import random\n'), ((1574, 1610), 'random.uniform', 'random.uniform', (['(radius * 0.7)', 'radius'], {}), '(radius * 0.7, radius)\n', (1588, 1610), False, 'import random\n'), ((1627, 1655), 'random.uniform', 'random.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (1641, 1655), False, 'import random\n'), ((2069, 2097), 'numpy.load', 'np.load', (['"""./data/points.npy"""'], {}), "('./data/points.npy')\n", (2076, 2097), True, 'import numpy as np\n'), ((2115, 2143), 'numpy.load', 'np.load', (['"""./data/labels.npy"""'], {}), "('./data/labels.npy')\n", (2122, 2143), True, 'import numpy as np\n'), ((2162, 2180), 'os.mkdir', 'os.mkdir', (['"""./data"""'], {}), "('./data')\n", (2170, 2180), 
False, 'import os\n'), ((2237, 2273), 'numpy.save', 'np.save', (['"""./data/points.npy"""', 'points'], {}), "('./data/points.npy', points)\n", (2244, 2273), True, 'import numpy as np\n'), ((2282, 2318), 'numpy.save', 'np.save', (['"""./data/labels.npy"""', 'labels'], {}), "('./data/labels.npy', labels)\n", (2289, 2318), True, 'import numpy as np\n'), ((977, 998), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (985, 998), True, 'import numpy as np\n'), ((1204, 1217), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1210, 1217), True, 'import numpy as np\n'), ((1234, 1247), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1240, 1247), True, 'import numpy as np\n'), ((1266, 1297), 'random.uniform', 'random.uniform', (['(-radius)', 'radius'], {}), '(-radius, radius)\n', (1280, 1297), False, 'import random\n'), ((1324, 1355), 'random.uniform', 'random.uniform', (['(-radius)', 'radius'], {}), '(-radius, radius)\n', (1338, 1355), False, 'import random\n'), ((1672, 1685), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1678, 1685), True, 'import numpy as np\n'), ((1702, 1715), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1708, 1715), True, 'import numpy as np\n'), ((1734, 1765), 'random.uniform', 'random.uniform', (['(-radius)', 'radius'], {}), '(-radius, radius)\n', (1748, 1765), False, 'import random\n'), ((1792, 1823), 'random.uniform', 'random.uniform', (['(-radius)', 'radius'], {}), '(-radius, radius)\n', (1806, 1823), False, 'import random\n'), ((821, 859), 'numpy.sqrt', 'np.sqrt', (['((x - xc) ** 2 + (y - yc) ** 2)'], {}), '((x - xc) ** 2 + (y - yc) ** 2)\n', (828, 859), True, 'import numpy as np\n')] |
import os
import sys
import PIL
import math
import time
import json
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torchvision
from pathlib import Path
from PIL import Image, ImageOps, ImageFilter
from torch import nn, optim
from torchvision import transforms, utils
import torchvision.models as models
from torchvision.transforms import ToTensor
import torchvision.transforms.functional as VisionF
from torchvision.utils import make_grid
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# Free any cached GPU memory from a previous run and fix the RNG seed so
# weight init / data shuffling are reproducible.
torch.cuda.empty_cache()
torch.manual_seed(42)
class DoctableDataset(Dataset):
    """Dataset of labelled document images.

    Labels come from a header-less CSV whose rows are
    ``(image filename, label)``; images live in ``image_dir`` and are
    converted to RGB before the optional transform is applied.
    """
    def __init__(self, root, image_dir, csv_file, train=True, transform=None):
        self.root = root
        self.image_dir = image_dir
        self.image_files = os.listdir(image_dir)
        self.data = pd.read_csv(csv_file, header=None)
        self.transform = transform
        self.train = train
    def __len__(self):
        # One sample per CSV row.
        return len(self.data)
    def __getitem__(self, idx):
        # Samplers may hand us a tensor index; unwrap it first.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        file_name = self.data.iloc[idx, 0]
        target = self.data.iloc[idx, 1]
        full_path = os.path.join(self.image_dir, file_name)
        sample = PIL.Image.open(full_path).convert('RGB')
        if self.transform:
            sample = self.transform(sample)
        return (sample, target)
def uossl_doctab_main():
    """Fine-tune an ImageNet-pretrained ResNet-50 on the ssldoctable data.

    Reads train/test images and label CSVs from ``data/ssldoctable``, trains
    with Adam + cross-entropy for ``n_epochs`` epochs, logs per-epoch stats
    as JSON lines under ``chkpt/``, and checkpoints the model whenever the
    validation loss improves.

    NOTE(review): ``stats_file`` and ``val_stats_file`` are never closed;
    line-buffering mitigates data loss but a ``with``/``close()`` would be
    cleaner. ``classes``, ``test_loader`` and ``test_data`` are built but
    unused here.
    """
    # check if CUDA is available
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU ...')
    else:
        print('CUDA is available! Training on GPU ...')
    # Hyper-parameters and dataset locations.
    num_workers = 2
    batch_size = 8
    valid_size = 0.2  # fraction of the training set held out for validation
    n_epochs = 400
    root = "data/ssldoctable"
    train_image_dir = "data/ssldoctable/train"
    test_image_dir = "data/ssldoctable/test"
    train_csv_file = "data/ssldoctable/train_labels.csv"
    test_csv_file = "data/ssldoctable/test_labels.csv"
    checkpoint_dir = Path("chkpt/")
    start_time = time.time()
    checkpoint_dir.mkdir(parents=True, exist_ok=True)
    # Line-buffered (buffering=1) so each JSON stats line hits disk promptly.
    stats_file = open(checkpoint_dir / 'stats.txt', 'a', buffering=1)
    val_stats_file = open(checkpoint_dir / 'val_stats.txt', 'a', buffering=1)
    # Record the exact command line in every log for provenance.
    print(' '.join(sys.argv))
    print(' '.join(sys.argv), file=stats_file)
    print(' '.join(sys.argv), file=val_stats_file)
    # convert data to a normalized torch.FloatTensor
    # (mean/std below are the standard ImageNet normalization constants)
    transform = transforms.Compose([
        transforms.Resize(256, interpolation=Image.BICUBIC),
        transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # choose the training and test datasets
    train_data = DoctableDataset(root, train_image_dir, train_csv_file, train=True, transform= transform)
    test_data = DoctableDataset(root, test_image_dir, test_csv_file, train=False, transform= transform)
    # obtain training indices that will be used for validation
    num_train = len(train_data)
    indices = list(range(num_train))
    np.random.shuffle(indices)
    split = int(np.floor(valid_size * num_train))
    train_idx, valid_idx = indices[split:], indices[:split]
    # define samplers for obtaining training and validation batches
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    # prepare data loaders (combine dataset and sampler)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               sampler=train_sampler, num_workers=num_workers)
    valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               sampler=valid_sampler, num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                              num_workers=num_workers)
    # specify the image classes
    classes = ['bargraph', 'doctable']
    model = models.resnet50(pretrained=True)
    #print(model)
    # move tensors to GPU if CUDA is available
    if train_on_gpu:
        model.cuda()
    # specify loss function (categorical cross-entropy)
    criterion = nn.CrossEntropyLoss()
    # specify optimizer
    optimizer = torch.optim.Adam(model.parameters(),lr = 0.001)
    # number of epochs to train the model
    valid_loss_min = np.Inf # track change in validation loss
    for epoch in range(1, n_epochs+1):
        # keep track of training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        ###################
        # train the model #
        ###################
        model.train()
        for data, target in train_loader:
            # move tensors to GPU if CUDA is available
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss
            train_loss += loss.item()*data.size(0)
        ######################
        # validate the model #
        ######################
        model.eval()
        for data, target in valid_loader:
            # move tensors to GPU if CUDA is available
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss += loss.item()*data.size(0)
        # calculate average losses
        train_loss = train_loss/len(train_loader.sampler)
        valid_loss = valid_loss/len(valid_loader.sampler)
        stats = dict(epoch=epoch, train_loss=train_loss, valid_loss=valid_loss,
                     time=int(time.time() - start_time))
        print(json.dumps(stats))
        print(json.dumps(stats), file=stats_file)
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss))
        # save model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            val_stats = dict(valid_loss_min=valid_loss_min,valid_loss=valid_loss)
            print(json.dumps(val_stats))
            print(json.dumps(val_stats), file=val_stats_file)
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), 'chkpt/model_sl_doctable.pt')
            valid_loss_min = valid_loss
    ######################
    #model.load_state_dict(torch.load('chkpt/model_sl_doctable.pt'))
if __name__ == "__main__":
uossl_doctab_main() | [
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.cuda.is_available",
"os.listdir",
"pathlib.Path",
"json.dumps",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"torchvision.models.resnet50",
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.floor",
"t... | [((653, 677), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (675, 677), False, 'import torch\n'), ((678, 699), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (695, 699), False, 'import torch\n'), ((1630, 1655), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1653, 1655), False, 'import torch\n'), ((2145, 2159), 'pathlib.Path', 'Path', (['"""chkpt/"""'], {}), "('chkpt/')\n", (2149, 2159), False, 'from pathlib import Path\n'), ((2177, 2188), 'time.time', 'time.time', ([], {}), '()\n', (2186, 2188), False, 'import time\n'), ((3351, 3377), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (3368, 3377), True, 'import numpy as np\n'), ((3577, 3607), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (3596, 3607), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3628, 3658), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (3647, 3658), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3736, 3851), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size', 'sampler': 'train_sampler', 'num_workers': 'num_workers'}), '(train_data, batch_size=batch_size, sampler=\n train_sampler, num_workers=num_workers)\n', (3763, 3851), False, 'import torch\n'), ((3874, 3989), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size', 'sampler': 'valid_sampler', 'num_workers': 'num_workers'}), '(train_data, batch_size=batch_size, sampler=\n valid_sampler, num_workers=num_workers)\n', (3901, 3989), False, 'import torch\n'), ((4012, 4103), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(test_data, batch_size=batch_size, num_workers=\n 
num_workers)\n', (4039, 4103), False, 'import torch\n'), ((4193, 4225), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4208, 4225), True, 'import torchvision.models as models\n'), ((4415, 4436), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4434, 4436), False, 'from torch import nn, optim\n'), ((899, 920), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (909, 920), False, 'import os\n'), ((941, 975), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': 'None'}), '(csv_file, header=None)\n', (952, 975), True, 'import pandas as pd\n'), ((1171, 1191), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1186, 1191), False, 'import torch\n'), ((1337, 1375), 'os.path.join', 'os.path.join', (['self.image_dir', 'img_name'], {}), '(self.image_dir, img_name)\n', (1349, 1375), False, 'import os\n'), ((3394, 3426), 'numpy.floor', 'np.floor', (['(valid_size * num_train)'], {}), '(valid_size * num_train)\n', (3402, 3426), True, 'import numpy as np\n'), ((2624, 2675), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {'interpolation': 'Image.BICUBIC'}), '(256, interpolation=Image.BICUBIC)\n', (2641, 2675), True, 'import torchvision.transforms as transforms\n'), ((2685, 2747), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {'interpolation': 'Image.BICUBIC'}), '(224, interpolation=Image.BICUBIC)\n', (2713, 2747), True, 'import torchvision.transforms as transforms\n'), ((2757, 2795), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (2788, 2795), True, 'import torchvision.transforms as transforms\n'), ((2805, 2826), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2824, 2826), True, 'import torchvision.transforms as transforms\n'), ((2836, 2911), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], 
{'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2856, 2911), True, 'import torchvision.transforms as transforms\n'), ((6586, 6603), 'json.dumps', 'json.dumps', (['stats'], {}), '(stats)\n', (6596, 6603), False, 'import json\n'), ((6619, 6636), 'json.dumps', 'json.dumps', (['stats'], {}), '(stats)\n', (6629, 6636), False, 'import json\n'), ((1392, 1418), 'PIL.Image.open', 'PIL.Image.open', (['image_name'], {}), '(image_name)\n', (1406, 1418), False, 'import PIL\n'), ((7044, 7065), 'json.dumps', 'json.dumps', (['val_stats'], {}), '(val_stats)\n', (7054, 7065), False, 'import json\n'), ((7085, 7106), 'json.dumps', 'json.dumps', (['val_stats'], {}), '(val_stats)\n', (7095, 7106), False, 'import json\n'), ((6545, 6556), 'time.time', 'time.time', ([], {}), '()\n', (6554, 6556), False, 'import time\n')] |
'''
Random Breakout AI player

Plays Breakout-v0 with uniformly random actions, records the run with the
gym monitor and prints aggregate scores at the end.
@author: <NAME> <<EMAIL>>
'''
import gym
import numpy
import random
import pandas

if __name__ == '__main__':
    env = gym.make('Breakout-v0')
    env.monitor.start('/tmp/breakout-experiment-1', force=True)
                        # video_callable=lambda count: count % 10 == 0)
    goal_average_steps = 195
    max_number_of_steps = 200
    # Fixed: was assigned twice; kept a single initialisation.
    last_time_steps = numpy.ndarray(0)
    n_bins = 8
    n_bins_angle = 10
    number_of_features = env.observation_space.shape[0]
    # One-hot action templates (not consumed by env.step below; kept for reference).
    action_attack = [False]*43
    action_attack[0] = True
    action_right = [False]*43
    action_right[10] = True
    action_left = [False]*43
    action_left[11] = True
    actions = [action_attack, action_left, action_right]
    done = False
    observation = env.reset()
    # Fixed: xrange/reduce are Python-2-only; use range() and sum().
    for i_episode in range(30):
        if done:
            observation = env.reset()
        for t in range(max_number_of_steps):
            env.render()
            # Execute the action and get feedback
            observation, reward, done, info = env.step(env.action_space.sample())
            if done:
                break
    l = last_time_steps.tolist()
    l.sort()
    print("Overall score: {:0.2f}".format(last_time_steps.mean()))
    # Fixed: reduce() on an empty slice raised TypeError; guard the average.
    recent = l[-100:]
    best_100 = sum(recent) / len(recent) if recent else float('nan')
    print("Best 100 score: {:0.2f}".format(best_100))
    env.monitor.close()
    # gym.upload('/tmp/cartpole-experiment-1', algorithm_id='vmayoral simple Q-learning', api_key='your-key')
| [
"numpy.ndarray",
"gym.make"
] | [((155, 178), 'gym.make', 'gym.make', (['"""Breakout-v0"""'], {}), "('Breakout-v0')\n", (163, 178), False, 'import gym\n'), ((381, 397), 'numpy.ndarray', 'numpy.ndarray', (['(0)'], {}), '(0)\n', (394, 397), False, 'import numpy\n'), ((514, 530), 'numpy.ndarray', 'numpy.ndarray', (['(0)'], {}), '(0)\n', (527, 530), False, 'import numpy\n')] |
import pytest
from ipyplotly.basevalidators import StringValidator
import numpy as np
# Fixtures
# --------
# Each fixture builds a StringValidator configured for one scenario.
@pytest.fixture()
def validator():
    # Baseline: any string accepted, scalars only.
    return StringValidator('prop', 'parent')
@pytest.fixture()
def validator_values():
    # Restricted to an explicit set of allowed values (empty string included).
    return StringValidator('prop', 'parent', values=['foo', 'BAR', ''])
@pytest.fixture()
def validator_no_blanks():
    # Empty strings are rejected.
    return StringValidator('prop', 'parent', no_blank=True)
@pytest.fixture
def validator_aok():
    # Lists/arrays of strings accepted in addition to scalars.
    return StringValidator('prop', 'parent', array_ok=True)
@pytest.fixture
def validator_aok_values():
    # Array-ok combined with a value whitelist.
    return StringValidator('prop', 'parent', values=['foo', 'BAR', '', 'baz'], array_ok=True)
@pytest.fixture()
def validator_no_blanks_aok():
    # Array-ok and empty strings rejected.
    return StringValidator('prop', 'parent', no_blank=True, array_ok=True)
# Array not ok
# ------------
# ### Acceptance ###
@pytest.mark.parametrize('val',
                         ['bar', 'HELLO!!!', 'world!@#$%^&*()', ''])
def test_acceptance(val, validator: StringValidator):
    # Plain strings (including empty) pass through unchanged.
    assert validator.validate_coerce(val) == val
# ### Rejection by type ### (was mislabelled "by value": these are non-strings)
@pytest.mark.parametrize('val',
                         [(), [], [1, 2, 3], set(), np.nan, np.pi])
def test_rejection(val, validator: StringValidator):
    # Non-string inputs must raise with an informative message.
    with pytest.raises(ValueError) as validation_failure:
        validator.validate_coerce(val)
    assert 'Invalid value' in str(validation_failure.value)
# Valid values
# ------------
@pytest.mark.parametrize('val',
                         ['foo', 'BAR', ''])
def test_acceptance_values(val, validator_values: StringValidator):
    # Whitelisted values are accepted verbatim (comparison is case-sensitive).
    assert validator_values.validate_coerce(val) == val
@pytest.mark.parametrize('val',
                         ['FOO', 'bar', 'other', '1234'])
def test_rejection_values(val, validator_values: StringValidator):
    """Values outside the whitelist raise; the message names the allowed set."""
    with pytest.raises(ValueError) as validation_failure:
        validator_values.validate_coerce(val)
    # Fixed: the original asserted on 'Invalid value'.format(val=val) — a
    # no-op .format() call, since the string contains no replacement fields.
    assert 'Invalid value' in str(validation_failure.value)
    assert "['foo', 'BAR', '']" in str(validation_failure.value)
# ### No blanks ###
@pytest.mark.parametrize('val',
                         ['bar', 'HELLO!!!', 'world!@#$%^&*()'])
def test_acceptance_no_blanks(val, validator_no_blanks: StringValidator):
    # Non-empty strings are still accepted unchanged.
    assert validator_no_blanks.validate_coerce(val) == val
@pytest.mark.parametrize('val',
                         [''])
def test_rejection_no_blanks(val, validator_no_blanks: StringValidator):
    # The empty string is the only rejected case for no_blank=True.
    with pytest.raises(ValueError) as validation_failure:
        validator_no_blanks.validate_coerce(val)
    assert 'A non-empty string' in str(validation_failure.value)
# Array ok
# --------
# ### Acceptance ###
@pytest.mark.parametrize('val',
                         ['foo', 'BAR', '', 'baz'])
def test_acceptance_aok_scalars(val, validator_aok: StringValidator):
    # array_ok validators still accept plain scalars.
    assert validator_aok.validate_coerce(val) == val
@pytest.mark.parametrize('val',
                         ['foo', ['foo'], np.array(['BAR', ''], dtype='object'), ['baz', 'baz', 'baz']])
def test_acceptance_aok_list(val, validator_aok: StringValidator):
    # Lists and ndarrays are coerced to arrays; scalars come back unchanged.
    coerce_val = validator_aok.validate_coerce(val)
    if isinstance(val, (list, np.ndarray)):
        assert np.array_equal(coerce_val, np.array(val, dtype=coerce_val.dtype))
    else:
        assert coerce_val == val
# ### Rejection by type ###
@pytest.mark.parametrize('val',
                         [['foo', ()], ['foo', 3, 4], [3, 2, 1]])
def test_rejection_aok(val, validator_aok: StringValidator):
    # Any non-string element poisons the whole array.
    with pytest.raises(ValueError) as validation_failure:
        validator_aok.validate_coerce(val)
    assert 'Invalid element(s)' in str(validation_failure.value)
# ### Rejection by value ###
@pytest.mark.parametrize('val',
                         [['foo', 'bar'], ['3', '4'], ['BAR', 'BAR', 'hello!']])
def test_rejection_aok_values(val, validator_aok_values: StringValidator):
    # Any element outside the whitelist rejects the whole array.
    with pytest.raises(ValueError) as validation_failure:
        validator_aok_values.validate_coerce(val)
    assert 'Invalid element(s)' in str(validation_failure.value)
# ### No blanks ###
@pytest.mark.parametrize('val',
                         ['123', ['bar', 'HELLO!!!'], ['world!@#$%^&*()']])
def test_acceptance_no_blanks_aok(val, validator_no_blanks_aok: StringValidator):
    # Non-empty scalars and arrays of non-empty strings are accepted.
    coerce_val = validator_no_blanks_aok.validate_coerce(val)
    if isinstance(val, (list, np.ndarray)):
        assert np.array_equal(coerce_val, np.array(val, dtype=coerce_val.dtype))
    else:
        assert coerce_val == val
@pytest.mark.parametrize('val',
                         ['', ['foo', 'bar', ''], ['']])
def test_rejection_no_blanks_aok(val, validator_no_blanks_aok: StringValidator):
    # An empty string anywhere (scalar or element) is rejected.
    with pytest.raises(ValueError) as validation_failure:
        validator_no_blanks_aok.validate_coerce(val)
    assert 'A non-empty string' in str(validation_failure.value)
| [
"ipyplotly.basevalidators.StringValidator",
"pytest.mark.parametrize",
"numpy.array",
"pytest.raises",
"pytest.fixture"
] | [((111, 127), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (125, 127), False, 'import pytest\n'), ((193, 209), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (207, 209), False, 'import pytest\n'), ((309, 325), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (323, 325), False, 'import pytest\n'), ((655, 671), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (669, 671), False, 'import pytest\n'), ((832, 906), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['bar', 'HELLO!!!', 'world!@#$%^&*()', '']"], {}), "('val', ['bar', 'HELLO!!!', 'world!@#$%^&*()', ''])\n", (855, 906), False, 'import pytest\n'), ((1410, 1460), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['foo', 'BAR', '']"], {}), "('val', ['foo', 'BAR', ''])\n", (1433, 1460), False, 'import pytest\n'), ((1613, 1676), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['FOO', 'bar', 'other', '1234']"], {}), "('val', ['FOO', 'bar', 'other', '1234'])\n", (1636, 1676), False, 'import pytest\n'), ((2038, 2108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['bar', 'HELLO!!!', 'world!@#$%^&*()']"], {}), "('val', ['bar', 'HELLO!!!', 'world!@#$%^&*()'])\n", (2061, 2108), False, 'import pytest\n'), ((2270, 2306), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['']"], {}), "('val', [''])\n", (2293, 2306), False, 'import pytest\n'), ((2624, 2681), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['foo', 'BAR', '', 'baz']"], {}), "('val', ['foo', 'BAR', '', 'baz'])\n", (2647, 2681), False, 'import pytest\n'), ((3287, 3358), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "[['foo', ()], ['foo', 3, 4], [3, 2, 1]]"], {}), "('val', [['foo', ()], ['foo', 3, 4], [3, 2, 1]])\n", (3310, 3358), False, 'import pytest\n'), ((3644, 3734), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "[['foo', 'bar'], ['3', '4'], ['BAR', 'BAR', 
'hello!']]"], {}), "('val', [['foo', 'bar'], ['3', '4'], ['BAR', 'BAR',\n 'hello!']])\n", (3667, 3734), False, 'import pytest\n'), ((4028, 4114), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['123', ['bar', 'HELLO!!!'], ['world!@#$%^&*()']]"], {}), "('val', ['123', ['bar', 'HELLO!!!'], [\n 'world!@#$%^&*()']])\n", (4051, 4114), False, 'import pytest\n'), ((4450, 4512), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "['', ['foo', 'bar', ''], ['']]"], {}), "('val', ['', ['foo', 'bar', ''], ['']])\n", (4473, 4512), False, 'import pytest\n'), ((156, 189), 'ipyplotly.basevalidators.StringValidator', 'StringValidator', (['"""prop"""', '"""parent"""'], {}), "('prop', 'parent')\n", (171, 189), False, 'from ipyplotly.basevalidators import StringValidator\n'), ((245, 305), 'ipyplotly.basevalidators.StringValidator', 'StringValidator', (['"""prop"""', '"""parent"""'], {'values': "['foo', 'BAR', '']"}), "('prop', 'parent', values=['foo', 'BAR', ''])\n", (260, 305), False, 'from ipyplotly.basevalidators import StringValidator\n'), ((364, 412), 'ipyplotly.basevalidators.StringValidator', 'StringValidator', (['"""prop"""', '"""parent"""'], {'no_blank': '(True)'}), "('prop', 'parent', no_blank=True)\n", (379, 412), False, 'from ipyplotly.basevalidators import StringValidator\n'), ((463, 511), 'ipyplotly.basevalidators.StringValidator', 'StringValidator', (['"""prop"""', '"""parent"""'], {'array_ok': '(True)'}), "('prop', 'parent', array_ok=True)\n", (478, 511), False, 'from ipyplotly.basevalidators import StringValidator\n'), ((569, 655), 'ipyplotly.basevalidators.StringValidator', 'StringValidator', (['"""prop"""', '"""parent"""'], {'values': "['foo', 'BAR', '', 'baz']", 'array_ok': '(True)'}), "('prop', 'parent', values=['foo', 'BAR', '', 'baz'],\n array_ok=True)\n", (584, 655), False, 'from ipyplotly.basevalidators import StringValidator\n'), ((714, 777), 'ipyplotly.basevalidators.StringValidator', 'StringValidator', 
(['"""prop"""', '"""parent"""'], {'no_blank': '(True)', 'array_ok': '(True)'}), "('prop', 'parent', no_blank=True, array_ok=True)\n", (729, 777), False, 'from ipyplotly.basevalidators import StringValidator\n'), ((1228, 1253), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1241, 1253), False, 'import pytest\n'), ((1778, 1803), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1791, 1803), False, 'import pytest\n'), ((2414, 2439), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2427, 2439), False, 'import pytest\n'), ((2906, 2943), 'numpy.array', 'np.array', (["['BAR', '']"], {'dtype': '"""object"""'}), "(['BAR', ''], dtype='object')\n", (2914, 2943), True, 'import numpy as np\n'), ((3454, 3479), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3467, 3479), False, 'import pytest\n'), ((3840, 3865), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3853, 3865), False, 'import pytest\n'), ((4628, 4653), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4641, 4653), False, 'import pytest\n'), ((3174, 3211), 'numpy.array', 'np.array', (['val'], {'dtype': 'coerce_val.dtype'}), '(val, dtype=coerce_val.dtype)\n', (3182, 3211), True, 'import numpy as np\n'), ((4365, 4402), 'numpy.array', 'np.array', (['val'], {'dtype': 'coerce_val.dtype'}), '(val, dtype=coerce_val.dtype)\n', (4373, 4402), True, 'import numpy as np\n')] |
import sys
import numpy as np
import skvideo.io
import concurrent.futures
import time
def _detect_black_bars_from_video(frames, blackbar_threshold=16, max_perc_to_trim=.2):
"""
:param frames: [num_frames, height, width, 3]
:param blackbar_threshold: Pixels must be this intense for us to not trim
:param max_perc_to_prim: Will trim 20% by default of the image at most in each dimension
:return:
"""
# Detect black bars####################
has_content = frames.max(axis=(0, -1)) >= blackbar_threshold
h, w = has_content.shape
y_frames = np.where(has_content.any(1))[0]
if y_frames.size == 0:
print("Oh no, there are no valid yframes")
y_frames = [h // 2]
y1 = min(y_frames[0], int(h * max_perc_to_trim))
y2 = max(y_frames[-1] + 1, int(h * (1 - max_perc_to_trim)))
x_frames = np.where(has_content.any(0))[0]
if x_frames.size == 0:
print("Oh no, there are no valid xframes")
x_frames = [w // 2]
x1 = min(x_frames[0], int(w * max_perc_to_trim))
x2 = max(x_frames[-1] + 1, int(w * (1 - max_perc_to_trim)))
return y1, y2, x1, x2
def extract_all_frames_from_video(video_file, blackbar_threshold=32, max_perc_to_trim=0.2,
                                  every_nth_frame=1, verbosity=0):
    """Decode a video and return every ``every_nth_frame``-th frame, trimmed.

    Unlike extract_frames_from_video this takes no timestamps: every decoded
    frame (subsampled by ``every_nth_frame``) is kept, then black bars are
    cropped off with _detect_black_bars_from_video.

    :param video_file: path of the video to decode
    :param blackbar_threshold: intensity threshold for the black-bar detector
    :param max_perc_to_trim: trim at most this fraction per side
    :param every_nth_frame: keep one frame out of this many
    :param verbosity: forwarded to the ffmpeg reader
    :return: [num_kept_frames, cropped_height, cropped_width, 3] array
    """
    reader = skvideo.io.FFmpegReader(video_file, outputdict={'-r': '1', '-q:v': '2', '-pix_fmt': 'rgb24'},
                                verbosity=verbosity)
    kept = [frame for index, frame in enumerate(reader.nextFrame())
            if index % every_nth_frame == 0]
    stacked = np.stack(kept)
    top, bottom, left, right = _detect_black_bars_from_video(
        stacked, blackbar_threshold=blackbar_threshold, max_perc_to_trim=max_perc_to_trim)
    return stacked[:, top:bottom, left:right]
def extract_single_frame_from_video(video_file, t, verbosity=0):
    """Seek to second ``t`` of a video and decode one RGB frame.

    :param video_file: input video file
    :param t: seek position in seconds
    :param verbosity: forwarded to the ffmpeg reader
    :return: the frame at that timestamp, or None if nothing could be decoded
    """
    timecode = '{:.3f}'.format(t)
    reader = skvideo.io.FFmpegReader(
        video_file,
        inputdict={'-ss': timecode, '-threads': '1'},
        outputdict={'-r': '1', '-q:v': '2', '-pix_fmt': 'rgb24', '-frames:v': '1'},
        verbosity=verbosity,
    )
    # next() with a default replaces the original try/except StopIteration.
    return next(iter(reader.nextFrame()), None)
def extract_frames_from_video(video_file, times, info, use_multithreading=False, use_rgb=True,
                              blackbar_threshold=32, max_perc_to_trim=.20, verbose=False):
    """
    Extracts multiple things from the video and even handles black bars
    :param video_file: what we are loading
    :param times: timestamps (seconds) to extract one frame at each
    :param info: NOTE(review): unused in this function body -- confirm callers need it
    :param use_multithreading: Whether to use multithreading
    :param use_rgb whether to use RGB (default) or BGR
        NOTE(review): use_rgb is never read below; frames are always decoded as rgb24
    :param blackbar_threshold: Pixels must be this intense for us to not trim
    :param max_perc_to_prim: Will trim 20% by default of the image at most in each dimension
    :return: [len(times), cropped_h, cropped_w, 3] array, or None if any frame failed
    """
    def _extract(i):
        # Returns (i, frame) so threaded results can be slotted back in order.
        return i, extract_single_frame_from_video(video_file, times[i], verbosity=10 if verbose else 0)
    time1 = time.time()
    if not use_multithreading:
        frames = [_extract(i)[1] for i in range(len(times))]
    else:
        frames = [None for t in times]
        with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
            submitted_threads = (executor.submit(_extract, i) for i in range(len(times)))
            # as_completed yields out of order; the index from _extract restores order.
            for future in concurrent.futures.as_completed(submitted_threads):
                try:
                    i, img = future.result()
                    frames[i] = img
                except Exception as exc:
                    # Best-effort: log and leave the slot as None (caught below).
                    print("Oh no {}".format(str(exc)), flush=True)
    if verbose:
        print("Extracting frames from video, multithreading={} took {:.3f}".format(use_multithreading,
                                                                                   time.time() - time1), flush=True)
    # Any failed extraction aborts the whole call.
    if any([x is None for x in frames]):
        print(f"Fail on {video_file}", flush=True)
        return None
    frames = np.stack(frames)
    y1, y2, x1, x2 = _detect_black_bars_from_video(frames, blackbar_threshold=blackbar_threshold,
                                                   max_perc_to_trim=max_perc_to_trim)
    frames = frames[:, y1:y2, x1:x2]
    #############
    return frames
"numpy.stack",
"time.time"
] | [((1873, 1889), 'numpy.stack', 'np.stack', (['frames'], {}), '(frames)\n', (1881, 1889), True, 'import numpy as np\n'), ((3742, 3753), 'time.time', 'time.time', ([], {}), '()\n', (3751, 3753), False, 'import time\n'), ((4711, 4727), 'numpy.stack', 'np.stack', (['frames'], {}), '(frames)\n', (4719, 4727), True, 'import numpy as np\n'), ((4551, 4562), 'time.time', 'time.time', ([], {}), '()\n', (4560, 4562), False, 'import time\n')] |
import numpy as np
import torch
def evaluate_performance(environment, policy, GAMMA, agent):
    """
    Print the discounted performance J of the current softmax policy and the
    performance J_star of a one-step improved policy derived from the
    critic's value estimates (tabular MDP, uniform initial distribution).

    :param environment: exposes ``transitions[(s, a)][s']`` dicts with
        "reward" and "probability" entries -- assumed tabular; TODO confirm
    :param policy: actor whose ``table`` holds action logits (transposed to
        states x actions below)
    :param GAMMA: discount factor in [0, 1)
    :param agent: exposes ``agent.algorithm.critic``, a torch module mapping
        state indices to state values
    """
    pi = policy.table.detach().numpy().T
    # Softmax over actions: each row becomes a probability distribution.
    pi = np.exp(pi) / np.sum(np.exp(pi), axis=1)[:, None]
    nS, nA = pi.shape
    # Uniform initial state distribution.
    mu0 = np.full(nS, 1/nS)
    # Block-diagonal expansion: pi2[s, s*nA + a] = pi(a | s), shape (nS, nS*nA).
    pi2 = np.tile(pi, (1, nS)) * np.kron(np.eye(nS), np.ones((1, nA)))
    # Rewards in (s, a) state-major order; read from next state 0
    # (apparently reward is independent of the next state -- TODO confirm).
    r = []
    for k in sorted(environment.transitions.keys(), key=lambda x: x[0] * nS + x[1]):
        r.append(environment.transitions[k][0]["reward"])
    r = np.array(r)
    # Transition matrix P[(s, a), s'] in the same (s, a) ordering.
    P = []
    for k in sorted(environment.transitions.keys(), key=lambda x: x[0] * nS + x[1]):
        P_row = []
        for i in range(nS):
            P_row.append(environment.transitions[k][i]["probability"])
        P.append(P_row)
    P = np.array(P)
    # Discounted state occupancy and performance of the current policy.
    mu = (1-GAMMA) * np.linalg.inv(np.eye(nS) - GAMMA * pi2 @ P).T @ mu0
    J = 1 / (1 - GAMMA) * mu @ pi2 @ r
    print(f"Performance J: {J}")
    # Critic values per state, replicated across actions to length nS*nA.
    value = agent.algorithm.critic(torch.tensor(np.arange(nS))).detach().numpy()
    values = [item for item in value for i in range(nA)]
    # Since the deltas are computed approximately, they are used to define the
    # new policy, which then has to be normalized.
    delta = values - GAMMA * P @ value
    pi2_star = pi2 * delta[None, :]
    pi2_star = pi2_star / np.sum(pi2_star, axis=1)[:, None]
    nu_star = (1 - GAMMA) * np.linalg.inv(np.eye(nS) - GAMMA * pi2_star @ P).T @ mu0
    J_star = 1 / (1 - GAMMA) * nu_star @ pi2_star @ r
    print(f"Performance J_star: {J_star}")
"numpy.tile",
"numpy.eye",
"numpy.ones",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.full",
"numpy.arange"
] | [((228, 247), 'numpy.full', 'np.full', (['nS', '(1 / nS)'], {}), '(nS, 1 / nS)\n', (235, 247), True, 'import numpy as np\n'), ((487, 498), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (495, 498), True, 'import numpy as np\n'), ((752, 763), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (760, 763), True, 'import numpy as np\n'), ((145, 155), 'numpy.exp', 'np.exp', (['pi'], {}), '(pi)\n', (151, 155), True, 'import numpy as np\n'), ((257, 277), 'numpy.tile', 'np.tile', (['pi', '(1, nS)'], {}), '(pi, (1, nS))\n', (264, 277), True, 'import numpy as np\n'), ((288, 298), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (294, 298), True, 'import numpy as np\n'), ((300, 316), 'numpy.ones', 'np.ones', (['(1, nA)'], {}), '((1, nA))\n', (307, 316), True, 'import numpy as np\n'), ((1289, 1313), 'numpy.sum', 'np.sum', (['pi2_star'], {'axis': '(1)'}), '(pi2_star, axis=1)\n', (1295, 1313), True, 'import numpy as np\n'), ((165, 175), 'numpy.exp', 'np.exp', (['pi'], {}), '(pi)\n', (171, 175), True, 'import numpy as np\n'), ((808, 818), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (814, 818), True, 'import numpy as np\n'), ((1366, 1376), 'numpy.eye', 'np.eye', (['nS'], {}), '(nS)\n', (1372, 1376), True, 'import numpy as np\n'), ((969, 982), 'numpy.arange', 'np.arange', (['nS'], {}), '(nS)\n', (978, 982), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def plot_loss(history):
    """Display training (row 0) and validation (row 1) loss curves."""
    for curve, curve_name in ((history[0], 'Training'), (history[1], 'Validation')):
        plt.plot(curve, label=curve_name)
    plt.title('Training Curves')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.grid()
    plt.legend()
    plt.tight_layout()
    plt.show()
def main():
    """CLI entry point: load the loss-history CSV named in argv[1] and plot it.

    Returns 0 on success, 1 on usage error or missing file.
    """
    if len(sys.argv) != 2:
        print("Usage: python plot_loss.py <history csv file> ")
        return 1
    history_file = sys.argv[1]
    if not os.path.isfile(history_file):
        print(f"{history_file} not found. ")
        return 1
    plot_loss(np.genfromtxt(history_file, delimiter=','))
    return 0
if __name__ == "__main__":
sys.exit(main())
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.isfile",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((103, 141), 'matplotlib.pyplot.plot', 'plt.plot', (['history[0]'], {'label': '"""Training"""'}), "(history[0], label='Training')\n", (111, 141), True, 'import matplotlib.pyplot as plt\n'), ((146, 186), 'matplotlib.pyplot.plot', 'plt.plot', (['history[1]'], {'label': '"""Validation"""'}), "(history[1], label='Validation')\n", (154, 186), True, 'import matplotlib.pyplot as plt\n'), ((192, 220), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Curves"""'], {}), "('Training Curves')\n", (201, 220), True, 'import matplotlib.pyplot as plt\n'), ((225, 237), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (235, 237), True, 'import matplotlib.pyplot as plt\n'), ((242, 252), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (250, 252), True, 'import matplotlib.pyplot as plt\n'), ((257, 275), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (267, 275), True, 'import matplotlib.pyplot as plt\n'), ((280, 299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (290, 299), True, 'import matplotlib.pyplot as plt\n'), ((304, 322), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (320, 322), True, 'import matplotlib.pyplot as plt\n'), ((328, 338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (336, 338), True, 'import matplotlib.pyplot as plt\n'), ((626, 668), 'numpy.genfromtxt', 'np.genfromtxt', (['history_file'], {'delimiter': '""","""'}), "(history_file, delimiter=',')\n", (639, 668), True, 'import numpy as np\n'), ((519, 547), 'os.path.isfile', 'os.path.isfile', (['history_file'], {}), '(history_file)\n', (533, 547), False, 'import os\n')] |
import os
import time
from collections import deque
import numpy as np
import tensorflow as tf
from ppo2 import explained_variance, mean_or_nan, swap_and_flatten
from utils import loghandler
from utils.distributions import CategoricalPdType
"""
Implementation of Advantage Actor Critic (A2C):
Advantage Actor Critic, is a synchronous version of the A3C policy gradient method. As an alternative to the
asynchronous implementation of A3C, A2C is a synchronous, deterministic implementation that waits for each actor
to finish its segment of experience before updating, averaging over all of the actors. This more effectively uses
GPUs due to larger batch sizes.
----------------------------------------------
Created:
14.01.2021, <NAME> <<EMAIL>>
Paper:
Asynchronous Methods for Deep Reinforcement Learning (https://arxiv.org/abs/1602.01783)
Code-Sources:
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail
https://github.com/hill-a/stable-baselines
https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html
https://github.com/openai/baselines
"""
class InverseTimeDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule that decays linearly to zero.

    Yields ``init_lr * (1 - step / number_of_updates)`` at each step.
    """
    def __init__(self, init_lr, number_of_updates):
        super().__init__()
        self.init_lr = init_lr
        self.number_of_updates = number_of_updates
    def __call__(self, step):
        """
        Return the decayed learning rate for the given training step.
        :param step: The current step in the environment
        :return: the new learning rate
        """
        with tf.name_scope('inverse_time_decay'):
            base_lr = tf.convert_to_tensor(self.init_lr)
            total_updates = tf.convert_to_tensor(self.number_of_updates, dtype=base_lr.dtype)
            progress = tf.cast(step, base_lr.dtype) / total_updates
            return base_lr * (1.0 - progress)
    def get_config(self):
        """
        Return the serialization config (required LearningRateSchedule override).
        """
        return dict(init_lr=self.init_lr,
                    number_of_updates=self.number_of_updates,
                    name='inverse_time_decay')
class PolicyAndValueNetwork(tf.Module):
    """Actor-critic network: a shared 2x64 ReLU MLP trunk feeding a
    categorical policy head and a scalar value head.
    """
    def __init__(self, ac_space, ob_space):
        """
        :param ac_space: discrete action space (``ac_space.n`` actions)
        :param ob_space: observation space providing the flat input shape
        """
        super().__init__()
        input_layer = tf.keras.Input(shape=ob_space.shape)
        layers = input_layer
        # Two fully-connected hidden layers of 64 ReLU units each.
        for i in range(2):
            layers = tf.keras.layers.Dense(units=64, activation=tf.keras.activations.relu)(layers)
        self.policy_network = tf.keras.Model(inputs=[input_layer], outputs=[layers])
        # Categorical distribution over the ac_space.n discrete actions.
        self.policy_distribution = CategoricalPdType(self.policy_network.output_shape, ac_space.n, init_scale=0.01)
        print(self.policy_network.summary())
        with tf.name_scope('value_function'):
            # Single linear unit on top of the shared trunk -> state value.
            layer = tf.keras.layers.Dense(units=1, bias_initializer=tf.keras.initializers.Constant(0.01))
            layer.build(self.policy_network.output_shape)
            self.value_fc = layer
    @tf.function
    def step(self, observation):
        """
        Sample an action and compute the value estimate for an observation.
        """
        latent = self.policy_network(observation)
        pd, _ = self.policy_distribution.pdfromlatent(latent)
        action = pd.sample()
        vf = tf.squeeze(self.value_fc(latent), axis=1)
        return action, vf
    @tf.function
    def value(self, observation):
        """
        Compute value given the observation
        """
        latent = self.policy_network(observation)
        result = tf.squeeze(self.value_fc(latent), axis=1)
        return result
class ActorCriticModel(tf.keras.Model):
    """A2C agent wrapping PolicyAndValueNetwork together with its RMSprop
    training step; exposes ``step``/``value`` from the underlying network.
    """
    def __init__(self, action_space, observation_space, number_of_updates, lr):
        """
        :param action_space: discrete action space of the environment
        :param observation_space: observation space of the environment
        :param number_of_updates: total number of updates (drives the LR decay)
        :param lr: initial learning rate
        """
        super().__init__()
        self.train_model = PolicyAndValueNetwork(action_space, observation_space)
        # Learning rate decays linearly to zero over number_of_updates.
        lr_scheduler = InverseTimeDecay(init_lr=lr, number_of_updates=number_of_updates)
        self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=lr_scheduler, rho=0.99, epsilon=1e-5)
        self.entropy_coefficient = 0.01  # weight of the entropy bonus in the loss
        self.value_function_coefficient = 0.5  # weight of the value loss
        self.max_grad_norm = 0.5  # global gradient clipping norm
        self.step = self.train_model.step
        self.value = self.train_model.value
    @tf.function
    def train(self, obs, rewards, actions, values):
        """
        Train the network: First, calculate the policy distribution based on the current observation (with the policy
        network). From this distribution, the policy loss and the policy entropy can be calculated. Then estimate the
        value with the same network.
        """
        # Advantage = discounted returns minus the baseline value estimates.
        advantage = rewards - values
        with tf.GradientTape() as tape:
            policy_latent = self.train_model.policy_network(obs)
            policy_distribution, _ = self.train_model.policy_distribution.pdfromlatent(policy_latent)
            neg_log_probability = policy_distribution.neglogp(actions)
            policy_entropy = tf.reduce_mean(policy_distribution.entropy())
            # compute the estimated value given the observation (the critic)
            value_predicted = self.train_model.value(obs)
            # compute the overall loss
            value_loss = tf.reduce_mean(tf.square(value_predicted - rewards))
            policy_loss = tf.reduce_mean(advantage * neg_log_probability)
            loss = policy_loss - policy_entropy * self.entropy_coefficient + value_loss * self.value_function_coefficient
        # Update the gradients
        variables = tape.watched_variables()
        gradients = tape.gradient(loss, variables)
        gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm)
        gradients_variables = list(zip(gradients, variables))
        self.optimizer.apply_gradients(gradients_variables)
        return policy_loss, value_loss, policy_entropy
def discount_with_dones(rewards, dones, gamma):
    """
    Walk the trajectory backwards accumulating the discounted return,
    resetting the accumulator wherever the environment reported done.
    """
    running = 0
    out = []
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = reward + gamma * running * (1. - done)
        out.append(running)
    out.reverse()
    return out
class MinibatchExecuter:
    """Collects rollouts of ``number_of_steps`` transitions from a
    (possibly vectorized) environment and converts them into the flat
    training batches consumed by A2C.
    """
    def __init__(self, environment, model, number_of_steps, gamma):
        """
        :param environment: (vectorized) gym-style environment
        :param model: model exposing ``step`` and ``value``
        :param number_of_steps: rollout length per environment
        :param gamma: discount factor (<= 0 disables return discounting)
        """
        self.gamma = gamma
        self.environment = environment
        self.model = model
        self.number_of_steps = number_of_steps
        # Vectorized envs expose num_envs; fall back to a single env.
        self.number_of_envs = environment.num_envs if hasattr(environment, 'num_envs') else 1
        self.observations = np.zeros((self.number_of_envs,) + environment.observation_space.shape,
                                     dtype=environment.observation_space.dtype.name)
        self.observations[:] = environment.reset()
        self.dones = [False for _ in range(self.number_of_envs)]
    def run(self):
        """
        Roll out one minibatch and return flattened
        (observations, rewards, actions, values, episode_infos).
        """
        observations_minibatch, rewards_minibatch, actions_minibatch, values_minibatch, dones_minibatch = [], [], [], [], []
        episode_infos = []
        for _ in range(self.number_of_steps):
            # Get action and value for a given observation
            observations_t = tf.constant(self.observations)
            actions, values, = self.model.step(observations_t)
            actions = actions._numpy()
            observations_minibatch.append(self.observations.copy())
            actions_minibatch.append(actions)
            values_minibatch.append(values._numpy())
            dones_minibatch.append(self.dones)
            # Take actions in env and look the results
            self.observations[:], rewards, self.dones, logs = self.environment.step(actions)
            for log in logs:
                episode_log = log.get('episode')
                if episode_log:
                    episode_infos.append(episode_log)
            rewards_minibatch.append(rewards)
        dones_minibatch.append(self.dones)
        # Batch of steps to batch of rollouts
        observations_minibatch = swap_and_flatten(np.asarray(observations_minibatch, dtype=self.observations.dtype))
        rewards_minibatch = np.asarray(rewards_minibatch, dtype=np.float32).swapaxes(1, 0)
        actions_minibatch = swap_and_flatten(np.asarray(actions_minibatch, dtype=actions.dtype))
        values_minibatch = np.asarray(values_minibatch, dtype=np.float32).swapaxes(1, 0)
        # BUG FIX: the deprecated alias ``np.bool`` was removed in NumPy >= 1.24;
        # the builtin ``bool`` is the equivalent dtype.
        dones_minibatch = np.asarray(dones_minibatch, dtype=bool).swapaxes(1, 0)
        dones_minibatch = dones_minibatch[:, 1:]
        if self.gamma > 0.0:
            # Discount value function
            last_values = self.model.value(tf.constant(self.observations))._numpy().tolist()
            for i, (rewards, dones, value) in enumerate(zip(rewards_minibatch, dones_minibatch, last_values)):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    # Trajectory did not terminate: bootstrap with the value estimate.
                    rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.gamma)
                rewards_minibatch[i] = rewards
        rewards_minibatch = rewards_minibatch.flatten()
        values_minibatch = values_minibatch.flatten()
        return observations_minibatch, rewards_minibatch, actions_minibatch, values_minibatch, episode_infos
def load_checkpoint(conf, model):
    """Restore the latest checkpoint into ``model`` if a load path is configured.

    ``conf['load_path']`` may be ``None`` or the literal string ``'None'``
    to disable restoring.
    """
    load_path = conf['load_path']
    # BUG FIX: the original compared against the string literal with ``is not``,
    # which tests object identity, not equality (a SyntaxWarning on CPython 3.8+
    # and unreliable across interpreters). Use ``!=`` instead.
    if load_path is not None and load_path != 'None':
        load_path = os.path.expanduser(load_path)
        ckpt = tf.train.Checkpoint(model=model)
        manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
        ckpt.restore(manager.latest_checkpoint)
def get_model(env, conf, number_of_updates):
    """Build an ActorCriticModel wired to the environment's spaces."""
    return ActorCriticModel(action_space=env.action_space,
                            observation_space=env.observation_space,
                            number_of_updates=number_of_updates,
                            lr=conf['lr'])
def train(env, conf):
    """Train an A2C agent on a vectorized environment.

    Each iteration collects one rollout, performs one gradient update,
    logs statistics and - after a warm-up of 15000 updates - checkpoints
    the model whenever the mean episode reward improves.

    :param env: vectorized environment exposing ``num_envs``
    :param conf: config dict with ``num_env``, ``nsteps``, ``gamma``,
                 ``lr`` and ``load_path``
    """
    total_timesteps = int(10e7)  # NOTE: 10e7 == 1e8 environment steps
    logger = loghandler.LogHandler(conf)
    # Calculate the batch_size
    number_of_envs = env.num_envs
    assert env.num_envs == conf['num_env']
    number_of_batches = number_of_envs * conf['nsteps']
    number_of_updates = total_timesteps // number_of_batches
    model = get_model(env, conf, number_of_updates)
    load_checkpoint(conf, model)
    # Instantiate the runner object
    minibatch_executer = MinibatchExecuter(env, model, number_of_steps=conf['nsteps'], gamma=conf['gamma'])
    episode_infos = deque(maxlen=100)  # sliding window of recent episode stats
    best_reward = -100000000000
    time_start = time.time()
    for update in range(1, number_of_updates + 1):
        logs = {}
        # Get mini batch of experiences
        observations, rewards, actions, values, epinfos = minibatch_executer.run()
        episode_infos.extend(epinfos)
        observations = tf.constant(observations)
        rewards = tf.constant(rewards)
        actions = tf.constant(actions)
        values = tf.constant(values)
        policy_loss, value_loss, policy_entropy = model.train(observations, rewards, actions, values)
        running_time_sec = time.time() - time_start
        fps = int((update * number_of_batches) / running_time_sec)
        # Calculates if value function is a good predicator of the returns (ev > 1)
        # or if it's just worse than predicting nothing (ev =< 0)
        ev = explained_variance(values, rewards)
        mean_reward = mean_or_nan([epinfo['r'] for epinfo in episode_infos])
        logs["number_of_updates"] = update
        logs["total_timesteps"] = update * number_of_batches
        logs["fps"] = fps
        logs["policy_entropy"] = float(policy_entropy)
        logs["value_loss"] = float(value_loss)
        logs["explained_variance"] = float(ev)
        logs["mean_episode_reward"] = mean_reward
        logs["mean_episode_length"] = mean_or_nan([epinfo['l'] for epinfo in episode_infos])
        logger.log(logs)
        # Checkpoint the best-performing model (after warm-up).
        if update > 15000 and mean_reward > best_reward:
            best_reward = mean_reward
            save_path = os.path.expanduser('best_models/a2c')
            ckpt = tf.train.Checkpoint(model=model)
            manager = tf.train.CheckpointManager(ckpt, save_path, max_to_keep=None)
            manager.save()
def val(env, conf):
    """Run the trained policy in the environment forever, rendering and
    printing the reward of each finished episode.

    :param env: (vectorized) environment
    :param conf: config dict; ``load_path`` must point to a checkpoint
    :raises AttributeError: if no load path is configured
    """
    observation = env.reset()
    # BUG FIX: the original used ``conf['load_path'] is 'None'`` -- an identity
    # comparison against a string literal (SyntaxWarning on CPython 3.8+,
    # interpreter-dependent result). Compare with ``==`` instead.
    if conf['load_path'] is None or conf['load_path'] == 'None':
        raise AttributeError("Load path must be defined to validate model!!!")
    total_timesteps = int(10e6)
    # Calculate the batch_size
    number_of_batches = conf['num_env'] * conf['nsteps']
    number_of_updates = total_timesteps // number_of_batches
    model = get_model(env, conf, number_of_updates)
    load_checkpoint(conf, model)
    episode_reward = np.zeros(env.num_envs)
    while True:
        actions, _ = model.step(observation)
        observation, reward, done, _ = env.step(actions.numpy())
        episode_reward += reward
        env.render()
        # ``done`` may be a per-env array (vectorized env) or a scalar bool.
        done_any = done.any() if isinstance(done, np.ndarray) else done
        if done_any:
            for i in np.nonzero(done)[0]:
                print('episode reward = {}'.format(episode_reward[i]))
                episode_reward[i] = 0
| [
"tensorflow.train.Checkpoint",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.clip_by_global_norm",
"ppo2.mean_or_nan",
"collections.deque",
"numpy.asarray",
"utils.distributions.CategoricalPdType",
"tensorflow.square",
"ten... | [((10111, 10138), 'utils.loghandler.LogHandler', 'loghandler.LogHandler', (['conf'], {}), '(conf)\n', (10132, 10138), False, 'from utils import loghandler\n'), ((10616, 10633), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (10621, 10633), False, 'from collections import deque\n'), ((10684, 10695), 'time.time', 'time.time', ([], {}), '()\n', (10693, 10695), False, 'import time\n'), ((12846, 12868), 'numpy.zeros', 'np.zeros', (['env.num_envs'], {}), '(env.num_envs)\n', (12854, 12868), True, 'import numpy as np\n'), ((2473, 2509), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'ob_space.shape'}), '(shape=ob_space.shape)\n', (2487, 2509), True, 'import tensorflow as tf\n'), ((2696, 2750), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_layer]', 'outputs': '[layers]'}), '(inputs=[input_layer], outputs=[layers])\n', (2710, 2750), True, 'import tensorflow as tf\n'), ((2786, 2871), 'utils.distributions.CategoricalPdType', 'CategoricalPdType', (['self.policy_network.output_shape', 'ac_space.n'], {'init_scale': '(0.01)'}), '(self.policy_network.output_shape, ac_space.n, init_scale=0.01\n )\n', (2803, 2871), False, 'from utils.distributions import CategoricalPdType\n'), ((4097, 4182), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'learning_rate': 'lr_scheduler', 'rho': '(0.99)', 'epsilon': '(1e-05)'}), '(learning_rate=lr_scheduler, rho=0.99, epsilon=1e-05\n )\n', (4124, 4182), True, 'import tensorflow as tf\n'), ((5741, 5794), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', 'self.max_grad_norm'], {}), '(gradients, self.max_grad_norm)\n', (5763, 5794), True, 'import tensorflow as tf\n'), ((6664, 6786), 'numpy.zeros', 'np.zeros', (['((self.number_of_envs,) + environment.observation_space.shape)'], {'dtype': 'environment.observation_space.dtype.name'}), '((self.number_of_envs,) + environment.observation_space.shape,\n 
dtype=environment.observation_space.dtype.name)\n', (6672, 6786), True, 'import numpy as np\n'), ((9553, 9590), 'os.path.expanduser', 'os.path.expanduser', (["conf['load_path']"], {}), "(conf['load_path'])\n", (9571, 9590), False, 'import os\n'), ((9606, 9638), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model'}), '(model=model)\n', (9625, 9638), True, 'import tensorflow as tf\n'), ((9657, 9718), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'load_path'], {'max_to_keep': 'None'}), '(ckpt, load_path, max_to_keep=None)\n', (9683, 9718), True, 'import tensorflow as tf\n'), ((10951, 10976), 'tensorflow.constant', 'tf.constant', (['observations'], {}), '(observations)\n', (10962, 10976), True, 'import tensorflow as tf\n'), ((10995, 11015), 'tensorflow.constant', 'tf.constant', (['rewards'], {}), '(rewards)\n', (11006, 11015), True, 'import tensorflow as tf\n'), ((11034, 11054), 'tensorflow.constant', 'tf.constant', (['actions'], {}), '(actions)\n', (11045, 11054), True, 'import tensorflow as tf\n'), ((11072, 11091), 'tensorflow.constant', 'tf.constant', (['values'], {}), '(values)\n', (11083, 11091), True, 'import tensorflow as tf\n'), ((11477, 11512), 'ppo2.explained_variance', 'explained_variance', (['values', 'rewards'], {}), '(values, rewards)\n', (11495, 11512), False, 'from ppo2 import explained_variance, mean_or_nan, swap_and_flatten\n'), ((11535, 11589), 'ppo2.mean_or_nan', 'mean_or_nan', (["[epinfo['r'] for epinfo in episode_infos]"], {}), "([epinfo['r'] for epinfo in episode_infos])\n", (11546, 11589), False, 'from ppo2 import explained_variance, mean_or_nan, swap_and_flatten\n'), ((11957, 12011), 'ppo2.mean_or_nan', 'mean_or_nan', (["[epinfo['l'] for epinfo in episode_infos]"], {}), "([epinfo['l'] for epinfo in episode_infos])\n", (11968, 12011), False, 'from ppo2 import explained_variance, mean_or_nan, swap_and_flatten\n'), ((1750, 1785), 'tensorflow.name_scope', 'tf.name_scope', 
(['"""inverse_time_decay"""'], {}), "('inverse_time_decay')\n", (1763, 1785), True, 'import tensorflow as tf\n'), ((1809, 1843), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.init_lr'], {}), '(self.init_lr)\n', (1829, 1843), True, 'import tensorflow as tf\n'), ((1878, 1943), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.number_of_updates'], {'dtype': 'init_lr.dtype'}), '(self.number_of_updates, dtype=init_lr.dtype)\n', (1898, 1943), True, 'import tensorflow as tf\n'), ((1965, 1993), 'tensorflow.cast', 'tf.cast', (['step', 'init_lr.dtype'], {}), '(step, init_lr.dtype)\n', (1972, 1993), True, 'import tensorflow as tf\n'), ((2927, 2958), 'tensorflow.name_scope', 'tf.name_scope', (['"""value_function"""'], {}), "('value_function')\n", (2940, 2958), True, 'import tensorflow as tf\n'), ((4800, 4817), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4815, 4817), True, 'import tensorflow as tf\n'), ((5420, 5467), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(advantage * neg_log_probability)'], {}), '(advantage * neg_log_probability)\n', (5434, 5467), True, 'import tensorflow as tf\n'), ((7243, 7273), 'tensorflow.constant', 'tf.constant', (['self.observations'], {}), '(self.observations)\n', (7254, 7273), True, 'import tensorflow as tf\n'), ((8090, 8155), 'numpy.asarray', 'np.asarray', (['observations_minibatch'], {'dtype': 'self.observations.dtype'}), '(observations_minibatch, dtype=self.observations.dtype)\n', (8100, 8155), True, 'import numpy as np\n'), ((8293, 8343), 'numpy.asarray', 'np.asarray', (['actions_minibatch'], {'dtype': 'actions.dtype'}), '(actions_minibatch, dtype=actions.dtype)\n', (8303, 8343), True, 'import numpy as np\n'), ((11221, 11232), 'time.time', 'time.time', ([], {}), '()\n', (11230, 11232), False, 'import time\n'), ((12157, 12194), 'os.path.expanduser', 'os.path.expanduser', (['"""best_models/a2c"""'], {}), "('best_models/a2c')\n", (12175, 12194), False, 'import os\n'), ((12214, 12246), 
'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model'}), '(model=model)\n', (12233, 12246), True, 'import tensorflow as tf\n'), ((12269, 12330), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'save_path'], {'max_to_keep': 'None'}), '(ckpt, save_path, max_to_keep=None)\n', (12295, 12330), True, 'import tensorflow as tf\n'), ((2587, 2656), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(64)', 'activation': 'tf.keras.activations.relu'}), '(units=64, activation=tf.keras.activations.relu)\n', (2608, 2656), True, 'import tensorflow as tf\n'), ((5356, 5392), 'tensorflow.square', 'tf.square', (['(value_predicted - rewards)'], {}), '(value_predicted - rewards)\n', (5365, 5392), True, 'import tensorflow as tf\n'), ((8185, 8232), 'numpy.asarray', 'np.asarray', (['rewards_minibatch'], {'dtype': 'np.float32'}), '(rewards_minibatch, dtype=np.float32)\n', (8195, 8232), True, 'import numpy as np\n'), ((8372, 8418), 'numpy.asarray', 'np.asarray', (['values_minibatch'], {'dtype': 'np.float32'}), '(values_minibatch, dtype=np.float32)\n', (8382, 8418), True, 'import numpy as np\n'), ((8460, 8502), 'numpy.asarray', 'np.asarray', (['dones_minibatch'], {'dtype': 'np.bool'}), '(dones_minibatch, dtype=np.bool)\n', (8470, 8502), True, 'import numpy as np\n'), ((13163, 13179), 'numpy.nonzero', 'np.nonzero', (['done'], {}), '(done)\n', (13173, 13179), True, 'import numpy as np\n'), ((3028, 3064), 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.01)'], {}), '(0.01)\n', (3058, 3064), True, 'import tensorflow as tf\n'), ((8678, 8708), 'tensorflow.constant', 'tf.constant', (['self.observations'], {}), '(self.observations)\n', (8689, 8708), True, 'import tensorflow as tf\n')] |
from scipy.stats import kstest
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Draw a random sample with a known distribution (uniform here; swap in the
# commented line below for a normal sample instead).
# data = np.random.normal(0, 1, 1000)
data = np.random.uniform(0, 1, 50)
# One-sample Kolmogorov-Smirnov test of the sample against a standard normal.
test_stat = kstest(data, 'norm')
print(test_stat)
# Choose how many bins you want here
num_bins = 20
# Empirical CDF of the sample, evaluated on a grid over its range.
ecdf = sm.distributions.ECDF(data)
x = np.linspace(min(data), max(data),num_bins)
y = ecdf(x)
# Reference CDF: standard normal evaluated on the standardized grid.
yy = norm.cdf((x-x.mean())/x.std(), 0, 1)
# Index of D, the largest vertical distance between the two CDFs.
z = abs(yy-y).tolist()
ind = z.index(max(z))
print(x)
print(len(y))
print(yy)
# # plot
# fig1 = plt.figure('fig1')
# n, bins, patches = plt.hist(data, bins=30, normed=1, facecolor='green', alpha=0.75)
# plt.title("random sample's pdf")
# fig2 = plt.figure('fig2')
# plt.step(x,y,label="Fn(x)")
plt.step(x,yy,label="F0(x)")
# plt.errorbar(((x[ind]+x[ind-1])/2),(abs(yy[ind]+y[ind]))/2,abs(yy[ind]-y[ind])/2,fmt='-ro',label="D")
# plt.legend(loc='upper left')
# plt.title("random sample and normal distribution's cdf")
#
# fig3 = plt.figure('fig3')
# plt.plot(data)
plt.show()
| [
"scipy.stats.kstest",
"statsmodels.api.distributions.ECDF",
"numpy.random.uniform",
"matplotlib.pyplot.step",
"matplotlib.pyplot.show"
] | [((237, 264), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (254, 264), True, 'import numpy as np\n'), ((316, 336), 'scipy.stats.kstest', 'kstest', (['data', '"""norm"""'], {}), "(data, 'norm')\n", (322, 336), False, 'from scipy.stats import kstest\n'), ((451, 478), 'statsmodels.api.distributions.ECDF', 'sm.distributions.ECDF', (['data'], {}), '(data)\n', (472, 478), True, 'import statsmodels.api as sm\n'), ((952, 982), 'matplotlib.pyplot.step', 'plt.step', (['x', 'yy'], {'label': '"""F0(x)"""'}), "(x, yy, label='F0(x)')\n", (960, 982), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1240), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1238, 1240), True, 'import matplotlib.pyplot as plt\n')] |
# Load a TensorRT-compiled ResNet with the infery runtime and benchmark it on GPU.
import infery, numpy as np
model = infery.load(model_path='resnet_dynamic_1_1.pkl', framework_type='trt', inference_hardware='gpu')
# Random batch of 16 images in NCHW float32 layout (3 x 224 x 224).
inputs = np.random.random((16, 3, 224, 224)).astype('float32')
model.predict(inputs)
model.benchmark(batch_size=16)
| [
"numpy.random.random",
"infery.load"
] | [((35, 135), 'infery.load', 'infery.load', ([], {'model_path': '"""resnet_dynamic_1_1.pkl"""', 'framework_type': '"""trt"""', 'inference_hardware': '"""gpu"""'}), "(model_path='resnet_dynamic_1_1.pkl', framework_type='trt',\n inference_hardware='gpu')\n", (46, 135), False, 'import infery, numpy as np\n'), ((142, 177), 'numpy.random.random', 'np.random.random', (['(16, 3, 224, 224)'], {}), '((16, 3, 224, 224))\n', (158, 177), True, 'import infery, numpy as np\n')] |
import numpy as np
import scipy
from ... import spectrum
from ... import utilits as ut
def ica_kurtosis(x, order, mode = 'full'):
    '''
    FUNCTION IN TEST
    Max-kurtosis Independent Component Analysis (ICA)

    Parameters
    ------------------------
    x: input signal (``x.size`` is used to set the kernel lag count)
    order: number of independent components to keep
    mode: kernel-matrix construction mode (passed through to the helper)

    Returns
    ------------------------
    Zica: estimated independent components

    References
    ------------------------
    [1] http://www.cs.nyu.edu/~roweis/kica.html
    '''
    # NOTE(review): ``signals`` is not imported in this module and
    # ``kernel_martix`` looks misspelled -- as written this line raises
    # NameError; confirm the intended helper before use.
    X = signals.matrix.kernel_martix(x, mode=mode, ktype='linear', kpar=0.001, lags = x.size//2)
    # Whitening-style transform: W = sqrtm(inv(X.T @ conj(X))).
    invCov = np.linalg.inv(X.T.dot(np.conj(X)))
    W = scipy.linalg.sqrtm(invCov)
    Xcw = np.dot(W , X)
    # Weight each whitened row by its summed squared magnitude.
    gg = repmat(np.sum(np.square(Xcw),axis=1), Xcw.shape[0], 1)
    TEST= np.dot(gg*Xcw, Xcw.T)
    es,ev = np.linalg.eig(TEST)
    # Project onto the leading ``order`` eigenvectors.
    Zica = np.dot(ev[:order,:], Xcw)
    return Zica
def repmat(a, m, n):
    """Repeat a 0-D, 1-D or 2-D array in an m-by-n block tiling
    (MATLAB-style ``repmat``).

    Parameters
    ----------
    a : array_like
        Input; scalars and 1-D arrays are treated as a single row.
    m, n : int
        Number of repetitions along the first and second axis.

    Returns
    -------
    ndarray of shape ``(origrows * m, origcols * n)``.
    """
    a = np.asanyarray(a)
    # Promote scalars/vectors to a single row, matching the original
    # hand-rolled reshape logic (and MATLAB semantics).
    if a.ndim == 0:
        a = a.reshape(1, 1)
    elif a.ndim == 1:
        a = a.reshape(1, a.size)
    # np.tile performs the same block repetition the manual
    # reshape/repeat pipeline did, in a single vectorized call.
    return np.tile(a, (m, n))
"scipy.linalg.sqrtm",
"numpy.linalg.eig",
"numpy.conj",
"numpy.asanyarray",
"numpy.square",
"numpy.dot"
] | [((488, 514), 'scipy.linalg.sqrtm', 'scipy.linalg.sqrtm', (['invCov'], {}), '(invCov)\n', (506, 514), False, 'import scipy\n'), ((525, 537), 'numpy.dot', 'np.dot', (['W', 'X'], {}), '(W, X)\n', (531, 537), True, 'import numpy as np\n'), ((621, 644), 'numpy.dot', 'np.dot', (['(gg * Xcw)', 'Xcw.T'], {}), '(gg * Xcw, Xcw.T)\n', (627, 644), True, 'import numpy as np\n'), ((655, 674), 'numpy.linalg.eig', 'np.linalg.eig', (['TEST'], {}), '(TEST)\n', (668, 674), True, 'import numpy as np\n'), ((686, 712), 'numpy.dot', 'np.dot', (['ev[:order, :]', 'Xcw'], {}), '(ev[:order, :], Xcw)\n', (692, 712), True, 'import numpy as np\n'), ((760, 776), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (773, 776), True, 'import numpy as np\n'), ((467, 477), 'numpy.conj', 'np.conj', (['X'], {}), '(X)\n', (474, 477), True, 'import numpy as np\n'), ((570, 584), 'numpy.square', 'np.square', (['Xcw'], {}), '(Xcw)\n', (579, 584), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# wujian@2019
import argparse
import numpy as np
from libs.ssl import ml_ssl, srp_ssl, music_ssl
from libs.data_handler import SpectrogramReader, NumpyReader
from libs.utils import get_logger, EPSILON
from libs.opts import StftParser, str2tuple
logger = get_logger(__name__)
def add_wta(masks_list, eps=1e-4):
    """
    Produce winner-take-all masks: each mask keeps its value where it is
    the per-bin maximum over all masks, and is floored to ``eps`` elsewhere.
    """
    stacked = np.stack(masks_list, axis=-1)
    winner = np.max(stacked, -1)
    return [np.where(m == winner, m, eps) for m in masks_list]
def run(args):
    """Estimate the direction of arrival (DoA) for each multi-channel
    utterance in ``args.wav_scp`` and write the result(s) to ``args.doa_scp``.

    Supports srp/ml/music backends, optional TF-masks and an online
    (chunk-wise) mode when both ``chunk_len`` and ``look_back`` are positive.
    """
    stft_kwargs = {
        "frame_len": args.frame_len,
        "frame_hop": args.frame_hop,
        "round_power_of_two": args.round_power_of_two,
        "window": args.window,
        "center": args.center,
        "transpose": True
    }
    # Pre-computed steer vector: A x M x F (DoAs x mics x FFT bins).
    steer_vector = np.load(args.steer_vector)
    logger.info(f"Shape of the steer vector: {steer_vector.shape}")
    num_doa, _, _ = steer_vector.shape
    min_doa, max_doa = str2tuple(args.doa_range)
    # Map DoA indices to output angles (radian/degree); "index" skips this.
    if args.output == "radian":
        angles = np.linspace(min_doa * np.pi / 180, max_doa * np.pi / 180,
                             num_doa + 1)
    else:
        angles = np.linspace(min_doa, max_doa, num_doa + 1)
    spectrogram_reader = SpectrogramReader(args.wav_scp, **stft_kwargs)
    mask_reader = None
    if args.mask_scp:
        mask_reader = [NumpyReader(scp) for scp in args.mask_scp.split(",")]
    online = (args.chunk_len > 0 and args.look_back > 0)
    if online:
        logger.info("Set up in online mode: chunk_len " +
                    f"= {args.chunk_len}, look_back = {args.look_back}")
    if args.backend == "srp":
        # Parse "a,b;c,d" into ([a, c], [b, d]) microphone index pairs.
        split_index = lambda sstr: [
            tuple(map(int, p.split(","))) for p in sstr.split(";")
        ]
        srp_pair = split_index(args.srp_pair)
        srp_pair = ([t[0] for t in srp_pair], [t[1] for t in srp_pair])
        logger.info(f"Choose srp-based algorithm, srp pair is {srp_pair}")
    else:
        srp_pair = None
    with open(args.doa_scp, "w") as doa_out:
        for key, stft in spectrogram_reader:
            # stft: M x T x F
            _, _, F = stft.shape
            if mask_reader:
                # T x F => F x T
                mask = [r[key] for r in mask_reader] if mask_reader else None
                # With several speaker masks, optionally sharpen them
                # winner-take-all before picking the first one.
                if args.mask_eps >= 0 and len(mask_reader) > 1:
                    mask = add_wta(mask, eps=args.mask_eps)
                mask = mask[0]
                # F x T => T x F
                if mask.shape[-1] != F:
                    mask = mask.transpose()
            else:
                mask = None
            if not online:
                # Offline: one DoA estimate per utterance.
                if srp_pair:
                    idx = srp_ssl(stft,
                                  steer_vector,
                                  srp_pair=srp_pair,
                                  mask=mask)
                elif args.backend == "ml":
                    idx = ml_ssl(stft,
                                 steer_vector,
                                 mask=mask,
                                 compression=-1,
                                 eps=EPSILON)
                else:
                    idx = music_ssl(stft, steer_vector, mask=mask)
                doa = idx if args.output == "index" else angles[idx]
                logger.info(f"Processing utterance {key}: {doa:.4f}")
                doa_out.write(f"{key}\t{doa:.4f}\n")
            else:
                # Online: one DoA estimate per chunk, each chunk extended
                # backwards by up to ``look_back`` frames of context.
                logger.info(f"Processing utterance {key}...")
                _, T, _ = stft.shape
                online_doa = []
                for t in range(0, T, args.chunk_len):
                    s = max(t - args.look_back, 0)
                    if mask is not None:
                        chunk_mask = mask[..., s:t + args.chunk_len]
                    else:
                        chunk_mask = None
                    stft_chunk = stft[:, s:t + args.chunk_len, :]
                    if srp_pair:
                        idx = srp_ssl(stft_chunk,
                                      steer_vector,
                                      srp_pair=srp_pair,
                                      mask=chunk_mask)
                    elif args.backend == "ml":
                        idx = ml_ssl(stft_chunk,
                                     steer_vector,
                                     mask=chunk_mask,
                                     compression=-1,
                                     eps=EPSILON)
                    else:
                        idx = music_ssl(stft_chunk,
                                        steer_vector,
                                        mask=chunk_mask)
                    doa = idx if args.output == "index" else angles[idx]
                    online_doa.append(doa)
                doa_str = " ".join([f"{d:.4f}" for d in online_doa])
                doa_out.write(f"{key}\t{doa_str}\n")
    logger.info(f"Processing {len(spectrogram_reader)} utterance done")
logger.info(f"Processing {len(spectrogram_reader)} utterance done")
if __name__ == "__main__":
    # Command-line front-end: STFT options (via StftParser) plus the
    # SSL-specific flags, then hand off to run().
    parser = argparse.ArgumentParser(
        description="Command to ML/SRP based sound souce localization (SSL)."
        "Also see scripts/sptk/compute_steer_vector.py",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[StftParser.parser])
    parser.add_argument("wav_scp",
                        type=str,
                        help="Multi-channel wave rspecifier")
    parser.add_argument("steer_vector",
                        type=str,
                        help="Pre-computed steer vector in each "
                        "directions (in shape A x M x F, A: number "
                        "of DoAs, M: microphone number, F: FFT bins)")
    parser.add_argument("doa_scp",
                        type=str,
                        help="Wspecifier for estimated DoA")
    parser.add_argument("--backend",
                        type=str,
                        default="ml",
                        choices=["ml", "srp", "music"],
                        help="Which algorithm to choose for SSL")
    parser.add_argument("--srp-pair",
                        type=str,
                        default="",
                        help="Microphone index pair to compute srp response")
    parser.add_argument("--doa-range",
                        type=str,
                        default="0,360",
                        help="DoA range")
    parser.add_argument("--mask-scp",
                        type=str,
                        default="",
                        help="Rspecifier for TF-masks in numpy format")
    parser.add_argument("--output",
                        type=str,
                        default="degree",
                        choices=["radian", "degree", "index"],
                        help="Output type of the DoA")
    parser.add_argument("--mask-eps",
                        type=float,
                        default=-1,
                        help="Value of eps used in masking winner-take-all")
    parser.add_argument("--chunk-len",
                        type=int,
                        default=-1,
                        help="Number frames per chunk "
                        "(for online setups)")
    parser.add_argument("--look-back",
                        type=int,
                        default=125,
                        help="Number of frames to look back "
                        "(for online setups)")
    args = parser.parse_args()
run(args) | [
"argparse.ArgumentParser",
"numpy.where",
"libs.data_handler.NumpyReader",
"libs.ssl.srp_ssl",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"libs.utils.get_logger",
"libs.opts.str2tuple",
"libs.ssl.music_ssl",
"numpy.load",
"libs.data_handler.SpectrogramReader",
"libs.ssl.ml_ssl"
] | [((282, 302), 'libs.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (292, 302), False, 'from libs.utils import get_logger, EPSILON\n'), ((402, 431), 'numpy.stack', 'np.stack', (['masks_list'], {'axis': '(-1)'}), '(masks_list, axis=-1)\n', (410, 431), True, 'import numpy as np\n'), ((447, 464), 'numpy.max', 'np.max', (['masks', '(-1)'], {}), '(masks, -1)\n', (453, 464), True, 'import numpy as np\n'), ((902, 928), 'numpy.load', 'np.load', (['args.steer_vector'], {}), '(args.steer_vector)\n', (909, 928), True, 'import numpy as np\n'), ((1059, 1084), 'libs.opts.str2tuple', 'str2tuple', (['args.doa_range'], {}), '(args.doa_range)\n', (1068, 1084), False, 'from libs.opts import StftParser, str2tuple\n'), ((1330, 1376), 'libs.data_handler.SpectrogramReader', 'SpectrogramReader', (['args.wav_scp'], {}), '(args.wav_scp, **stft_kwargs)\n', (1347, 1376), False, 'from libs.data_handler import SpectrogramReader, NumpyReader\n'), ((5066, 5305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command to ML/SRP based sound souce localization (SSL).Also see scripts/sptk/compute_steer_vector.py"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'parents': '[StftParser.parser]'}), "(description=\n 'Command to ML/SRP based sound souce localization (SSL).Also see scripts/sptk/compute_steer_vector.py'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[\n StftParser.parser])\n", (5089, 5305), False, 'import argparse\n'), ((528, 573), 'numpy.where', 'np.where', (['(spk_mask == max_mask)', 'spk_mask', 'eps'], {}), '(spk_mask == max_mask, spk_mask, eps)\n', (536, 573), True, 'import numpy as np\n'), ((1134, 1204), 'numpy.linspace', 'np.linspace', (['(min_doa * np.pi / 180)', '(max_doa * np.pi / 180)', '(num_doa + 1)'], {}), '(min_doa * np.pi / 180, max_doa * np.pi / 180, num_doa + 1)\n', (1145, 1204), True, 'import numpy as np\n'), ((1261, 1303), 'numpy.linspace', 'np.linspace', (['min_doa', 'max_doa', 
'(num_doa + 1)'], {}), '(min_doa, max_doa, num_doa + 1)\n', (1272, 1303), True, 'import numpy as np\n'), ((1445, 1461), 'libs.data_handler.NumpyReader', 'NumpyReader', (['scp'], {}), '(scp)\n', (1456, 1461), False, 'from libs.data_handler import SpectrogramReader, NumpyReader\n'), ((2767, 2824), 'libs.ssl.srp_ssl', 'srp_ssl', (['stft', 'steer_vector'], {'srp_pair': 'srp_pair', 'mask': 'mask'}), '(stft, steer_vector, srp_pair=srp_pair, mask=mask)\n', (2774, 2824), False, 'from libs.ssl import ml_ssl, srp_ssl, music_ssl\n'), ((2996, 3062), 'libs.ssl.ml_ssl', 'ml_ssl', (['stft', 'steer_vector'], {'mask': 'mask', 'compression': '(-1)', 'eps': 'EPSILON'}), '(stft, steer_vector, mask=mask, compression=-1, eps=EPSILON)\n', (3002, 3062), False, 'from libs.ssl import ml_ssl, srp_ssl, music_ssl\n'), ((3243, 3283), 'libs.ssl.music_ssl', 'music_ssl', (['stft', 'steer_vector'], {'mask': 'mask'}), '(stft, steer_vector, mask=mask)\n', (3252, 3283), False, 'from libs.ssl import ml_ssl, srp_ssl, music_ssl\n'), ((4037, 4106), 'libs.ssl.srp_ssl', 'srp_ssl', (['stft_chunk', 'steer_vector'], {'srp_pair': 'srp_pair', 'mask': 'chunk_mask'}), '(stft_chunk, steer_vector, srp_pair=srp_pair, mask=chunk_mask)\n', (4044, 4106), False, 'from libs.ssl import ml_ssl, srp_ssl, music_ssl\n'), ((4298, 4376), 'libs.ssl.ml_ssl', 'ml_ssl', (['stft_chunk', 'steer_vector'], {'mask': 'chunk_mask', 'compression': '(-1)', 'eps': 'EPSILON'}), '(stft_chunk, steer_vector, mask=chunk_mask, compression=-1, eps=EPSILON)\n', (4304, 4376), False, 'from libs.ssl import ml_ssl, srp_ssl, music_ssl\n'), ((4581, 4633), 'libs.ssl.music_ssl', 'music_ssl', (['stft_chunk', 'steer_vector'], {'mask': 'chunk_mask'}), '(stft_chunk, steer_vector, mask=chunk_mask)\n', (4590, 4633), False, 'from libs.ssl import ml_ssl, srp_ssl, music_ssl\n')] |
# - import modules - #
import os, sys

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

if not os.path.exists('pyAp') and os.path.exists('../pyAp'): # hack to allow scripts to be placed in subdirectories next to pyAp:
    sys.path.insert(1, os.path.abspath('..'))

from pyAp import pyApthermo
from pyAp.pyAp_tools import ap_mc, yes_or_no

##############################
###       Import data     ####

df = pd.read_excel('data_calc_water.xlsx')

## place data columns in certain order for melt water calculation
order = ['XF', 'XCL', 'T,C', 'MELTF', 'MELTCL', 'MELTCOMP']
data = df[order]

## calculate melt water contents using parameter values (ignoring errors)
results = pd.DataFrame()
fn = 'outputs_melt_water.csv'

# composition columns and their 1-sigma uncertainties, used by the MC draws
comp = df[['XF', 'XCL', 'T,C', 'MELTF', 'MELTCL']]
std = df[['XF_SD', 'XCL_SD', 'T_SD', 'MELTF_SD', 'MELTCL_SD']]

## calculate errors in melt water contents considering errors in parameter values
if yes_or_no("\nRun MC for error propagation?"):
    #### +++++ Entry of MCS +++++ #####
    mc = 1000  # number of Monte-Carlo draws per sample
    print('>> Simulation starts ...')
    mc_chunks = []
    for idx in range(len(df)):
        mc_chunks.append(ap_mc(comp, std, idx, mc))
    # NOTE: DataFrame.append was removed in pandas 2.0; concatenating the
    # per-sample chunks once is the backward-compatible (and faster)
    # equivalent of the old append-in-a-loop.
    ap_mc_collect = pd.concat(mc_chunks)
    ap_mc_collect.columns = comp.columns
    ap_mc_collect['MELTCOMP'] = df.loc[df.index.repeat(mc)]['MELTCOMP']
    ap_mc_collect['water_estimates'] = ap_mc_collect.apply(lambda row: pyApthermo.ApThermo(inputs=row[order], cal_H2O=True,cal_gamma=False).meltH2O(),axis=1)
    ap_mc_collect['sample'] = df.loc[df.index.repeat(mc)]['sample']
    print('\n>> Simulation completed ...')

    results_mc = pd.DataFrame()
    results_mc['MeltWater_calcfromF'] = [x[0] for x in ap_mc_collect['water_estimates']]
    results_mc['MeltWater_calcfromCl'] = [x[1] for x in ap_mc_collect['water_estimates']]
    results_mc['sample'] = ap_mc_collect.reset_index()['sample']

    fn_mc = 'outputs_melt_water_mc.csv'
    results_mc.to_csv(fn_mc, index=False)
    print('\n>> mc = ' + str(mc) + '. All MC results are saved in csv file: ' + fn_mc + '\n')

    ### median and standard deviation calculation
    # for melt water calculated from F
    # BUG FIX: fillna returns a new frame; the original call discarded the
    # result, so NaNs were never replaced. Reassign to actually apply it.
    results_mc = results_mc.fillna(0)
    results_mc = results_mc[results_mc['MeltWater_calcfromF']>0]
    median_F = results_mc.groupby('sample')['MeltWater_calcfromF'].median()
    # 1-sigma estimated as half the 16th-84th percentile spread
    sd_F = results_mc.groupby('sample')['MeltWater_calcfromF'].transform(lambda s: (np.percentile(s, 84)-np.percentile(s, 16))/2).unique()
    # for melt water calculated from Cl
    results_mc = results_mc[results_mc['MeltWater_calcfromCl']>0]
    median_Cl = results_mc.groupby('sample')['MeltWater_calcfromCl'].median()
    sd_Cl = results_mc.groupby('sample')['MeltWater_calcfromCl'].transform(lambda s: (np.percentile(s, 84)-np.percentile(s, 16))/2).unique()

    results['MeltWater_Fmedian'] = [x for x in median_F]
    results['MeltWater_F1sd'] = [x for x in sd_F]
    results['MeltWater_Clmedian'] = [x for x in median_Cl]
    results['MeltWater_Cl1sd'] = [x for x in sd_Cl]
    results.to_csv(fn)
    print(results)
    print('\n>> The median and standard deviation of MC results are saved in csv file: '+ fn + '\n>> Close the figure to exit. \n')

    ### plot results ###
    fig, axes = plt.subplots(1, 2, figsize=(9,4), constrained_layout=True)
    sns.kdeplot(x = 'MeltWater_calcfromF', data=results_mc, hue='sample', ax = axes[0])
    sns.kdeplot(x = 'MeltWater_calcfromCl', data=results_mc, hue='sample', ax = axes[1])
    plt.show()

else:
    list_result = data.apply(lambda row: pyApthermo.ApThermo(inputs=row[order], cal_H2O=True,cal_gamma=False).meltH2O(),axis=1)
    results['MeltWater_calcfromF'] = [x[0] for x in list_result]
    results['MeltWater_calcfromCl'] = [x[1] for x in list_result]
    results['sample'] = df['sample']
    results.to_csv(fn)
    print('\n>> Results are saved in ' + fn)
| [
"os.path.exists",
"pyAp.pyAp_tools.ap_mc",
"seaborn.kdeplot",
"pyAp.pyApthermo.ApThermo",
"pyAp.pyAp_tools.yes_or_no",
"pandas.read_excel",
"pandas.DataFrame",
"numpy.percentile",
"os.path.abspath",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((441, 478), 'pandas.read_excel', 'pd.read_excel', (['"""data_calc_water.xlsx"""'], {}), "('data_calc_water.xlsx')\n", (454, 478), True, 'import pandas as pd\n'), ((711, 725), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (723, 725), True, 'import pandas as pd\n'), ((811, 827), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (823, 827), True, 'import pandas as pd\n'), ((1030, 1077), 'pyAp.pyAp_tools.yes_or_no', 'yes_or_no', (['"""\nRun MC for error propagation?"""'], {}), '("""\nRun MC for error propagation?""")\n', (1039, 1077), False, 'from pyAp.pyAp_tools import ap_mc, yes_or_no\n'), ((167, 192), 'os.path.exists', 'os.path.exists', (['"""../pyAp"""'], {}), "('../pyAp')\n", (181, 192), False, 'import os, sys\n'), ((1719, 1733), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1731, 1733), True, 'import pandas as pd\n'), ((3549, 3608), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 4)', 'constrained_layout': '(True)'}), '(1, 2, figsize=(9, 4), constrained_layout=True)\n', (3561, 3608), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3691), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': '"""MeltWater_calcfromF"""', 'data': 'results_mc', 'hue': '"""sample"""', 'ax': 'axes[0]'}), "(x='MeltWater_calcfromF', data=results_mc, hue='sample', ax=axes[0])\n", (3623, 3691), True, 'import seaborn as sns\n'), ((3700, 3785), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': '"""MeltWater_calcfromCl"""', 'data': 'results_mc', 'hue': '"""sample"""', 'ax': 'axes[1]'}), "(x='MeltWater_calcfromCl', data=results_mc, hue='sample', ax=axes[1]\n )\n", (3711, 3785), True, 'import seaborn as sns\n'), ((3789, 3799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3797, 3799), True, 'import matplotlib.pyplot as plt\n'), ((140, 162), 'os.path.exists', 'os.path.exists', (['"""pyAp"""'], {}), "('pyAp')\n", (154, 162), False, 'import os, sys\n'), ((286, 307), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), 
"('..')\n", (301, 307), False, 'import os, sys\n'), ((1236, 1261), 'pyAp.pyAp_tools.ap_mc', 'ap_mc', (['comp', 'std', 'idx', 'mc'], {}), '(comp, std, idx, mc)\n', (1241, 1261), False, 'from pyAp.pyAp_tools import ap_mc, yes_or_no\n'), ((1501, 1570), 'pyAp.pyApthermo.ApThermo', 'pyApthermo.ApThermo', ([], {'inputs': 'row[order]', 'cal_H2O': '(True)', 'cal_gamma': '(False)'}), '(inputs=row[order], cal_H2O=True, cal_gamma=False)\n', (1520, 1570), False, 'from pyAp import pyApthermo\n'), ((3853, 3922), 'pyAp.pyApthermo.ApThermo', 'pyApthermo.ApThermo', ([], {'inputs': 'row[order]', 'cal_H2O': '(True)', 'cal_gamma': '(False)'}), '(inputs=row[order], cal_H2O=True, cal_gamma=False)\n', (3872, 3922), False, 'from pyAp import pyApthermo\n'), ((2500, 2520), 'numpy.percentile', 'np.percentile', (['s', '(84)'], {}), '(s, 84)\n', (2513, 2520), True, 'import numpy as np\n'), ((2521, 2541), 'numpy.percentile', 'np.percentile', (['s', '(16)'], {}), '(s, 16)\n', (2534, 2541), True, 'import numpy as np\n'), ((2830, 2850), 'numpy.percentile', 'np.percentile', (['s', '(84)'], {}), '(s, 84)\n', (2843, 2850), True, 'import numpy as np\n'), ((2851, 2871), 'numpy.percentile', 'np.percentile', (['s', '(16)'], {}), '(s, 16)\n', (2864, 2871), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
from evidently.analyzers.base_analyzer import Analyzer
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from scipy.stats import ks_2samp, chisquare
class DataDriftAnalyzer(Analyzer):
    """Computes per-feature drift statistics between a reference and a
    production dataset.

    Numerical features are compared with the two-sample Kolmogorov-Smirnov
    test; categorical (numeric-coded) features with a chi-square
    goodness-of-fit test. Each feature's entry in result['metrics'] carries
    10-bin density histograms of both datasets plus the test's p-value.
    """

    @staticmethod
    def _small_hist(series):
        """10-bin density histogram over the finite values, as plain lists."""
        return [t.tolist() for t in
                np.histogram(series[np.isfinite(series)], bins=10, density=True)]

    def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping):
        """Return a dict with utility columns, feature name lists, and a
        per-feature 'metrics' dict (histograms + drift p-value)."""
        result = dict()
        if column_mapping:
            date_column = column_mapping.get('datetime')
            id_column = column_mapping.get('id')
            target_column = column_mapping.get('target')
            prediction_column = column_mapping.get('prediction')
            num_feature_names = column_mapping.get('numerical_features')
            if num_feature_names is None:
                num_feature_names = []
            else:
                num_feature_names = [name for name in num_feature_names
                                     if is_numeric_dtype(reference_data[name])]
            cat_feature_names = column_mapping.get('categorical_features')
            if cat_feature_names is None:
                cat_feature_names = []
            else:
                # Only numeric-coded categorical columns are kept, because
                # np.isfinite below cannot handle object dtype.
                cat_feature_names = [name for name in cat_feature_names
                                     if is_numeric_dtype(reference_data[name])]
        else:
            date_column = 'datetime' if 'datetime' in reference_data.columns else None
            id_column = None
            target_column = 'target' if 'target' in reference_data.columns else None
            prediction_column = 'prediction' if 'prediction' in reference_data.columns else None

            utility_columns = [date_column, id_column, target_column, prediction_column]

            num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns))
            # FIX: the np.object alias was removed in numpy 1.24; the builtin
            # `object` is the documented replacement.
            cat_feature_names = list(set(reference_data.select_dtypes([object]).columns) - set(utility_columns))

        result["utility_columns"] = {'date': date_column, 'id': id_column,
                                     'target': target_column, 'prediction': prediction_column}
        result["cat_feature_names"] = cat_feature_names
        result["num_feature_names"] = num_feature_names

        # calculate result
        result['metrics'] = {}
        for feature_name in num_feature_names:
            result['metrics'][feature_name] = dict(
                prod_small_hist=self._small_hist(production_data[feature_name]),
                ref_small_hist=self._small_hist(reference_data[feature_name]),
                feature_type='num',
                p_value=ks_2samp(reference_data[feature_name],
                                 production_data[feature_name])[1]
            )

        for feature_name in cat_feature_names:
            ref_vals = reference_data[feature_name][np.isfinite(reference_data[feature_name])]
            prod_vals = production_data[feature_name][np.isfinite(production_data[feature_name])]

            # Align both frequency vectors on the union of observed categories.
            keys = sorted(set(ref_vals.unique()) | set(prod_vals.unique()))
            ref_counts = ref_vals.value_counts()
            prod_counts = prod_vals.value_counts()
            f_exp = np.array([ref_counts.get(key, 0) for key in keys], dtype=float)
            f_obs = np.array([prod_counts.get(key, 0) for key in keys], dtype=float)

            # BUG FIX: scipy.stats.chisquare takes (f_obs, f_exp) in that
            # order; the original call swapped them. Additionally, scale the
            # expected counts so both sides sum to the same total, which a
            # chi-square goodness-of-fit test requires.
            f_exp = f_exp * f_obs.sum() / f_exp.sum()
            p_value = chisquare(f_obs, f_exp)[1]

            result['metrics'][feature_name] = dict(
                prod_small_hist=self._small_hist(production_data[feature_name]),
                ref_small_hist=self._small_hist(reference_data[feature_name]),
                feature_type='cat',
                p_value=p_value,
            )

        return result
| [
"pandas.api.types.is_numeric_dtype",
"scipy.stats.chisquare",
"scipy.stats.ks_2samp",
"numpy.isfinite"
] | [((3941, 3964), 'scipy.stats.chisquare', 'chisquare', (['f_exp', 'f_obs'], {}), '(f_exp, f_obs)\n', (3950, 3964), False, 'from scipy.stats import ks_2samp, chisquare\n'), ((889, 927), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['reference_data[name]'], {}), '(reference_data[name])\n', (905, 927), False, 'from pandas.api.types import is_numeric_dtype\n'), ((1179, 1217), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['reference_data[name]'], {}), '(reference_data[name])\n', (1195, 1217), False, 'from pandas.api.types import is_numeric_dtype\n'), ((2731, 2800), 'scipy.stats.ks_2samp', 'ks_2samp', (['reference_data[feature_name]', 'production_data[feature_name]'], {}), '(reference_data[feature_name], production_data[feature_name])\n', (2739, 2800), False, 'from scipy.stats import ks_2samp, chisquare\n'), ((2924, 2965), 'numpy.isfinite', 'np.isfinite', (['reference_data[feature_name]'], {}), '(reference_data[feature_name])\n', (2935, 2965), True, 'import numpy as np\n'), ((3042, 3084), 'numpy.isfinite', 'np.isfinite', (['production_data[feature_name]'], {}), '(production_data[feature_name])\n', (3053, 3084), True, 'import numpy as np\n'), ((2349, 2391), 'numpy.isfinite', 'np.isfinite', (['production_data[feature_name]'], {}), '(production_data[feature_name])\n', (2360, 2391), True, 'import numpy as np\n'), ((2558, 2599), 'numpy.isfinite', 'np.isfinite', (['reference_data[feature_name]'], {}), '(reference_data[feature_name])\n', (2569, 2599), True, 'import numpy as np\n'), ((3159, 3200), 'numpy.isfinite', 'np.isfinite', (['reference_data[feature_name]'], {}), '(reference_data[feature_name])\n', (3170, 3200), True, 'import numpy as np\n'), ((3272, 3314), 'numpy.isfinite', 'np.isfinite', (['production_data[feature_name]'], {}), '(production_data[feature_name])\n', (3283, 3314), True, 'import numpy as np\n'), ((4117, 4159), 'numpy.isfinite', 'np.isfinite', (['production_data[feature_name]'], {}), '(production_data[feature_name])\n', (4128, 
4159), True, 'import numpy as np\n'), ((4326, 4367), 'numpy.isfinite', 'np.isfinite', (['reference_data[feature_name]'], {}), '(reference_data[feature_name])\n', (4337, 4367), True, 'import numpy as np\n')] |
"""Classes that represent the state of the entire system and entities within.
These classes wrap protobufs, which are basically a fancy NamedTuple that is
generated by the `build` Makefile target. You can read more about protobufs
online, but mainly they're helpful for serializing data over the network."""
import logging
from enum import Enum
from typing import List, Dict, Optional, Union
import numpy as np
import vpython
from orbitx import orbitx_pb2 as protos
from orbitx import strings
log = logging.getLogger()
# This Request class is just an alias of the Command protobuf message. We
# provide this so that nobody has to directly import orbitx_pb2, and so that
# we can change this wrapper class in the future.
Request = protos.Command

# These entity fields do not change during simulation. Thus, we don't have to
# store them in a big 1D numpy array for use in scipy.solve_ivp.
_PER_ENTITY_UNCHANGING_FIELDS = [
    'name', 'mass', 'r', 'artificial', 'atmosphere_thickness',
    'atmosphere_scaling'
]
# Every other Entity proto field is mutable during simulation and lives in the
# flat array representation; _ENTITY_FIELD_ORDER maps a mutable field name to
# its row number within that array.
_PER_ENTITY_MUTABLE_FIELDS = [field.name for
                              field in protos.Entity.DESCRIPTOR.fields if
                              field.name not in _PER_ENTITY_UNCHANGING_FIELDS]
_ENTITY_FIELD_ORDER = {name: index for index, name in
                       enumerate(_PER_ENTITY_MUTABLE_FIELDS)}
# Engineering sizes: element counts come from the canonical name lists in
# `strings`; per-element field counts come straight from the proto descriptors
# so they stay in sync with orbitx.proto.
_N_COMPONENTS = len(strings.COMPONENT_NAMES)
_N_COOLANT_LOOPS = len(strings.COOLANT_LOOP_NAMES)
_N_RADIATORS = len(strings.RADIATOR_NAMES)
_N_COMPONENT_FIELDS = len(protos.EngineeringState.Component.DESCRIPTOR.fields)
_N_COOLANT_FIELDS = len(protos.EngineeringState.CoolantLoop.DESCRIPTOR.fields)
_N_RADIATOR_FIELDS = len(protos.EngineeringState.Radiator.DESCRIPTOR.fields)

# A special field, we reference it a couple times so turn it into a symbol
# to guard against string literal typos.
_LANDED_ON = "landed_on"
assert _LANDED_ON in [field.name for field in protos.Entity.DESCRIPTOR.fields]

# Make sure this is in sync with the corresponding enum in orbitx.proto!
Navmode = Enum('Navmode', zip([  # type: ignore
    'Manual', 'CCW Prograde', 'CW Retrograde', 'Depart Reference',
    'Approach Target', 'Pro Targ Velocity', 'Anti Targ Velocity'
], protos.Navmode.values()))
class Entity:
    """A wrapper around protos.Entity.

    Example usage:
    assert Entity(protos.Entity(x=5)).x == 5
    assert Entity(protos.Entity(x=1, y=2)).pos == [1, 2]

    To add fields, or see what fields exists, please see orbitx.proto,
    specifically the "message Entity" declaration.

    Note: the per-field accessors (x, y, heading, ...) are not defined in this
    class body; they are attached as properties by the module-level loop just
    below this class definition.
    """

    def __init__(self, entity: protos.Entity):
        self.proto = entity

    def __repr__(self):
        return self.proto.__repr__()

    def __str__(self):
        return self.proto.__str__()

    # These are filled in just below this class definition. These stubs are for
    # static type analysis with mypy.
    name: str
    x: float
    y: float
    vx: float
    vy: float
    r: float
    mass: float
    heading: float
    spin: float
    fuel: float
    throttle: float
    landed_on: str
    broken: bool
    artificial: bool
    atmosphere_thickness: float
    atmosphere_scaling: float

    def screen_pos(self, origin: 'Entity') -> vpython.vector:
        """The on-screen position of this entity, relative to the origin."""
        return vpython.vector(self.x - origin.x, self.y - origin.y, 0)

    @property
    def pos(self):
        # copy=True: mutating the returned array does NOT write back into the
        # entity; assign through the setter instead.
        return np.array((self.x, self.y), dtype=PhysicsState.DTYPE, copy=True)

    @pos.setter
    def pos(self, coord):
        self.x = coord[0]
        self.y = coord[1]

    @property
    def v(self):
        # Velocity vector (vx, vy); like `pos`, a fresh array each call.
        return np.asarray([self.vx, self.vy])

    @v.setter
    def v(self, coord):
        self.vx = coord[0]
        self.vy = coord[1]

    @property
    def dockable(self):
        # Only AYSE can be docked with.
        return self.name == strings.AYSE

    def landed(self) -> bool:
        """Convenient and more elegant check to see if the entity is landed."""
        return self.landed_on != ''
class _EntityView(Entity):
    """A view into a PhysicsState, very fast to create and use.
    Setting fields will update the parent PhysicsState appropriately.

    Like Entity, the per-field accessors are attached by the module-level loop
    below; they read/write the creator PhysicsState's array and proto state
    directly instead of owning any data."""

    def __init__(self, creator: 'PhysicsState', index: int):
        # Deliberately does NOT call Entity.__init__: a view holds no proto of
        # its own, only a reference into its creator at a fixed entity index.
        self._creator = creator
        self._index = index

    def __repr__(self):
        # This is actually a bit hacky. This line implies that orbitx_pb2
        # protobuf generated code can't tell the difference between an
        # orbitx_pb2.Entity and an _EntityView. Turns out, it can't! But
        # hopefully this assumption always holds.
        return repr(Entity(self))

    def __str__(self):
        return str(Entity(self))
# I feel like I should apologize before things get too crazy. Once you read
# the following module-level loop and ask "why _EntityView a janky subclass of
# Entity, and is implemented using janky array indexing into data owned by a
# PhysicsState?".
# My excuse is that I wanted a way to index into PhysicsState and get an Entity
# for ease of use and code. I found this to be a useful API that made physics
# code cleaner, but it was _too_ useful! The PhysicsState.__getitem__ method
# that implemented this indexing was so expensive and called so often that it
# was _half_ the runtime of OrbitX at high time accelerations! My solution to
# this performance issue was to optimize PhysicsState.__getitem__ by return
# an Entity (specifically, an _EntityView) that was very fast to instantiate
# and very fast to access.
# Hence: janky array-indexing accessors is my super-optimization! 2x speedup!
for field in protos.Entity.DESCRIPTOR.fields:

    # For every field in the underlying protobuf entity, make a
    # convenient equivalent property to allow code like the following:
    # Entity(entity).heading = 5

    # Note the `name=field.name` / `field_n=field_n` default arguments
    # throughout: defaults are evaluated at `def` time, which binds the
    # *current* loop value and avoids the classic late-binding-closure bug.
    def entity_fget(self, name=field.name):
        return getattr(self.proto, name)

    def entity_fset(self, val, name=field.name):
        return setattr(self.proto, name, val)

    def entity_fdel(self, name=field.name):
        return delattr(self.proto, name)

    setattr(Entity, field.name, property(
        fget=entity_fget, fset=entity_fset, fdel=entity_fdel,
        doc=f"Entity proxy of the underlying field, self.proto.{field.name}"))

    # Unchanging fields are read straight from the creator's proto state...
    def entity_view_unchanging_fget(self, name=field.name):
        return getattr(self._creator._proto_state.entities[self._index], name)

    def entity_view_unchanging_fset(self, val, name=field.name):
        return setattr(
            self._creator._proto_state.entities[self._index], name, val)

    # ...while mutable fields live in the creator's flat numpy array, at row
    # field_n (one row per mutable field, one column per entity).
    field_n: Optional[int]
    if field.name in _PER_ENTITY_MUTABLE_FIELDS:
        field_n = _ENTITY_FIELD_ORDER[field.name]
    else:
        field_n = None

    if field.cpp_type in [field.CPPTYPE_FLOAT, field.CPPTYPE_DOUBLE]:
        def entity_view_mutable_fget(self, field_n=field_n):
            return self._creator._array_rep[
                self._creator._n * field_n + self._index]

        def entity_view_mutable_fset(self, val, field_n=field_n):
            self._creator._array_rep[
                self._creator._n * field_n + self._index] = val
    elif field.cpp_type == field.CPPTYPE_BOOL:
        # Same as if it's a float, but we have to convert float -> bool.
        def entity_view_mutable_fget(self, field_n=field_n):
            return bool(
                self._creator._array_rep[
                    self._creator._n * field_n + self._index])

        def entity_view_mutable_fset(self, val, field_n=field_n):
            self._creator._array_rep[
                self._creator._n * field_n + self._index] = val
    elif field.name == _LANDED_ON:
        # Special case, we store the index of the entity we're landed on as a
        # float, but we have to convert this to an int then the name of the
        # entity.
        def entity_view_mutable_fget(self, field_n=field_n):
            entity_index = int(
                self._creator._array_rep[
                    self._creator._n * field_n + self._index])
            if entity_index == PhysicsState.NO_INDEX:
                return ''
            return self._creator._entity_names[entity_index]

        def entity_view_mutable_fset(self, val, field_n=field_n):
            assert isinstance(val, str)
            self._creator._array_rep[
                self._creator._n * field_n + self._index] = \
                self._creator._name_to_index(val)
    elif field.cpp_type == field.CPPTYPE_STRING:
        assert field.name in _PER_ENTITY_UNCHANGING_FIELDS
    else:
        raise NotImplementedError(
            "Encountered a field in the protobuf definition of Entity that "
            "is of a type we haven't handled.")

    if field.name in _PER_ENTITY_UNCHANGING_FIELDS:
        # Note there is no fdel defined. The data is owned by the PhysicalState
        # so the PhysicalState should delete data on its own time.
        setattr(_EntityView, field.name, property(
            fget=entity_view_unchanging_fget,
            fset=entity_view_unchanging_fset,
            doc=f"_EntityView proxy of unchanging field {field.name}"
        ))
    else:
        assert field.name in _PER_ENTITY_MUTABLE_FIELDS
        setattr(_EntityView, field.name, property(
            fget=entity_view_mutable_fget,
            fset=entity_view_mutable_fset,
            doc=f"_EntityView proxy of mutable field {field.name}"
        ))
class CoolantView:
    """A read/write view of one coolant loop's slice of the engineering array.

    Instances are handed out by EngineeringState; don't construct these
    yourself."""

    def __init__(self, array_rep: np.ndarray, coolant_n: int):
        """Store the backing array (coolant data starts at element 0) and
        which coolant loop (0-based) this view exposes."""
        self._array = array_rep
        self._n = coolant_n

    def name(self):
        return strings.COOLANT_LOOP_NAMES[self._n]

    def _slot(self, offset: int) -> int:
        # Each loop owns _N_COOLANT_FIELDS consecutive elements of the array.
        return self._n * _N_COOLANT_FIELDS + offset

    @property
    def coolant_temp(self) -> float:
        return self._array[self._slot(0)]

    @coolant_temp.setter
    def coolant_temp(self, val: float):
        self._array[self._slot(0)] = val

    @property
    def primary_pump_on(self) -> bool:
        return bool(self._array[self._slot(1)])

    @primary_pump_on.setter
    def primary_pump_on(self, val: bool):
        self._array[self._slot(1)] = val

    @property
    def secondary_pump_on(self) -> bool:
        return bool(self._array[self._slot(2)])

    @secondary_pump_on.setter
    def secondary_pump_on(self, val: bool):
        self._array[self._slot(2)] = val
# Alias this protobuf type so other users of the data_structures library
# don't have to import the protobuf file themselves.
ComponentCoolantCnxn = protos.ComponentCoolantCnxn


class ComponentView:
    """Represents a single Component.

    Should not be instantiated outside of EngineeringState."""

    def __init__(self, parent: 'EngineeringState', array_rep: np.ndarray, component_n: int):
        """Called by an EngineeringState factory.

        parent: the owning EngineeringState, used to look up coolant loops.
        array_rep: an array that, starting at 0, contains all data for all components.
        component_n: an index specifying which component, starting at 0.
        """
        self._parent = parent
        self._array = array_rep
        self._n = component_n

    def name(self):
        return strings.COMPONENT_NAMES[self._n]

    @property
    def connected(self) -> bool:
        return bool(self._array[self._n * _N_COMPONENT_FIELDS + 0])

    @connected.setter
    def connected(self, val: bool):
        self._array[self._n * _N_COMPONENT_FIELDS + 0] = val

    @property
    def temperature(self) -> float:
        return self._array[self._n * _N_COMPONENT_FIELDS + 1]

    @temperature.setter
    def temperature(self, val: float):
        self._array[self._n * _N_COMPONENT_FIELDS + 1] = val

    @property
    def resistance(self) -> float:
        return self._array[self._n * _N_COMPONENT_FIELDS + 2]

    @resistance.setter
    def resistance(self, val: float):
        self._array[self._n * _N_COMPONENT_FIELDS + 2] = val

    @property
    def voltage(self) -> float:
        return self._array[self._n * _N_COMPONENT_FIELDS + 3]

    @voltage.setter
    def voltage(self, val: float):
        self._array[self._n * _N_COMPONENT_FIELDS + 3] = val

    @property
    def current(self) -> float:
        return self._array[self._n * _N_COMPONENT_FIELDS + 4]

    @current.setter
    def current(self, val: float):
        self._array[self._n * _N_COMPONENT_FIELDS + 4] = val

    def get_coolant_loops(self) -> List[CoolantView]:
        """Return the coolant loops this component is connected to (0-2)."""
        if self.coolant_connection == ComponentCoolantCnxn.DISCONNECTED:
            return []
        elif self.coolant_connection == ComponentCoolantCnxn.HAB_ONE:
            return [self._parent.coolant_loops[0]]
        elif self.coolant_connection == ComponentCoolantCnxn.HAB_TWO:
            return [self._parent.coolant_loops[1]]
        elif self.coolant_connection == ComponentCoolantCnxn.HAB_BOTH:
            return [self._parent.coolant_loops[0],
                    self._parent.coolant_loops[1]]
        elif self.coolant_connection == ComponentCoolantCnxn.AYSE_ONE:
            return [self._parent.coolant_loops[2]]
        else:
            # FIX: previously this fell off the end and implicitly returned
            # None for any unhandled enum value, which would crash callers
            # iterating over the result. Fail loudly instead.
            raise ValueError(
                f'Unhandled coolant connection value: {self.coolant_connection}')

    @property
    def coolant_connection(self) -> int:
        return int(self._array[self._n * _N_COMPONENT_FIELDS + 5])

    @coolant_connection.setter
    def coolant_connection(self, val: ComponentCoolantCnxn):
        # FIX: demoted from log.info to log.debug (with lazy %-formatting);
        # the leftover message was spamming the info-level log on every write.
        log.debug('setting coolant %s to %s', self._n, float(val))
        self._array[self._n * _N_COMPONENT_FIELDS + 5] = float(val)
class RadiatorView:
    """A read/write view of one radiator's slice of the engineering array.

    Instances are handed out by EngineeringState; don't construct these
    yourself. To reach the coolant loop a radiator feeds, use
    get_coolant_loop(), e.g.

    physics_state.engineering.radiators[RAD2].get_coolant_loop().coolant_temp
    """

    def __init__(self, parent: 'EngineeringState', array_rep: np.ndarray, radiator_n: int):
        """Store the owning EngineeringState (for coolant-loop lookups), the
        backing array (radiator data starts at element 0), and which radiator
        (0-based) this view exposes."""
        self._parent = parent
        self._array = array_rep
        self._n = radiator_n

    def name(self):
        return strings.RADIATOR_NAMES[self._n]

    def _slot(self, offset: int) -> int:
        # Each radiator owns _N_RADIATOR_FIELDS consecutive array elements.
        return self._n * _N_RADIATOR_FIELDS + offset

    def get_coolant_loop(self) -> CoolantView:
        # attached_to_coolant_loop is 1-based; coolant_loops is 0-based.
        return self._parent.coolant_loops[self.attached_to_coolant_loop - 1]

    @property
    def attached_to_coolant_loop(self) -> int:
        return int(self._array[self._slot(0)])

    @attached_to_coolant_loop.setter
    def attached_to_coolant_loop(self, val: int):
        self._array[self._slot(0)] = val

    @property
    def functioning(self) -> bool:
        return bool(self._array[self._slot(1)])

    @functioning.setter
    def functioning(self, val: bool):
        self._array[self._slot(1)] = val
class EngineeringState:
"""Wrapper around protos.EngineeringState.
Access with physics_state.engineering, e.g.
eng_state = physics_state.engineering
eng_state.master_alarm = True
print(eng_state.components[AUXCOM].resistance)
eng_state.components[LOS].connected = True
eng_state.radiators[RAD2].functioning = False
eng_state.radiators[RAD2].get_coolant_loop().coolant_temp = 50
"""
N_ENGINEERING_FIELDS = (
_N_COMPONENTS * _N_COMPONENT_FIELDS +
_N_COOLANT_LOOPS * _N_COOLANT_FIELDS +
_N_RADIATORS * _N_RADIATOR_FIELDS
)
_COMPONENT_START_INDEX = 0
_COOLANT_START_INDEX = _COMPONENT_START_INDEX + _N_COMPONENTS * _N_COMPONENT_FIELDS
_RADIATOR_START_INDEX = _COOLANT_START_INDEX + _N_COOLANT_LOOPS * _N_COOLANT_FIELDS
class ComponentList:
"""Allows engineering.components[LOS] style indexing."""
def __init__(self, owner: 'EngineeringState'):
self._owner = owner
def __getitem__(self, index: Union[str, int]) -> ComponentView:
if isinstance(index, str):
index = strings.COMPONENT_NAMES.index(index)
elif index >= _N_COMPONENTS:
raise IndexError()
return ComponentView(
self._owner,
self._owner._array[self._owner._COMPONENT_START_INDEX:self._owner._COOLANT_START_INDEX],
index
)
# Use list slicing (with strides, so there's two colons) to get a list of
# all values of each quantity for each Component.
# We only define this accessor for fields we use in _derive.
def Temperature(self) -> np.ndarray:
return self._owner._array[self._owner._COMPONENT_START_INDEX+1:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
def Resistance(self) -> np.ndarray:
return self._owner._array[self._owner._COMPONENT_START_INDEX+2:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
def Voltage(self) -> np.ndarray:
return self._owner._array[self._owner._COMPONENT_START_INDEX+3:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
def Current(self) -> np.ndarray:
return self._owner._array[self._owner._COMPONENT_START_INDEX+4:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
class CoolantLoopList:
"""Allows engineering.coolant_loops[LP1] style indexing."""
def __init__(self, owner: 'EngineeringState'):
self._owner = owner
def __getitem__(self, index: Union[str, int]) -> CoolantView:
if isinstance(index, str):
index = strings.COOLANT_LOOP_NAMES.index(index)
elif index >= _N_COOLANT_LOOPS:
raise IndexError()
return CoolantView(
self._owner._array[self._owner._COOLANT_START_INDEX:self._owner._RADIATOR_START_INDEX],
index
)
# As above, list slicing with strides.
def CoolantTemp(self) -> np.ndarray:
return self._owner._array[self._owner._COOLANT_START_INDEX+0:self._owner._RADIATOR_START_INDEX:_N_COOLANT_FIELDS]
class RadiatorList:
"""Allows engineering.radiators[RAD1] style indexing."""
def __init__(self, owner: 'EngineeringState'):
self._owner = owner
def __getitem__(self, index: Union[str, int]) -> RadiatorView:
if isinstance(index, str):
index = strings.RADIATOR_NAMES.index(index)
elif index >= _N_RADIATORS:
raise IndexError()
return RadiatorView(
self._owner,
self._owner._array[self._owner._RADIATOR_START_INDEX:],
index
)
# And as above, list slicing with strides.
def Functioning(self) -> np.ndarray:
return self._owner._array[self._owner._RADIATOR_START_INDEX+1::_N_RADIATOR_FIELDS]
def __init__(self,
array_rep: np.ndarray, proto_state: protos.EngineeringState, *,
parent_state: 'PhysicsState', populate_array: bool):
"""Called by a PhysicsState on creation.
array_rep: a sufficiently-sized array to store all component, coolant,
and radiator data. EngineeringState has full control over
contents, starting at element 0.
proto_state: the underlying proto we're wrapping.
parent_state: provides a way for EngineeringState to mirror a couple
pieces of data from the parent, e.g. hab fuel.
populate_array: flag that is set when we need to fill array_rep with data.
"""
assert len(proto_state.components) == _N_COMPONENTS
assert len(proto_state.coolant_loops) == _N_COOLANT_LOOPS
assert len(proto_state.radiators) == _N_RADIATORS
self.components = self.ComponentList(self)
self.coolant_loops = self.CoolantLoopList(self)
self.radiators = self.RadiatorList(self)
self._array = array_rep
self._proto_state = proto_state
self._parent_state = parent_state
if populate_array:
# We've been asked to populate the data array.
# The order of data in the array is of course important.
write_marker = 0
# Is this loop janky? I would say yes! Could this result in
# out-of-bounds writes? I hope not!
for proto_list, descriptor in [
(proto_state.components, protos.EngineeringState.Component.DESCRIPTOR),
(proto_state.coolant_loops, protos.EngineeringState.CoolantLoop.DESCRIPTOR),
(proto_state.radiators, protos.EngineeringState.Radiator.DESCRIPTOR),
]:
for proto in proto_list:
for field in descriptor.fields:
array_rep[write_marker] = getattr(proto, field.name)
write_marker += 1
@property
def habitat_fuel(self):
return self._parent_state[strings.HABITAT].fuel
@property
def ayse_fuel(self):
return self._parent_state[strings.AYSE].fuel
    # Alarm and status flags: plain passthroughs to the backing
    # protos.EngineeringState message. Note the setters write only to the
    # proto, not to the numpy array representation.
    @property
    def master_alarm(self) -> bool:
        return self._proto_state.master_alarm
    @master_alarm.setter
    def master_alarm(self, val: bool):
        self._proto_state.master_alarm = val
    @property
    def radiation_alarm(self) -> bool:
        return self._proto_state.radiation_alarm
    @radiation_alarm.setter
    def radiation_alarm(self, val: bool):
        self._proto_state.radiation_alarm = val
    @property
    def asteroid_alarm(self) -> bool:
        return self._proto_state.asteroid_alarm
    @asteroid_alarm.setter
    def asteroid_alarm(self, val: bool):
        self._proto_state.asteroid_alarm = val
    @property
    def hab_reactor_alarm(self) -> bool:
        return self._proto_state.hab_reactor_alarm
    @hab_reactor_alarm.setter
    def hab_reactor_alarm(self, val: bool):
        self._proto_state.hab_reactor_alarm = val
    @property
    def ayse_reactor_alarm(self) -> bool:
        return self._proto_state.ayse_reactor_alarm
    @ayse_reactor_alarm.setter
    def ayse_reactor_alarm(self, val: bool):
        self._proto_state.ayse_reactor_alarm = val
    @property
    def hab_gnomes(self) -> bool:
        return self._proto_state.hab_gnomes
    @hab_gnomes.setter
    def hab_gnomes(self, val: bool):
        self._proto_state.hab_gnomes = val
    # TODO(patrick): Make sure this is also represented in the proto, and array rep.
    # Radiation shield setting; a simple proto passthrough (see TODO above —
    # unlike the other fields here it may not be mirrored elsewhere yet).
    @property
    def rad_shield_percentage(self) -> int:
        return self._proto_state.rad_shield_percentage
    @rad_shield_percentage.setter
    def rad_shield_percentage(self, val: int):
        self._proto_state.rad_shield_percentage = val
    def as_proto(self) -> protos.EngineeringState:
        """Returns a deep copy of this EngineeringState as a protobuf.

        The copy is seeded from self._proto_state via CopyFrom, then every
        mutable field is overwritten from the live list views
        (self.components / self.coolant_loops / self.radiators), so the
        returned proto reflects current values rather than whatever the
        backing proto last held.
        """
        constructed_protobuf = protos.EngineeringState()
        constructed_protobuf.CopyFrom(self._proto_state)
        # Refresh per-component mutable fields from the array-backed views.
        for component_data, component in zip(self.components, constructed_protobuf.components):
            (
                component.connected, component.temperature,
                component.resistance, component.voltage,
                component.current, component.coolant_connection
            ) = (
                component_data.connected, component_data.temperature,
                component_data.resistance, component_data.voltage,
                component_data.current, component_data.coolant_connection
            )
        # Refresh per-coolant-loop fields.
        for coolant_data, coolant in zip(self.coolant_loops, constructed_protobuf.coolant_loops):
            (
                coolant.coolant_temp, coolant.primary_pump_on,
                coolant.secondary_pump_on
            ) = (
                coolant_data.coolant_temp, coolant_data.primary_pump_on,
                coolant_data.secondary_pump_on
            )
        # Refresh per-radiator fields.
        for radiator_data, radiator in zip(self.radiators, constructed_protobuf.radiators):
            (
                radiator.attached_to_coolant_loop, radiator.functioning,
            ) = (
                radiator_data.attached_to_coolant_loop, radiator_data.functioning,
            )
        return constructed_protobuf
class PhysicsState:
    """The physical state of the system for use in solve_ivp and elsewhere.
    The following operations are supported:
    # Construction without a y-vector, taking all data from a PhysicalState
    PhysicsState(None, protos.PhysicalState)
    # Faster Construction from a y-vector and protos.PhysicalState
    PhysicsState(ivp_solution.y, protos.PhysicalState)
    # Access of a single Entity in the PhysicsState, by index or Entity name
    my_entity: Entity = PhysicsState[0]
    my_entity: Entity = PhysicsState['Earth']
    # Iteration over all Entitys in the PhysicsState
    for entity in my_physics_state:
        print(entity.name, entity.pos)
    # Convert back to a protos.PhysicalState (this almost never happens)
    my_physics_state.as_proto()
    Example usage:
    y = PhysicsState(y_1d, physical_state)
    entity = y[0]
    y[HABITAT] = habitat
    scipy.solve_ivp(y.y0())
    See help(PhysicsState.__init__) for how to initialize. Basically, the first
    `y` instantiated in the lifetime of the program will be created by a call to
    PhysicsState.__init__. But for the program to have good performance,
    PhysicsState.__init__ should have both parameters filled if it's being
    called more than once a second while OrbitX is running normally.
    """
    class NoEntityError(ValueError):
        """Raised when an entity is not found."""
        pass
    # For if an entity is not landed to anything
    NO_INDEX = -1
    # The number of single-element values at the end of the y-vector.
    # Currently just SRB_TIME and TIME_ACC are appended to the end. If there
    # are more values appended to the end, increment this and follow the same
    # code for .srb_time and .time_acc
    N_SINGULAR_ELEMENTS = 2
    ENTITY_START_INDEX = 0
    # Engineering data occupies the tail of the y-vector; these are negative
    # offsets from the end of the array.
    ENGINEERING_START_INDEX = -(EngineeringState.N_ENGINEERING_FIELDS)
    SRB_TIME_INDEX = ENGINEERING_START_INDEX - 2
    TIME_ACC_INDEX = SRB_TIME_INDEX + 1
    # Datatype of internal y-vector
    DTYPE = np.float64
    def __init__(self,
                 y: Optional[np.ndarray],
                 proto_state: protos.PhysicalState):
        """Collects data from proto_state and y, when y is not None.
        There are two kinds of values we care about:
        1) values that change during simulation (like position, velocity, etc)
        2) values that do not change (like mass, radius, name, etc)
        If both proto_state and y are given, 1) is taken from y and
        2) is taken from proto_state. This is a very quick operation.
        If y is None, both 1) and 2) are taken from proto_state, and a new
        y vector is generated. This is a somewhat expensive operation."""
        assert isinstance(proto_state, protos.PhysicalState)
        assert isinstance(y, np.ndarray) or y is None
        # self._proto_state will have positions, velocities, etc for all
        # entities. DO NOT USE THESE they will be stale. Use the accessors of
        # this class instead!
        self._proto_state = protos.PhysicalState()
        self._proto_state.CopyFrom(proto_state)
        self._n = len(proto_state.entities)
        self._entity_names = \
            [entity.name for entity in self._proto_state.entities]
        self._array_rep: np.ndarray
        if y is None:
            # We rely on having an internal array representation we can refer
            # to, so we have to build up this array representation.
            # Layout: field-major entity data, then SRB time + time acc,
            # then the engineering fields at the tail.
            self._array_rep = np.empty(
                len(proto_state.entities) * len(_PER_ENTITY_MUTABLE_FIELDS)
                + self.N_SINGULAR_ELEMENTS
                + EngineeringState.N_ENGINEERING_FIELDS, dtype=self.DTYPE)
            for field_name, field_n in _ENTITY_FIELD_ORDER.items():
                for entity_index, entity in enumerate(proto_state.entities):
                    proto_value = getattr(entity, field_name)
                    # Internally translate string names to indices, otherwise
                    # our entire y vector will turn into a string vector oh no.
                    # Note this will convert to floats, not integer indices.
                    if field_name == _LANDED_ON:
                        proto_value = self._name_to_index(proto_value)
                    self._array_rep[self._n * field_n + entity_index] = proto_value
            self._array_rep[self.SRB_TIME_INDEX] = proto_state.srb_time
            self._array_rep[self.TIME_ACC_INDEX] = proto_state.time_acc
            # It's IMPORTANT that we pass in self._array_rep, because otherwise the numpy
            # array will be copied and EngineeringState won't be modifying our numpy array.
            self.engineering = EngineeringState(
                self._array_rep[self.ENGINEERING_START_INDEX:],
                self._proto_state.engineering,
                parent_state=self,
                populate_array=True
            )
        else:
            self._array_rep = y.astype(self.DTYPE)
            self._proto_state.srb_time = y[self.SRB_TIME_INDEX]
            self._proto_state.time_acc = y[self.TIME_ACC_INDEX]
            self.engineering = EngineeringState(
                self._array_rep[self.ENGINEERING_START_INDEX:],
                self._proto_state.engineering,
                parent_state=self,
                populate_array=False )
        assert len(self._array_rep.shape) == 1, \
            f'y is not 1D: {self._array_rep.shape}'
        n_entities = len(proto_state.entities)
        assert self._array_rep.size == (
            n_entities * len(_PER_ENTITY_MUTABLE_FIELDS)
            + self.N_SINGULAR_ELEMENTS
            + EngineeringState.N_ENGINEERING_FIELDS
        )
        # Normalize all headings into [0, 2*pi), in place.
        np.mod(self.Heading, 2 * np.pi, out=self.Heading)
        # Lazily-computed cache, see the Atmospheres property.
        self._entities_with_atmospheres: Optional[List[int]] = None
    def _y_component(self, field_name: str) -> np.ndarray:
        """Returns an n-array with the value of a component for each entity."""
        # The y-vector is field-major (all entities' 'x', then all 'y', ...),
        # so one field is a single contiguous n-sized slice (a view, not a
        # copy: writes propagate back into the y-vector).
        return self._array_rep[
            _ENTITY_FIELD_ORDER[field_name] * self._n:
            (_ENTITY_FIELD_ORDER[field_name] + 1) * self._n
        ]
    def _index_to_name(self, index: int) -> str:
        """Translates an index into the entity list to the right name."""
        i = int(index)
        return self._entity_names[i] if i != self.NO_INDEX else ''
    def _name_to_index(self, name: Optional[str]) -> int:
        """Finds the index of the entity with the given name."""
        try:
            assert name is not None
            return self._entity_names.index(name) if name != '' \
                else self.NO_INDEX
        except ValueError:
            raise self.NoEntityError(f'{name} not in entity list')
    def y0(self):
        """Returns a y-vector suitable as input for scipy.solve_ivp."""
        return self._array_rep
    def as_proto(self) -> protos.PhysicalState:
        """Creates a protos.PhysicalState view into all internal data.
        Expensive. Consider one of the other accessors, which are faster.
        For example, if you want to iterate over all elements, use __iter__
        by doing:
        for entity in my_physics_state: print(entity.name)"""
        constructed_protobuf = protos.PhysicalState()
        constructed_protobuf.CopyFrom(self._proto_state)
        # Refresh the (possibly stale) proto copy from the live entity views.
        for entity_data, entity in zip(self, constructed_protobuf.entities):
            (
                entity.x, entity.y, entity.vx, entity.vy,
                entity.heading, entity.spin, entity.fuel,
                entity.throttle, entity.landed_on,
                entity.broken
            ) = (
                entity_data.x, entity_data.y, entity_data.vx, entity_data.vy,
                entity_data.heading, entity_data.spin, entity_data.fuel,
                entity_data.throttle, entity_data.landed_on,
                entity_data.broken
            )
        constructed_protobuf.engineering.CopyFrom(self.engineering.as_proto())
        return constructed_protobuf
    def __len__(self):
        """Implements `len(physics_state)`."""
        return self._n
    def __iter__(self):
        """Implements `for entity in physics_state:` loops."""
        for i in range(0, self._n):
            yield self.__getitem__(i)
    def __getitem__(self, index: Union[str, int]) -> Entity:
        """Returns a Entity view at a given name or index.
        Allows the following:
        entity = physics_state[2]
        entity = physics_state[HABITAT]
        entity.x = 5 # Propagates to physics_state.
        """
        if isinstance(index, str):
            # Turn a name-based index into an integer
            index = self._entity_names.index(index)
        i = int(index)
        return _EntityView(self, i)
    def __setitem__(self, index: Union[str, int], val: Entity):
        """Puts a Entity at a given name or index in the state.
        Allows the following:
        PhysicsState[2] = physics_entity
        PhysicsState[HABITAT] = physics_entity
        """
        if isinstance(val, _EntityView) and val._creator == self:
            # The _EntityView is a view into our own data, so we already have
            # the data.
            return
        if isinstance(index, str):
            # Turn a name-based index into an integer
            index = self._entity_names.index(index)
        i = int(index)
        entity = self[i]
        # Writing through the _EntityView propagates into the y-vector.
        (
            entity.x, entity.y, entity.vx, entity.vy, entity.heading,
            entity.spin, entity.fuel, entity.throttle, entity.landed_on,
            entity.broken
        ) = (
            val.x, val.y, val.vx, val.vy, val.heading,
            val.spin, val.fuel, val.throttle, val.landed_on,
            val.broken
        )
    def __repr__(self):
        return self.as_proto().__repr__()
    def __str__(self):
        return self.as_proto().__str__()
    @property
    def timestamp(self) -> float:
        return self._proto_state.timestamp
    @timestamp.setter
    def timestamp(self, t: float):
        self._proto_state.timestamp = t
    @property
    def srb_time(self) -> float:
        return self._proto_state.srb_time
    @srb_time.setter
    def srb_time(self, val: float):
        # Mirrored into both the proto and the y-vector slot.
        self._proto_state.srb_time = val
        self._array_rep[self.SRB_TIME_INDEX] = val
    @property
    def parachute_deployed(self) -> bool:
        return self._proto_state.parachute_deployed
    @parachute_deployed.setter
    def parachute_deployed(self, val: bool):
        self._proto_state.parachute_deployed = val
    # Bulk field accessors: each returns the n-sized y-vector slice holding
    # that field for every entity (see _y_component).
    @property
    def X(self):
        return self._y_component('x')
    @property
    def Y(self):
        return self._y_component('y')
    @property
    def VX(self):
        return self._y_component('vx')
    @property
    def VY(self):
        return self._y_component('vy')
    @property
    def Heading(self):
        return self._y_component('heading')
    @property
    def Spin(self):
        return self._y_component('spin')
    @property
    def Fuel(self):
        return self._y_component('fuel')
    @property
    def Throttle(self):
        return self._y_component('throttle')
    @property
    def LandedOn(self) -> Dict[int, int]:
        """Returns a mapping from index to index of entity landings.
        If the 0th entity is landed on the 2nd entity, 0 -> 2 will be mapped.
        """
        landed_map = {}
        for landed, landee in enumerate(
                self._y_component('landed_on')):
            if int(landee) != self.NO_INDEX:
                landed_map[landed] = int(landee)
        return landed_map
    @property
    def Broken(self):
        return self._y_component('broken')
    @property
    def Atmospheres(self) -> List[int]:
        """Returns a list of indexes of entities that have an atmosphere."""
        # Computed on first access and cached; entities (and their
        # atmosphere parameters) don't change within this state's lifetime.
        if self._entities_with_atmospheres is None:
            self._entities_with_atmospheres = []
            for index, entity in enumerate(self._proto_state.entities):
                if entity.atmosphere_scaling != 0 and \
                        entity.atmosphere_thickness != 0:
                    self._entities_with_atmospheres.append(index)
        return self._entities_with_atmospheres
    @property
    def time_acc(self) -> float:
        """Returns the time acceleration, e.g. 1x or 50x."""
        return self._proto_state.time_acc
    @time_acc.setter
    def time_acc(self, new_acc: float):
        # Mirrored into both the proto and the y-vector slot, like srb_time.
        self._proto_state.time_acc = new_acc
        self._array_rep[self.TIME_ACC_INDEX] = new_acc
    def craft_entity(self):
        """Convenience function, a full Entity representing the craft."""
        return self[self.craft]
    @property
    def craft(self) -> Optional[str]:
        """Returns the currently-controlled craft.
        Not actually backed by any stored field, just a calculation."""
        if strings.HABITAT not in self._entity_names and \
                strings.AYSE not in self._entity_names:
            return None
        if strings.AYSE not in self._entity_names:
            return strings.HABITAT
        hab_index = self._name_to_index(strings.HABITAT)
        ayse_index = self._name_to_index(strings.AYSE)
        if self._y_component('landed_on')[hab_index] == ayse_index:
            # Habitat is docked with AYSE, AYSE is active craft
            return strings.AYSE
        else:
            return strings.HABITAT
    def reference_entity(self):
        """Convenience function, a full Entity representing the reference."""
        return self[self._proto_state.reference]
    @property
    def reference(self) -> str:
        """Returns current reference of the physics system, shown in GUI."""
        return self._proto_state.reference
    @reference.setter
    def reference(self, name: str):
        self._proto_state.reference = name
    def target_entity(self):
        """Convenience function, a full Entity representing the target."""
        return self[self._proto_state.target]
    @property
    def target(self) -> str:
        """Returns landing/docking target, shown in GUI."""
        return self._proto_state.target
    @target.setter
    def target(self, name: str):
        self._proto_state.target = name
    @property
    def navmode(self) -> Navmode:
        return Navmode(self._proto_state.navmode)
    @navmode.setter
    def navmode(self, navmode: Navmode):
        self._proto_state.navmode = navmode.value
| [
"logging.getLogger",
"orbitx.strings.RADIATOR_NAMES.index",
"orbitx.orbitx_pb2.PhysicalState",
"orbitx.strings.COMPONENT_NAMES.index",
"numpy.asarray",
"orbitx.orbitx_pb2.Navmode.values",
"vpython.vector",
"numpy.array",
"orbitx.strings.COOLANT_LOOP_NAMES.index",
"orbitx.orbitx_pb2.EngineeringStat... | [((504, 523), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (521, 523), False, 'import logging\n'), ((2180, 2203), 'orbitx.orbitx_pb2.Navmode.values', 'protos.Navmode.values', ([], {}), '()\n', (2201, 2203), True, 'from orbitx import orbitx_pb2 as protos\n'), ((3273, 3328), 'vpython.vector', 'vpython.vector', (['(self.x - origin.x)', '(self.y - origin.y)', '(0)'], {}), '(self.x - origin.x, self.y - origin.y, 0)\n', (3287, 3328), False, 'import vpython\n'), ((3378, 3441), 'numpy.array', 'np.array', (['(self.x, self.y)'], {'dtype': 'PhysicsState.DTYPE', 'copy': '(True)'}), '((self.x, self.y), dtype=PhysicsState.DTYPE, copy=True)\n', (3386, 3441), True, 'import numpy as np\n'), ((3584, 3614), 'numpy.asarray', 'np.asarray', (['[self.vx, self.vy]'], {}), '([self.vx, self.vy])\n', (3594, 3614), True, 'import numpy as np\n'), ((23100, 23125), 'orbitx.orbitx_pb2.EngineeringState', 'protos.EngineeringState', ([], {}), '()\n', (23123, 23125), True, 'from orbitx import orbitx_pb2 as protos\n'), ((27438, 27460), 'orbitx.orbitx_pb2.PhysicalState', 'protos.PhysicalState', ([], {}), '()\n', (27458, 27460), True, 'from orbitx import orbitx_pb2 as protos\n'), ((30097, 30146), 'numpy.mod', 'np.mod', (['self.Heading', '(2 * np.pi)'], {'out': 'self.Heading'}), '(self.Heading, 2 * np.pi, out=self.Heading)\n', (30103, 30146), True, 'import numpy as np\n'), ((31612, 31634), 'orbitx.orbitx_pb2.PhysicalState', 'protos.PhysicalState', ([], {}), '()\n', (31632, 31634), True, 'from orbitx import orbitx_pb2 as protos\n'), ((16246, 16282), 'orbitx.strings.COMPONENT_NAMES.index', 'strings.COMPONENT_NAMES.index', (['index'], {}), '(index)\n', (16275, 16282), False, 'from orbitx import strings\n'), ((17777, 17816), 'orbitx.strings.COOLANT_LOOP_NAMES.index', 'strings.COOLANT_LOOP_NAMES.index', (['index'], {}), '(index)\n', (17809, 17816), False, 'from orbitx import strings\n'), ((18599, 18634), 'orbitx.strings.RADIATOR_NAMES.index', 
'strings.RADIATOR_NAMES.index', (['index'], {}), '(index)\n', (18627, 18634), False, 'from orbitx import strings\n')] |
# Titanic survival prediction with a random forest.
# Reads train.csv/test.csv, encodes categorical features, imputes missing
# values, fits a RandomForestClassifier, and writes my_solution.csv.

# Import libraries
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Load the train and test datasets to create two DataFrames
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

# Print the `head` of the train and test dataframes
print(train.head())
print(test.head())

# Encode 'Sex' as an integer (male=0, female=1).
# BUGFIX: the original used chained indexing (train["Sex"][mask] = 0), which
# raises SettingWithCopyWarning and is not guaranteed to write back into the
# DataFrame; .loc performs the assignment on the original frame reliably.
for df in (train, test):
    df.loc[df["Sex"] == "male", "Sex"] = 0
    df.loc[df["Sex"] == "female", "Sex"] = 1

# Impute the Embarked variable: missing ports default to "S" (Southampton)
train["Embarked"] = train["Embarked"].fillna("S")
test["Embarked"] = test["Embarked"].fillna("S")

# Encode 'Embarked' as an integer (S=0, C=1, Q=2), again via .loc
for df in (train, test):
    df.loc[df["Embarked"] == "S", "Embarked"] = 0
    df.loc[df["Embarked"] == "C", "Embarked"] = 1
    df.loc[df["Embarked"] == "Q", "Embarked"] = 2

# Target labels the forest will learn to predict
target = train["Survived"].values

# Impute the single missing Fare in the test set and all missing Ages
test.loc[152, "Fare"] = test["Fare"].median()
train["Age"] = train["Age"].fillna(train["Age"].median())
test["Age"] = test["Age"].fillna(test["Age"].median())

# Feature matrix used to train the forest
features_forest = train[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values

# Building and fitting my_forest
forest = RandomForestClassifier(max_depth=10, min_samples_split=2, n_estimators=100, random_state=1)
my_forest = forest.fit(features_forest, target)

# Print the (training-set) score of the fitted random forest
print(my_forest.score(features_forest, target))

# Compute predictions on our test set features then print the length of the prediction vector
test_features = test[["Pclass", "Age", "Sex", "Fare", "SibSp", "Parch", "Embarked"]].values
pred_forest = my_forest.predict(test_features)
print(len(pred_forest))

# Build the submission: PassengerId as the index, Survived as the column
PassengerId = np.array(test["PassengerId"]).astype(int)
my_solution = pd.DataFrame(pred_forest, PassengerId, columns=["Survived"])
print(my_solution)

# Check that your data frame has 418 entries
print(my_solution.shape)

# Write your solution to a csv file with the name my_solution.csv
my_solution.to_csv("my_solution.csv", index_label=["PassengerId"])
| [
"pandas.DataFrame",
"numpy.array",
"sklearn.ensemble.RandomForestClassifier",
"pandas.read_csv"
] | [((180, 204), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (191, 204), True, 'import pandas as pd\n'), ((210, 233), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (221, 233), True, 'import pandas as pd\n'), ((1366, 1461), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(10)', 'min_samples_split': '(2)', 'n_estimators': '(100)', 'random_state': '(1)'}), '(max_depth=10, min_samples_split=2, n_estimators=100,\n random_state=1)\n', (1388, 1461), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1936, 1996), 'pandas.DataFrame', 'pd.DataFrame', (['pred_forest', 'PassengerId'], {'columns': "['Survived']"}), "(pred_forest, PassengerId, columns=['Survived'])\n", (1948, 1996), True, 'import pandas as pd\n'), ((1880, 1909), 'numpy.array', 'np.array', (["test['PassengerId']"], {}), "(test['PassengerId'])\n", (1888, 1909), True, 'import numpy as np\n')] |
#******************************************************************************
#
# tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow
# Copyright 2018 <NAME>, <NAME>, <NAME>, <NAME>
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
#******************************************************************************
import os, math
import shutil, sys
from random import seed, random, randrange
import uniio
import numpy as np
import scipy.misc
import scipy.ndimage
import imageio
# check whether matplotlib is available to generate vector/quiver plots
import imp
try:
imp.find_module('matplotlib')
import matplotlib.pyplot
found_matplotlib = True
except ImportError:
found_matplotlib = False
#import matplotlib.pyplot as plt
# global channel keys, have to be one char
C_KEY_DEFAULT = 'd'
C_KEY_VELOCITY = 'v'
C_KEY_VORTICITY = 'x'
C_KEY_POSITION = 'p'
# keys into TileCreator.data / data_flags: low-res input vs high-res target
DATA_KEY_LOW = 0
DATA_KEY_HIGH= 1
#keys for augmentation operations
AOPS_KEY_ROTATE = 'rot'
AOPS_KEY_SCALE = 'scale'
AOPS_KEY_ROT90 = 'rot90'
AOPS_KEY_FLIP = 'flip'
# fixed RNG seed so tile selection / augmentation is reproducible across runs
seed( 42 )
# default channel layouts
C_LAYOUT = {
	'dens':C_KEY_DEFAULT,
	'dens_vel':'d,vx,vy,vz'
}
class TileCreator(object):
	def __init__(self, tileSizeLow, simSizeLow=64, upres=2, dim=2, dim_t=1, overlapping=0, densityMinimum=0.02, premadeTiles=False, partTrain=0.8, partTest=0.2, partVal=0, channelLayout_low=C_LAYOUT['dens_vel'], channelLayout_high=C_LAYOUT['dens'], highIsLabel=False, loadPN=False, padding=0):
		'''
			tileSizeLow, simSizeLow: int, [int,int] if 2D, [int,int,int]
			channelLayout: 'key,key,...'
				the keys are NOT case sensitive and leading and trailing whitespace characters are REMOVED.
				key:
					default: d
					velocity: v[label](x|y|z)
						label can be arbitrary or empty,
						key must be unique and x,y must exist while z is optional in 2D, x,y,z must exist in 3D.
						if x does not exist y,z will be ignored (treaded as 'd').
				rest is not yet supported
			premadeTiles: cut regular tiles when loading data, can't use data augmentation
			part(Train|Test|Val): relative size of the different data sets
			highIsLabel: high data is not augmented
			loadHigh:
			simPath: path to the uni simulation files
			loadPath: packed simulations are stored here
		'''
		# DATA DIMENSION
		self.dim_t = dim_t # same for hi_res or low_res
		if dim!=2 and dim!=3:
			self.TCError('Data dimension must be 2 or 3.')
		self.dim = dim
		# TILE SIZE
		# Normalize to a 3-element [z,y,x] list; 2D data uses z=1.
		if np.isscalar(tileSizeLow):
			self.tileSizeLow = [tileSizeLow, tileSizeLow, tileSizeLow]
		elif len(tileSizeLow)==2 and self.dim==2:
			self.tileSizeLow = [1]+tileSizeLow
		elif len(tileSizeLow)==3:
			self.tileSizeLow = tileSizeLow
		else:
			self.TCError('Tile size mismatch.')
		self.tileSizeLow = np.asarray(self.tileSizeLow)
		#SIM SIZE
		if np.isscalar(simSizeLow):
			self.simSizeLow = [simSizeLow, simSizeLow, simSizeLow]
		elif len(simSizeLow)==2 and self.dim==2:
			self.simSizeLow = [1]+simSizeLow
		elif len(simSizeLow)==3:
			self.simSizeLow = simSizeLow
		else:
			self.TCError('Simulation size mismatch.')
		self.simSizeLow = np.asarray(self.simSizeLow)
		if upres < 1:
			self.TCError('Upres must be at least 1.')
		self.upres = upres
		# High-res sizes are derived from the low-res ones, unless high data
		# is only a label (then sizes collapse to [1]).
		if not highIsLabel:
			self.tileSizeHigh = self.tileSizeLow*upres
			self.simSizeHigh = self.simSizeLow*upres
		else:
			self.tileSizeHigh = np.asarray([1])
			self.simSizeHigh = np.asarray([1])
		if self.dim==2:
			self.tileSizeLow[0]=1
			self.tileSizeHigh[0]=1
			self.simSizeLow[0]=1
			self.simSizeHigh[0]=1
		if np.less(self.simSizeLow, self.tileSizeLow).any():
			# NOTE(review): this format string has three '{}' placeholders but
			# only two arguments, so hitting this error path raises IndexError.
			self.TCError('Tile size {} can not be larger than sim size {}, {}.'.format(self.tileSizeLow,self.simSizeLow))
		if densityMinimum<0.:
			self.TCError('densityMinimum can not be negative.')
		self.densityMinimum = densityMinimum
		self.premadeTiles = premadeTiles
		self.useDataAug = False
		#CHANNELS
		# c_low/c_high: parsed channel-key lists; c_lists: per-data-key map of
		# channel type -> channel indices.
		self.c_lists = {}
		self.c_low, self.c_lists[DATA_KEY_LOW] = self.parseChannels(channelLayout_low)
		self.c_high, self.c_lists[DATA_KEY_HIGH] = self.parseChannels(channelLayout_high)
		# print info
		print('\n')
		print('Dimension: {}, time dimension: {}'.format(self.dim,self.dim_t))
		print('Low-res data:')
		print(' channel layout: {}'.format(self.c_low))
		print(' default channels: {}'.format(self.c_lists[DATA_KEY_LOW][C_KEY_DEFAULT]))
		if len(self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY])>0:
			print(' velocity channels: {}'.format(self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY]))
		if len(self.c_lists[DATA_KEY_LOW][C_KEY_VORTICITY])>0:
			print(' vorticity channels: {}'.format(self.c_lists[DATA_KEY_LOW][C_KEY_VORTICITY]))
		print('High-res data:')
		if highIsLabel:
			print(' is Label')
		print(' channel layout: {}'.format(self.c_high))
		print(' default channels: {}'.format(self.c_lists[DATA_KEY_HIGH][C_KEY_DEFAULT]))
		if len(self.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY])>0:
			print(' velocity channels: {}'.format(self.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY]))
		if len(self.c_lists[DATA_KEY_HIGH][C_KEY_VORTICITY])>0:
			print(' vorticity channels: {}'.format(self.c_lists[DATA_KEY_HIGH][C_KEY_VORTICITY]))
		#self.channels=len(self.c)
		# Per-data-key feature flags used by loading/augmentation code.
		self.data_flags = {
			DATA_KEY_LOW:{
				'isLabel':False,
				'channels':len(self.c_low),
				C_KEY_VELOCITY:len(self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY])>0,
				C_KEY_VORTICITY:len(self.c_lists[DATA_KEY_LOW][C_KEY_VORTICITY])>0,
				C_KEY_POSITION:False
			},
			DATA_KEY_HIGH:{
				'isLabel':highIsLabel,
				'channels':len(self.c_high),
				C_KEY_VELOCITY:len(self.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY])>0,
				C_KEY_VORTICITY:len(self.c_lists[DATA_KEY_HIGH][C_KEY_VORTICITY])>0,
				C_KEY_POSITION:False
			}
		}
		if loadPN:
			self.TCError('prev and next tiles not supported.')
		self.hasPN = loadPN
		self.padding=padding
		#if self.hasPN:
		#[z,y,x, velocities an/or position if enabled (density,vel,vel,vel, pos, pos [,pos])]
		#DATA SHAPES
		# Full per-tile / per-frame shapes including the channel axis.
		self.tile_shape_low = np.append(self.tileSizeLow, [self.data_flags[DATA_KEY_LOW]['channels']])
		self.frame_shape_low = np.append(self.simSizeLow, [self.data_flags[DATA_KEY_LOW]['channels']])
		if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
			self.tile_shape_high = np.append(self.tileSizeHigh, [self.data_flags[DATA_KEY_HIGH]['channels']])
			self.frame_shape_high = np.append(self.simSizeHigh, [self.data_flags[DATA_KEY_HIGH]['channels']])
		else:
			self.tile_shape_high = self.tileSizeHigh[:]
			self.frame_shape_high = self.simSizeHigh[:]
		# Minimum total density a tile must contain to be accepted.
		self.densityThreshold = (self.densityMinimum * self.tile_shape_low[0] * self.tile_shape_low[1] * self.tile_shape_low[2])
		self.data = {
			DATA_KEY_LOW:[],
			DATA_KEY_HIGH:[]
		}
		# NOTE(review): `all` shadows the builtin within this method.
		all=partTrain+partTest+partVal
		self.part_train=partTrain/all
		self.part_test=partTest/all
		self.part_validation=partVal/all
	def initDataAugmentation(self, rot=2, minScale=0.85, maxScale=1.15 ,flip=True):
		'''
			set up data augmentation
			rot: 1: 90 degree rotations; 2: full rotation; else: nop rotation
			Scale: if both 1 disable scaling
		'''
		self.useDataAug = True
		"""
			specify the special augmentation operation needed for some channel types here
			will only be applyed if the specified channel type is in the data
			** Tempo Datum may have multiple channels as coherent frames, [batch, z, y, x, t*channels]
			** They are reshaped first before these aops, [batch, z, y, x, t, channels], and then reshape back
			** Because of this extra time dimention, all aops can only do isolate calculations, for e.g., value scaling,
			** Any calculation relay on neighborhood will be wrong, for e.g., spacial scaling (zoom).
		"""
		# Per-data-key, per-augmentation-op table of channel-type handlers:
		# vector channels (velocity/vorticity) need their components
		# transformed along with the spatial transform, unlike scalars.
		self.aops = {
			DATA_KEY_LOW:{
				AOPS_KEY_ROTATE:{
					C_KEY_VELOCITY:self.rotateVelocities,
					C_KEY_VORTICITY:self.rotateVelocities
				},
				AOPS_KEY_SCALE:{
					C_KEY_VELOCITY:self.scaleVelocities,
					C_KEY_VORTICITY:self.scaleVelocities
				},
				AOPS_KEY_ROT90:{
					C_KEY_VELOCITY:self.rotate90Velocities,
					C_KEY_VORTICITY:self.rotate90Velocities
				},
				AOPS_KEY_FLIP:{
					C_KEY_VELOCITY:self.flipVelocities,
					C_KEY_VORTICITY:self.flipVelocities
				}
			},
			DATA_KEY_HIGH:{
				AOPS_KEY_ROTATE:{
					C_KEY_VELOCITY:self.rotateVelocities,
					C_KEY_VORTICITY:self.rotateVelocities
				},
				AOPS_KEY_SCALE:{
					C_KEY_VELOCITY:self.scaleVelocities,
					C_KEY_VORTICITY:self.scaleVelocities
				},
				AOPS_KEY_ROT90:{
					C_KEY_VELOCITY:self.rotate90Velocities,
					C_KEY_VORTICITY:self.rotate90Velocities
				},
				AOPS_KEY_FLIP:{
					C_KEY_VELOCITY:self.flipVelocities,
					C_KEY_VORTICITY:self.flipVelocities
				}
			}
		}
		msg = 'data augmentation: '
		if rot==2:
			self.do_rotation = True
			self.do_rot90 = False
			msg += 'rotation, '
		elif rot==1:
			self.do_rotation = False
			self.do_rot90 = True
			msg += 'rot90, '
			# Axis-pair tuples used to compose the discrete cube rotations
			# enumerated in cube_rot below (4 in 2D, 24 in 3D).
			z=(2,1)
			nz=(1,2)
			x=(1,0)
			y=(0,2)
			nx=(0,1)
			ny=(2,0)
			# thanks to http://www.euclideanspace.com/maths/discrete/groups/categorise/finite/cube/
			self.cube_rot = {2: [[],[z],[z,z],[nz]], 3: [[],[x],[y],[x,x],[x,y],[y,x],[y,y],[nx],[x,x,y],[x,y,x],[x,y,y],[y,x,x],[y,y,x],[ny],[nx,y],[x,x,y,x],[x,x,y,y],[x,y,x,x],[x,ny],[y,nx],[ny,x],[nx,y,x],[x,y,nx],[x,ny,x]]}
		else:
			self.do_rotation = False
			self.do_rot90 = False
		self.scaleFactor = [minScale, maxScale]
		if (self.scaleFactor[0]==1 and self.scaleFactor[1]==1):
			self.do_scaling = False
		else:
			self.do_scaling = True
			msg += 'scaling, '
		self.do_flip = flip
		if self.do_flip:
			msg += 'flip'
		msg += '.'
		print(msg)
		# Parameters forwarded to scipy.ndimage-style transforms.
		self.interpolation_order = 1
		self.fill_mode = 'constant'
def addData(self, low, high):
'''
add data, tiles if premadeTiles, frames otherwise.
low, high: list of or single 3D data np arrays
'''
# check data shape
low = np.asarray(low)
high = np.asarray(high)
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
if len(low.shape)!=len(high.shape): #high-low mismatch
self.TCError('Data shape mismatch. Dimensions: {} low vs {} high. Dimensions must match or use highIsLabel.'.format(len(low.shape),len(high.shape)) )
if not (len(low.shape)==4 or len(low.shape)==5): #not single frame or sequence of frames
self.TCError('Input must be single 3D data or sequence of 3D data. Format: ([batch,] z, y, x, channels). For 2D use z=1.')
if (low.shape[-1]!=(self.dim_t * self.data_flags[DATA_KEY_LOW]['channels'])):
self.TCError('Dim_t ({}) * Channels ({}, {}) configured for LOW-res data don\'t match channels ({}) of input data.'.format(self.dim_t, self.data_flags[DATA_KEY_LOW]['channels'], self.c_low, low.shape[-1]) )
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
if (high.shape[-1]!=(self.dim_t * self.data_flags[DATA_KEY_HIGH]['channels'])):
self.TCError('Dim_t ({}) * Channels ({}, {}) configured for HIGH-res data don\'t match channels ({}) of input data.'.format(self.dim_t, self.data_flags[DATA_KEY_HIGH]['channels'], self.c_high, high.shape[-1]) )
low_shape = low.shape
high_shape = high.shape
if len(low.shape)==5: #sequence
if low.shape[0]!=high.shape[0]: #check amount
self.TCError('Unequal amount of low ({}) and high ({}) data.'.format(low.shape[1], high.shape[1]))
# get single data shape
low_shape = low_shape[1:]
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
high_shape = high_shape[1:]
else: high_shape = [1]
else: #single
low = [low]
high = [high]
if self.premadeTiles:
if not (self.dim_t == 1):
self.TCError('Currently, Dim_t = {} > 1 is not supported by premade tiles'.format(self.dim_t))
if not np.array_equal(low_shape, self.tile_shape_low) or not np.array_equal(high_shape,self.tile_shape_high):
self.TCError('Tile shape mismatch: is - specified\n\tlow: {} - {}\n\thigh {} - {}'.format(low_shape, self.tile_shape_low, high_shape,self.tile_shape_high))
else:
single_frame_low_shape = list(low_shape)
single_frame_high_shape = list(high_shape)
single_frame_low_shape[-1] = low_shape[-1] // self.dim_t
if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
single_frame_high_shape[-1] = high_shape[-1] // self.dim_t
if not np.array_equal(single_frame_low_shape, self.frame_shape_low) or not np.array_equal(single_frame_high_shape,self.frame_shape_high):
self.TCError('Frame shape mismatch: is - specified\n\tlow: {} - {}\n\thigh: {} - {}, given dim_t as {}'.format(single_frame_low_shape, self.frame_shape_low, single_frame_high_shape,self.frame_shape_high, self.dim_t))
self.data[DATA_KEY_LOW].extend(low)
self.data[DATA_KEY_HIGH].extend(high)
print('\n')
print('Added {} datasets. Total: {}'.format(low.shape[0], len(self.data[DATA_KEY_LOW])))
self.splitSets()
def splitSets(self):
'''
calculate the set borders for training, testing and validation set
'''
length = len(self.data[DATA_KEY_LOW])
end_train = int( length * self.part_train )
end_test = end_train + int( length * self.part_test )
#just store the borders of the different sets to avoid data duplication
self.setBorders = [end_train, end_test, length]
print('Training set: {}'.format(self.setBorders[0]))
print('Testing set: {}'.format(self.setBorders[1]-self.setBorders[0]))
print('Validation set: {}'.format(self.setBorders[2]-self.setBorders[1]))
def clearData(self):
'''
clears the data buffer
'''
self.data = {
DATA_KEY_LOW:[],
DATA_KEY_HIGH:[]
}
def createTiles(self, data, tileShape, strides=-1):
    '''
    Cut a single frame into tiles in a fixed, regular pattern.

    data: frame array in [z,y,x,c] layout (2D sims use z==1)
    tileShape: tile size as [z,y,x,c]
    strides: scalar <= 0 -> non-overlapping tiles (stride == tileShape);
             positive scalar -> same stride on all spatial axes;
             otherwise a per-axis stride sequence is expected
    Returns a numpy array of tiles, [tile,z,y,x,c].
    '''
    dataShape = data.shape  # 2D sim: [1,res,res,channels]
    # fixed: np.pad needs (before, after) pairs per axis; the previous flat
    # [p,p,p,0] list is not broadcastable to shape (4,2) and raised a
    # ValueError whenever self.padding > 0.
    p = self.padding
    pad = [(p, p), (p, p), (p, p), (0, 0)]
    if np.isscalar(strides):
        if strides <= 0:
            # fixed: copy instead of aliasing tileShape, so the stride
            # adjustment below cannot mutate the caller's argument
            strides = list(tileShape)
        else:
            strides = [strides, strides, strides]
    if dataShape[0] <= 1:  # 2D: no padding or striding along z
        pad[0] = (0, 0)
        strides[0] = 1
    noTiles = [(dataShape[i] - tileShape[i]) // strides[i] + 1 for i in range(3)]
    tiles = []
    for tileZ in range(noTiles[0]):
        for tileY in range(noTiles[1]):
            for tileX in range(noTiles[2]):
                idx_from = [tileZ * strides[0], tileY * strides[1], tileX * strides[2]]
                idx_to = [idx_from[i] + tileShape[i] for i in range(3)]
                currTile = data[idx_from[0]:idx_to[0], idx_from[1]:idx_to[1], idx_from[2]:idx_to[2], :]
                if self.padding > 0:
                    currTile = np.pad(currTile, pad, 'edge')
                tiles.append(currTile)
    return np.array(tiles)
def cutTile(self, data, tileShape, offset=[0,0,0]):
    '''
    Extract a single tile of the given shape at the given offset.
    The channel entry of tileShape is ignored; all channels are kept.
    '''
    offset = np.asarray(offset)
    tileShape = np.asarray(tileShape)
    tileShape[-1] = data.shape[-1]  # always keep every channel
    # the requested region must lie fully inside the data
    if np.less(data.shape[:3], tileShape[:3] + offset[:3]).any():
        self.TCError('Can\'t cut tile with shape {} and offset{} from data with shape {}.'.format(tileShape, offset, data.shape))
    lo = offset[:3]
    hi = lo + tileShape[:3]
    tile = data[lo[0]:hi[0], lo[1]:hi[1], lo[2]:hi[2], :]
    if not np.array_equal(tile.shape, tileShape):
        self.TCError('Wrong tile shape after cutting. is: {}. goal: {}.'.format(tile.shape, tileShape))
    return tile
#####################################################################################
# batch creation
#####################################################################################
def selectRandomTiles(self, selectionSize, isTraining=True, augment=False, tile_t = 1):
    '''
    Assemble a random batch of (low, high) tile pairs.

    Returns two arrays shaped [selectionSize, z, y, x, channels * tile_t]
    (z == 1 for 2D). Channels: density, [vel x, vel y, vel z],
    [pos x, pox y, pos z].
    '''
    # make sure the requested subset actually contains data
    if isTraining:
        if self.setBorders[0] < 1:
            self.TCError('no training data.')
    else:
        if (self.setBorders[1] - self.setBorders[0]) < 1:
            self.TCError('no test data.')
    if (tile_t > self.dim_t):
        self.TCError('not enough coherent frames. Requested {}, available {}'.format(tile_t, self.dim_t))
    batch_low, batch_high = [], []
    for _ in range(selectionSize):
        if augment and self.useDataAug:
            # augmented tile (scaling/rotation/flip as configured)
            tile_low, tile_high = self.generateTile(isTraining, tile_t)
        else:
            # plain random tile without augmentation
            tile_low, tile_high = self.getRandomDatum(isTraining, tile_t)
            if not self.premadeTiles:
                tile_low, tile_high = self.getRandomTile(tile_low, tile_high)
        batch_low.append(tile_low)
        batch_high.append(tile_high)
    return np.asarray(batch_low), np.asarray(batch_high)
def generateTile(self, isTraining=True, tile_t = 1):
    '''
    Generate one random (low, high) tile pair with data augmentation
    (scaling, rotation, 90-degree rotations, flips, as configured).
    Returns arrays shaped like tile_shape_low/high with the channel
    dimension multiplied by tile_t.
    '''
    # get a frame; it is a copy, so transformations cannot affect the raw dataset
    data = {}
    data[DATA_KEY_LOW], data[DATA_KEY_HIGH] = self.getRandomDatum(isTraining, tile_t)
    if not self.premadeTiles:
        # cut an over-sized tile first so the transforms operate on less data
        if self.do_scaling or self.do_rotation:
            factor = 1
            if self.do_rotation: # or self.do_scaling:
                factor*=1.5 # scaling: to avoid size errors caused by rounding
            if self.do_scaling:
                scaleFactor = np.random.uniform(self.scaleFactor[0], self.scaleFactor[1])
                factor/= scaleFactor
            tileShapeLow = np.ceil(self.tile_shape_low*factor)
            if self.dim==2:
                tileShapeLow[0] = 1
            data[DATA_KEY_LOW], data[DATA_KEY_HIGH] = self.getRandomTile(data[DATA_KEY_LOW], data[DATA_KEY_HIGH], tileShapeLow.astype(int))
        # random scaling; changes the resolution of the cut tile
        if self.do_scaling:
            data = self.scale(data, scaleFactor)
        bounds = np.zeros(4)
        # random rotation; bounds discard the mirrored border afterwards
        if self.do_rotation:
            bounds = np.array(data[DATA_KEY_LOW].shape)*0.16 # bounds applied on all sides, 1.5*(1-2*0.16)~1
            data = self.rotate(data)
        # cut the final tile (includes "shifting")
        data[DATA_KEY_LOW], data[DATA_KEY_HIGH] = self.getRandomTile(data[DATA_KEY_LOW], data[DATA_KEY_HIGH], bounds=bounds)
    # random sequence of 90-degree rotations
    if self.do_rot90:
        rot = np.random.choice(self.cube_rot[self.dim])
        for axis in rot:
            data = self.rotate90(data, axis)
    # flip along at most one axis
    if self.do_flip:
        axis = np.random.choice(4)
        if axis < 3: # axis < self.dim
            data = self.flip(data, [axis])
    # sanity check of the resulting tile shapes
    target_shape_low = np.copy(self.tile_shape_low)
    target_shape_high = np.copy(self.tile_shape_high)
    target_shape_low[-1] *= tile_t
    target_shape_high[-1] *= tile_t
    if not np.array_equal(data[DATA_KEY_LOW].shape,target_shape_low) or (not np.array_equal(data[DATA_KEY_HIGH].shape,target_shape_high) and not self.data_flags[DATA_KEY_HIGH]['isLabel']):
        self.TCError('Wrong tile shape after data augmentation. is: {},{}. goal: {},{}.'.format(data[DATA_KEY_LOW].shape, data[DATA_KEY_HIGH].shape, target_shape_low, target_shape_high))
    return data[DATA_KEY_LOW], data[DATA_KEY_HIGH]
def getRandomDatum(self, isTraining=True, tile_t = 1):
    '''Return a copy of a random frame (sequence) from the chosen subset.'''
    # pick a random simulation index inside the requested subset
    if isTraining:
        sim = randrange(0, self.setBorders[0])
    else:
        sim = randrange(self.setBorders[0], self.setBorders[1])
    # pick a random starting frame when a shorter sequence was requested
    frame = 0
    if tile_t < self.dim_t:
        frame = randrange(0, self.dim_t - tile_t)
    else:
        tile_t = self.dim_t
    return self.getDatum(sim * self.dim_t + frame, tile_t)
def getDatum(self, index, tile_t = 1):
    '''Return a copy of the indicated frame (or premade tile).'''
    # channel offsets of the requested frame inside the stored sequence
    low_start = 0
    high_start = 0
    if (self.dim_t > 1):
        low_start = (index % self.dim_t) * self.tile_shape_low[-1]
        high_start = (index % self.dim_t) * self.tile_shape_high[-1]
    low_end = low_start + tile_t * self.tile_shape_low[-1]
    high_end = high_start + tile_t * self.tile_shape_high[-1]
    low = np.copy(self.data[DATA_KEY_LOW][index // self.dim_t][:, :, :, low_start:low_end])
    if self.data_flags[DATA_KEY_HIGH]['isLabel']:
        # labels carry no channel layout; return them unsliced
        return low, np.copy(self.data[DATA_KEY_HIGH][index // self.dim_t])
    return low, np.copy(self.data[DATA_KEY_HIGH][index // self.dim_t][:, :, :, high_start:high_end])
def getRandomTile(self, low, high, tileShapeLow=None, bounds=[0,0,0,0]): # bounds to avoid mirrored parts
    '''
    Cut a random (low, high) tile pair from a given frame; retries up to
    20 random offsets to satisfy the configured density minimum.
    bounds: ignore the frame edges, used to discard mirrored parts after rotation.
    '''
    if tileShapeLow is None:
        tileShapeLow = np.copy(self.tile_shape_low) # use copy is very important!!!
    tileShapeHigh = tileShapeLow*self.upres
    frameShapeLow = np.asarray(low.shape)
    if len(low.shape)!=4 or len(tileShapeLow)!=4:
        self.TCError('Data shape mismatch.')
    if len(high.shape)!=4 and not self.data_flags[DATA_KEY_HIGH]['isLabel']:
        self.TCError('Data shape mismatch.')
    # valid offset range [start, end) for the low-res tile
    start = np.ceil(bounds)
    end = frameShapeLow - tileShapeLow + np.ones(4) - start
    offset_up = np.array([self.upres, self.upres, self.upres])  # low->high offset scaling
    if self.dim==2:
        # 2D: fix the z axis
        start[0] = 0
        end[0] = 1
        offset_up[0] = 1
        tileShapeHigh[0] = 1
    # check whether a tile can be cut at all
    if np.amin((end-start)[:3]) < 0:
        self.TCError('Can\'t cut tile {} from frame {} with bounds {}.'.format(tileShapeLow, frameShapeLow, start))
    # retry random offsets until the tile has enough density (max. 20 tries);
    # the last attempt is returned even if it fails the density check
    hasMinDensity = False
    i = 1
    while (not hasMinDensity) and i<20:
        offset = np.asarray([randrange(start[0], end[0]), randrange(start[1], end[1]), randrange(start[2], end[2])])
        lowTile = self.cutTile(low, tileShapeLow, offset)
        offset *= offset_up
        if not self.data_flags[DATA_KEY_HIGH]['isLabel']:
            highTile = self.cutTile(high, tileShapeHigh, offset)
        else:
            highTile = high
        hasMinDensity = self.hasMinDensity(lowTile)
        i+=1
    return lowTile, highTile
#####################################################################################
# AUGMENTATION
#####################################################################################
def special_aug(self, data, ops_key, param):
    """
    Wrapper calling the augmentation operations registered in self.aops
    (set up in initAugmentation) on every non-label data entry.

    ops_key: which operation set to apply (rotate / rot90 / flip / scale)
    param: operation parameter (e.g. rotation matrix, axes list, factor)
    """
    for data_key in data:
        if self.data_flags[data_key]['isLabel']: continue
        orig_shape = data[data_key].shape
        # number of stacked time steps packed into the channel dimension
        tile_t = orig_shape[-1] // self.data_flags[data_key]['channels']
        data_array = data[data_key]
        # unfold the time steps out of the channel axis so the ops see single frames
        if(tile_t > 1): data_array = data[data_key].reshape( (-1, tile_t, self.data_flags[data_key]['channels']) )
        for c_key, op in self.aops[data_key][ops_key].items():
            if self.data_flags[data_key][c_key]:
                data_array = op(data_array, self.c_lists[data_key][c_key], param)
        # NOTE(review): for tile_t == 1 the op results are never written back
        # to data[data_key]; this only works for ops that mutate their input
        # in place (e.g. the flip/scale velocity ops) -- confirm whether the
        # tile_t == 1 path is intended for the non-in-place ops as well.
        if (tile_t > 1): data[data_key] = data_array.reshape(orig_shape)
    return data
def rotate(self, data):
    '''
    Apply a random uniform rotation to the low and high data of a frame.
    2D: random angle about z; 3D: uniform random rotation built from a
    normalized Gaussian quaternion.
    '''
    # build the homogeneous 4x4 rotation matrix (leading axis is z)
    # 2D: rotate in the y-x plane only, angle in [0, 2*pi)
    if self.dim==2:
        theta = np.pi * np.random.uniform(0, 2)
        rotation_matrix = np.array([[1, 0, 0, 0 ],
                                    [0, np.cos(theta), -np.sin(theta), 0],
                                    [0, np.sin(theta), np.cos(theta) , 0],
                                    [0, 0, 0, 1] ])
    # 3D: quaternion-to-matrix conversion; a normalized 4D Gaussian sample
    # yields a uniformly distributed rotation
    elif self.dim==3:
        quat = np.random.normal(size=4)
        quat/= np.linalg.norm(quat)
        q = np.outer(quat, quat)*2
        rotation_matrix = np.array([[1-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0],
                                    [ q[1, 2]+q[3, 0], 1-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0],
                                    [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1-q[1, 1]-q[2, 2], 0],
                                    [ 0, 0, 0, 1]])
    # first adjust direction-dependent channels (velocities), then resample the grids
    data = self.special_aug(data, AOPS_KEY_ROTATE, rotation_matrix)
    for data_key in data:
        if not self.data_flags[data_key]['isLabel']:
            data[data_key] = self.applyTransform(data[data_key], rotation_matrix.T)
    return data
def rotate_simple(self, low, high, angle):
    '''
    Rotate both frames about the z axis only; roughly 30-40% faster than
    the full rotation-matrix path, but limited to one axis.
    '''
    if len(low.shape) != 4 or len(high.shape) != 4:
        self.TCError('Data shape mismatch.')
    # rotate in the y-x plane (axis order is z,y,x,c)
    kwargs = dict(reshape=False, order=self.interpolation_order, mode=self.fill_mode, cval=1.0)
    low = scipy.ndimage.rotate(low, angle, [1, 2], **kwargs)
    high = scipy.ndimage.rotate(high, angle, [1, 2], **kwargs)
    return low, high
def rotateVelocities(self, datum, c_list, rotationMatrix):
    '''
    Rotate all velocity vector channel sets with the given homogeneous
    4x4 rotation matrix (3-component and 2-component sets supported).
    '''
    rot3 = rotationMatrix[:3, :3]
    rot2 = rotationMatrix[1:3, 1:3]
    channels = np.split(datum, datum.shape[-1], -1)
    for v in c_list:
        if len(v) == 3:  # currently the usual case, even for 2D data
            # stack as z,y,x to match the rotation matrix's axis order
            vec = np.stack([channels[v[2]].flatten(), channels[v[1]].flatten(), channels[v[0]].flatten()])
            vec = rot3.dot(vec)
            channels[v[2]] = np.reshape(vec[0], channels[v[2]].shape)
            channels[v[1]] = np.reshape(vec[1], channels[v[1]].shape)
            channels[v[0]] = np.reshape(vec[2], channels[v[0]].shape)
        if len(v) == 2:
            # y,x order to match the 2x2 rotation matrix
            vec = np.concatenate([channels[v[1]], channels[v[0]]], -1)
            shape = vec.shape
            vec = np.reshape(vec, (-1, 2))
            vec = np.reshape(rot2.dot(vec.T).T, shape)
            vec = np.split(vec, 2, -1)
            channels[v[1]] = vec[0]
            channels[v[0]] = vec[1]
    return np.concatenate(channels, -1)
def rotate90(self, data, axes):
    '''
    Rotate every non-label frame by 90 degrees, from the first given axis
    counterclockwise towards the second (see np.rot90; 0,1,2 -> z,y,x).
    '''
    if len(axes) != 2:
        self.TCError('need 2 axes for rotate90.')
    for key in data:
        if not self.data_flags[key]['isLabel']:
            data[key] = np.rot90(data[key], axes=axes)
    # fix up direction-dependent channels (e.g. velocity components)
    return self.special_aug(data, AOPS_KEY_ROT90, axes)
def rotate90Velocities(self, datum, c_list, axes):
    '''
    Adjust velocity channels after a 90-degree frame rotation: swap the
    two affected components and negate one of them.
    '''
    if len(axes) != 2:
        self.TCError('need 2 axes for rotate90.')
    channels = np.split(datum, datum.shape[-1], -1)
    for v in c_list:
        # data axes z,y,x map to velocity components x,y,z: 0,1,2 -> 2,1,0
        a, b = v[-axes[0] + 2], v[-axes[1] + 2]
        channels[a], channels[b] = -channels[b], channels[a]
    return np.concatenate(channels, -1)
def flip(self, data, axes, isFrame=True): # axes=list, flip multiple at once
    '''
    Flip low and high data (single frame/tile) along the specified axes.
    axes: list of axis indices 0,1,2 -> z,y,x
    isFrame: if False, axis indices are shifted by one (batched tiles).
    '''
    # axis: 0,1,2 -> z,y,x
    if not isFrame:
        # NOTE(review): axes.shape requires axes to already be a numpy array
        # here (a plain list would raise), and the resulting float axes would
        # not be accepted by np.flip -- this path looks unused; confirm.
        axes = np.asarray(axes) + np.ones(axes.shape)
    # flip the grids themselves
    for axis in axes:
        for data_key in data:
            if not self.data_flags[data_key]['isLabel']:
                data[data_key] = np.flip(data[data_key], axis)
    # then fix up direction-dependent channels (e.g. velocity signs)
    data = self.special_aug(data, AOPS_KEY_FLIP, axes)
    return data
def flipVelocities(self, datum, c_list, axes):
    '''
    Negate the velocity components that correspond to flipped data axes.
    datum: data holding velocity channels (e.g. d,vx,vy,vz)
    axes: list of flipped axis indices, 0,1,2 -> z,y,x
    NOTE: the split views are negated in place, so datum is modified too.
    '''
    channels = np.split(datum, datum.shape[-1], -1)
    for v in c_list:
        # data axis 2 flips vel-x (v[0]), axis 1 vel-y (v[1]),
        # axis 0 vel-z (v[2], present only for 3D sets)
        for data_axis, comp in ((2, 0), (1, 1), (0, 2)):
            if data_axis in axes and comp < len(v):
                channels[v[comp]] *= (-1)
    return np.concatenate(channels, -1)
def scale(self, data, factor):
    '''
    Resample all non-label frames to round(factor * original resolution).
    '''
    # uniform factor on the spatial axes only; channels stay untouched
    # (only a single factor for every dimension is supported for now)
    zoom = [factor, factor, factor, 1]
    if self.dim == 2:
        zoom[0] = 1
    # snap to the actually representable factor so the high/low ratio stays exact
    zoom = np.round(np.array(data[DATA_KEY_LOW].shape) * zoom) / np.array(data[DATA_KEY_LOW].shape)
    if len(data[DATA_KEY_LOW].shape) == 5:  # frame sequence: keep the batch axis
        zoom = np.append([1], zoom)
    # resampling changes the frame size; getRandomTile() copes without extra bounds
    for key in data:
        if not self.data_flags[key]['isLabel']:
            data[key] = scipy.ndimage.zoom(data[key], zoom, order=self.interpolation_order, mode=self.fill_mode, cval=0.0)
    # adjust velocity magnitudes etc. via the registered ops
    return self.special_aug(data, AOPS_KEY_SCALE, factor)
def scaleVelocities(self, datum, c_list, factor):
    '''
    Scale every velocity component by the given factor (the split views
    are scaled in place, so datum is modified as well).
    '''
    channels = np.split(datum, datum.shape[-1], -1)
    for v in c_list:
        for comp in v:  # x,y,[z]
            channels[comp] *= factor
    return np.concatenate(channels, -1)
def applyTransform(self, data, transform_matrix, data_dim=3):
    '''
    Apply an affine transform (homogeneous 4x4 matrix) to a single frame,
    centered on the frame (z,y,x,c layout); each channel is resampled
    separately.
    '''
    if len(data.shape) != 4:
        self.TCError('Data shape mismatch.')
    # conjugate the transform with a shift so it acts about the frame center
    # (from fluiddatagenerator.py)
    center = np.array(data.shape) / 2 - np.array([0.5, 0.5, 0.5, 0])
    to_center = np.array([[1, 0, 0, center[0]], [0, 1, 0, center[1]], [0, 0, 1, center[2]], [0, 0, 0, 1]])
    from_center = np.array([[1, 0, 0, -center[0]], [0, 1, 0, -center[1]], [0, 0, 1, -center[2]], [0, 0, 0, 1]])
    matrix = np.dot(np.dot(to_center, transform_matrix), from_center)
    # fixed: scipy.ndimage.interpolation is a long-deprecated namespace that
    # has been removed from recent SciPy releases; use the public function.
    per_channel = [scipy.ndimage.affine_transform(
        channel,
        matrix[:data_dim, :data_dim],
        matrix[:data_dim, data_dim],
        order=self.interpolation_order,
        mode=self.fill_mode,
        cval=0.) for channel in np.rollaxis(data, 3, 0)]
    return np.stack(per_channel, axis=-1)
#####################################################################################
# HELPER METHODS
#####################################################################################
def concatTiles(self, tiles, frameShape ,tileBorder=[0,0,0,0]):
    '''
    Reassemble a frame by concatenating equally shaped tiles.
    tiles: numpy array of tiles [batch,z,y,x,c]
    frameShape: frame size measured in tiles [z,y,x]
    tileBorder: border widths [z,y,x,c] cut from every tile first
    '''
    if len(tiles.shape) != 5 or len(frameShape) != 3 or len(tileBorder) != 4:
        self.TCError('Data shape mismatch.')
    if frameShape[0] * frameShape[1] * frameShape[2] != len(tiles):
        self.TCError('given tiles do not match required tiles.')
    # strip the requested border from every tile
    tileBorder = np.asarray(tileBorder)
    if np.less(np.zeros(4), tileBorder).any():
        innerShape = tiles.shape[1:] - 2 * tileBorder
        tiles = [self.cutTile(t, innerShape, tileBorder) for t in tiles]
    # concatenate along x, then y, then z
    slabs = []
    for z in range(frameShape[0]):
        rows = []
        for y in range(frameShape[1]):
            start = z * frameShape[1] * frameShape[2] + y * frameShape[2]
            rows.append(np.concatenate(tiles[start:start + frameShape[2]], axis=2))
        slabs.append(np.concatenate(rows, axis=1))
    return np.concatenate(slabs, axis=0)
def hasMinDensity(self, tile):
    '''True if the tile's summed density reaches the required per-cell minimum.'''
    threshold = self.densityMinimum * tile.shape[0] * tile.shape[1] * tile.shape[2]
    return self.getTileDensity(tile) >= threshold
def getTileDensity(self, tile):
    '''Sum of the density channel (channel 0) of the tile.'''
    if self.data_flags[DATA_KEY_LOW]['channels'] > 1:
        tile = tile[..., 0:1]  # keep only the density channel
    return tile.sum(dtype=np.float64)
def getFrameTiles(self, index):
    '''Return the indicated frame cut into (low, high) tile arrays.'''
    low, high = self.getDatum(index)
    low_tiles = self.createTiles(low, self.tile_shape_low)
    high_tiles = self.createTiles(high, self.tile_shape_high)
    return low_tiles, high_tiles
#####################################################################################
# CHANNEL PARSING
#####################################################################################
def parseChannels(self, channelString):
    '''
    Parse an arbitrary channel layout from a comma-separated string.
    'd': default/density; data that needs no special operations during augmentation
    'v[label](x|y|z)': vector/velocity; transformed according to the augmentation
    Returns (list of channel keys, dict of channel-type index lists).
    '''
    # needed for low and high; high may also be labels only
    c = [key.strip() for key in channelString.lower().split(',')]
    c_types = {
        C_KEY_DEFAULT: [],    # indices of default channels, e.g. [0] for plain sim data
        C_KEY_VELOCITY: [],   # ordered (x,y,z) index triples of velocity sets, e.g. [[1,2,3]]
        C_KEY_VORTICITY: []
    }
    # dispatch table: the first character of a key selects its parser
    self.parse = {
        C_KEY_DEFAULT: self.parseCDefault,
        C_KEY_VELOCITY: self.parseCVelocity,
        C_KEY_VORTICITY: self.parseCVorticity
    }
    for i in range(len(c)):
        if len(c[i]) == 0:
            # fixed: the message previously called .format(i) without a
            # placeholder, silently dropping the channel index
            self.TCError('channel {}: empty channel key.'.format(i))
        try:
            self.parse[c[i][0]](c, i, c_types)
        except KeyError:
            self.TCError('channel {}: unknown channel key \"{}\".'.format(i, c[i]))
    # TODO check unused channels here
    return c, c_types
def parseCDefault(self, c, i, c_types):
    '''Register channel i as a default channel; anything but 'd' is an error.'''
    if c[i] != 'd':
        self.TCError('channel {}: unknown channel key \"{}\".'.format(i, c[i]))
    c_types[C_KEY_DEFAULT].append(i)
def parseCVector(self, c, i, c_types, c_key, c_name='vector'):
    '''
    Validate and register a vector channel key of the form c_key[label](x|y|z).
    All components of a labeled vector must be present exactly once
    (z optional in 2D); the component indices are stored as an (x,y[,z])
    tuple when the x component is encountered.
    '''
    if c[i][-1] == 'x' or c[i][-1] == 'y' or c[i][-1] == 'z':
        label = c[i][1:-1] # can be empty
        # the three matching component keys
        v_x = c_key+label+'x'
        v_y = c_key+label+'y'
        v_z = c_key+label+'z'
        # check for duplicates
        if c.count(v_x)>1:
            self.TCError('Duplicate {} ({}) x-channel with label \"{}\": {}. Vector keys must be unique.'.format(c_name, c_key, label, v_x))
        if c.count(v_y)>1:
            self.TCError('Duplicate {} ({}) y-channel with label \"{}\": {}. Vector keys must be unique.'.format(c_name, c_key, label, v_y))
        if c.count(v_z)>1:
            self.TCError('Duplicate {} ({}) z-channel with label \"{}\": {}. Vector keys must be unique.'.format(c_name, c_key, label, v_z))
        # check for missing components
        if c.count(v_x)==0:
            self.TCError('Missing {} ({}) x-channel with label \"{}\": {}'.format(c_name, c_key, label, v_x))
        if c.count(v_y)==0:
            self.TCError('Missing {} ({}) y-channel with label \"{}\": {}'.format(c_name, c_key, label, v_y))
        if self.dim==3 and c.count(v_z)==0:
            self.TCError('Missing {} ({}) z-channel with label \"{}\": {}'.format(c_name, c_key, label, v_z))
        if c[i][-1] == 'x':
            # NOTE(review): indices are always appended to C_KEY_VELOCITY
            # regardless of c_key, so vorticity sets land in the velocity
            # list as well -- confirm this is intended before changing.
            if(c.count(v_z)==0 and self.dim==2):
                c_types[C_KEY_VELOCITY].append([c.index(v_x),c.index(v_y)])
            else:
                c_types[C_KEY_VELOCITY].append([c.index(v_x),c.index(v_y),c.index(v_z)])
    # unknown component suffix
    else:
        self.TCError('Channel {}, \"{}\": unknown {} ({}) channel suffix \"{}\". Valid suffixes are \"x\", \"y\", \"z\".'.format(i, c[i], c_name, c_key, c[i][-1]))
def parseCVelocity(self, c, i, c_types):
    '''Parse a velocity channel key: C_KEY_VELOCITY[label](x|y|z).'''
    # fixed typo in the displayed channel name ('velociy' -> 'velocity')
    self.parseCVector(c, i, c_types, C_KEY_VELOCITY, 'velocity')
def parseCVorticity(self, c, i, c_types):
    '''Parse a vorticity channel key: key[label](x|y|z).'''
    # NOTE(review): this passes C_KEY_VELOCITY (not C_KEY_VORTICITY) to
    # parseCVector, so vorticity keys are validated with the velocity key
    # prefix and indexed under the velocity list. Possibly intentional
    # (vorticity is augmented like a velocity), possibly a copy-paste
    # slip -- confirm before changing.
    self.parseCVector(c, i, c_types, C_KEY_VELOCITY, 'vorticity')
#####################################################################################
# ERROR HANDLING
#####################################################################################
def TCError(self, msg):
    '''Central error helper: raise a TilecreatorError with the given message.'''
    raise TilecreatorError(msg)
class TilecreatorError(Exception):
    '''Error type raised by the tile creator (see TCError).'''
#####################################################################################
# IMAGE OUTPUT
#####################################################################################
# save summary images of all channels in a batch generated by the tile creator
# projects 3D data onto different axes, data has to be B-ZYX-C format
# global fallback counter used by savePngsBatch when no batchCounter is passed
batchCounterGlob=0
def savePngsBatch(low, high, TC, path, batchCounter=-1, save_vels=False, dscale=1., vscale=1.):
    '''
    Save summary images of all channels in a batch generated by the tile
    creator; 3D data (B-ZYX-C format) is projected onto all three axis planes.
    dscale/vscale: brightness scale for density / velocity output.
    '''
    global batchCounterGlob
    dim = 2 if (low.shape[1] == 1) else 3
    # figure out a good tile layout, and project all axes for 3D
    batch = low.shape[0]
    tileX = 4 if batch >= 4 else batch
    if batch % tileX != 0: tileX = batch
    # file names
    if batchCounter < 0:
        batchCounter = batchCounterGlob
        batchCounterGlob += 1
    path = path + "batch{:04d}_".format(batchCounter)
    # scalar channels, written as tiled images per projection axis
    aNames = ["xy_", "xz_", "yz_"]
    for axis in range(1 if dim == 2 else 3):
        suff = aNames[axis]
        if dim == 3:
            highD = np.average(high, axis=axis + 1) * dscale
            lowD = np.average(low, axis=axis + 1) * dscale
        if dim == 2:
            # fixed: the 2D branch scaled highD with an undefined name
            # ('brightness'); use dscale like lowD and the 3D branch
            highD = high * dscale
            lowD = low * dscale
        # NOTE(review): tll/tlh/cl/ch are not defined in this function --
        # presumably module globals holding tile sizes and channel counts;
        # verify against the rest of the file.
        lowD.shape = (batch, tll, tll, cl)
        highD.shape = (batch, tlh, tlh, ch)
        # note - outputs all channels as images, also vel channels...
        clout = np.arange(low.shape[4])
        savePngsGrayscale(lowD, path + 'low_' + suff, tiles_in_image=[batch // tileX, tileX], channels=clout)
        chout = np.arange(high.shape[4])
        savePngsGrayscale(tiles=highD, path=path + 'high_' + suff, imageCounter=0, tiles_in_image=[batch // tileX, tileX], channels=chout)
    # velocity quiver plots, per individual sample
    if save_vels:
        for i in range(low.shape[0]):
            saveVelChannels(low[i], TC.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY], path=path + 'low_vel_i{:02d}_'.format(i), name="", scale=vscale)
        for i in range(high.shape[0]):
            saveVelChannels(high[i], TC.c_lists[DATA_KEY_HIGH][C_KEY_VELOCITY], path=path + 'high_vel_i{:02d}_'.format(i), name="", scale=vscale)
# simpler function to output multiple tiles into grayscale pngs
def savePngsGrayscale(tiles, path, imageCounter=0, tiles_in_image=[1,1], channels=[0], save_gif=False, plot_vel_x_y=False, save_rgb=None, rgb_interval=[-1,1]):
    '''
    Write tiles as grayscale pngs, several tiles stitched per image.
    tiles_in_image: (y, x) layout; tiles: shape (tile, y, x, c)
    '''
    tilesInImage = tiles_in_image[0] * tiles_in_image[1]
    if len(tiles) % tilesInImage != 0:
        print('ERROR: number of tiles does not match tiles per image')
        return
    tiles = np.asarray(tiles)
    noImages = len(tiles) // tilesInImage
    if save_gif:
        gif = []
    for image in range(noImages):
        # stitch the tiles of this image: x within a row, rows stacked along y
        rows = []
        for y in range(tiles_in_image[0]):
            start = image * tilesInImage + y * tiles_in_image[1]
            rows.append(np.concatenate(tiles[start:start + tiles_in_image[1]], axis=1))
        img = np.concatenate(rows, axis=0)
        img_c = np.rollaxis(img, -1, 0)  # channels first
        # optional extra outputs for multi-channel data
        if len(img_c) > 1 and (plot_vel_x_y or save_rgb is not None):
            if plot_vel_x_y: saveVel(img, path, imageCounter + image)
            if save_rgb is not None: saveRGBChannels(img, path, save_rgb, value_interval=rgb_interval, imageCounter=imageCounter + image)
        if len(channels) == 1:
            scipy.misc.toimage(img_c[channels[0]], cmin=0.0, cmax=1.0).save(path + 'img_{:04d}.png'.format(imageCounter*noImages+image))
        else:
            for i in channels:
                scipy.misc.toimage(img_c[i], cmin=0.0, cmax=1.0).save(path + 'img_{:04d}_c{:04d}.png'.format(imageCounter*noImages+image, i))
# store velocity as quiver plot
def saveVel(tile, path, imageCounter=0, name='vel-x-y'):
    '''
    Save the velocity (x,y) channels of a tile as a quiver-plot png.
    Accepts 4-channel (d,vx,vy,vz) or 2-channel (vx,vy) tiles.
    '''
    # origin is in the upper left corner, transform accordingly
    y, x = np.mgrid[-tile.shape[0]:0, 0:tile.shape[1]]
    vx = None; vy = None
    if tile.shape[-1] == 4:
        d, vx, vy, vz = np.split(tile, 4, -1)
    elif tile.shape[-1] == 2:
        vx, vy = np.split(tile, 2, -1)
    else:
        print('ERROR: unknown nr of channels for vel input '+format(tile.shape))
        # fixed: previously fell through and crashed on vx[::-1] with vx=None
        return
    vx = vx[::-1, ...]
    vy = vy[::-1, ...]
    if found_matplotlib:
        matplotlib.pyplot.quiver(x, y, vx.flatten(), vy.flatten(), units='xy', scale=1)
        matplotlib.pyplot.axis('equal')
        matplotlib.pyplot.savefig(path + '{}_{:04d}.png'.format(name, imageCounter))
        matplotlib.pyplot.clf()
# save velocity channels from the tilecreator with multiple axis projections (uses saveVel)
def saveVelChannels(data, c_idx, path, average=False, scale=1.0, normalize=True, name=''):
    '''
    Save quiver plots for every velocity set in data: the xy projection
    always, plus xz and yz projections for 3-component sets (uses saveVel).

    data: single sample [z,y,x,c]; c_idx: list of velocity index tuples
    scale: global arrow scale; normalize: additionally scale by 1/max
    '''
    channels = np.split(data, data.shape[-1], -1)
    vpath = path
    vcnt = 0
    for v in c_idx:
        if(len(c_idx)>1):  # distinguish multiple velocity sets by a suffix
            vpath = path + "vc{}".format(vcnt)
            vcnt += 1
        # compute the arrow scale factor for this set
        vscale = scale
        if normalize:
            vavg = np.concatenate( [ channels[v[0]],channels[v[1]] ], -1)
            if(len(v))>2: vavg = np.concatenate( [ vavg, channels[v[2]] ],-1)
            vscale *= (1./(np.max( vavg )+1e-10)) # normalize
        # xy projection: average over z
        vavg = np.concatenate( [ channels[v[0]],channels[v[1]] ] , -1)
        vavg = np.average(vavg, axis=0)
        vavg *= vscale
        saveVel(vavg, path=vpath, name='_xy' )
        if(len(v))>2: # 3D sets: also output the xz and yz projections
            vavg = np.concatenate( [ channels[v[0]],channels[v[2]] ] , -1)
            vavg = np.average(vavg, axis=1)
            vavg *= vscale
            saveVel(vavg, path=vpath, name='_xz' )
            vavg = np.concatenate( [ channels[v[1]],channels[v[2]] ] , -1)
            vavg = np.average(vavg, axis=2)
            vavg *= vscale
            saveVel(vavg, path=vpath, name='_yz' )
def saveRGBChannels(data, path, channel_list, imageCounter=0, value_interval=[-1,1]):
    """
    Save channel triples (or pairs, with blue filled by cmin) as RGB images.
    data: shape [y,x,c]; channel_list: list of 2- or 3-tuples of channel ids
    """
    cmin = value_interval[0]
    cmax = value_interval[1]
    channels = np.split(data, data.shape[-1], -1)
    for ids in channel_list:
        if len(ids) == 2:
            blue = np.ones_like(channels[ids[0]]) * cmin
            img = np.concatenate([channels[ids[0]], channels[ids[1]], blue], -1)
        else:
            img = np.concatenate([channels[ids[0]], channels[ids[1]], channels[ids[2]]], -1)
        scipy.misc.toimage(img, cmin=-1.0, cmax=1.0).save(path + 'img_rgb_{:04d}.png'.format(imageCounter))
def save3DasUni(tiles, path, motherUniPath, imageCounter=0, tiles_in_image=[1,1]):
    '''
    Stitch tiles into images and write the result as a single .uni file
    (header copied from motherUniPath via TDarrayToUni).
    tiles_in_image: (y,x) layout; tiles: shape (image,y,x,c)
    '''
    tilesInImage = tiles_in_image[0]*tiles_in_image[1]
    if len(tiles)%tilesInImage!=0:
        print('ERROR: number of tiles does not match tiles per image')
        return
    tiles = np.asarray(tiles)
    noImages = len(tiles)//tilesInImage
    # drop any extra axes, keep (tile, y, x, c)
    tiles = np.reshape(tiles,(len(tiles), tiles.shape[1], tiles.shape[2], tiles.shape[-1]))
    img_all = []
    for image in range(noImages):
        img = []
        # stitch the tiles belonging to this image
        for y in range(tiles_in_image[0]):
            offset=( image) * tilesInImage + (y)*tiles_in_image[1]
            img.append(np.concatenate(tiles[offset:offset+tiles_in_image[1]],axis=2))
        img = np.array(img)
        img = np.concatenate(img, axis=1)
        img = np.array(img)
        # NOTE(review): np.rollaxis(img, 0, 0) is a no-op -- the channel axis
        # is NOT moved to the front here, despite the original intent.
        img_c = np.rollaxis(img, 0, 0)
        img_all.append(img_c)
    img_all = np.array(img_all)
    img_all = np.concatenate(img_all, axis=0)
    img_all = np.array(img_all)
    TDarrayToUni(img_all, path + 'source_{:04d}.uni'.format(imageCounter), motherUniPath, img_all.shape[0], img_all.shape[1], img_all.shape[2])
def TDarrayToUni(input, savePath, motherUniPath, imageHeight, imageWidth, imageDepth, is_vel=False):
    '''
    Write a 3D numpy array to a .uni file, reusing the header of
    motherUniPath with updated dimensions.

    The array is reordered with the same index mapping as the original
    element-wise triple loop,
        fixedArray[x, y, z] = input[imageDepth-1-z, y, imageHeight-1-x],
    but vectorized via slicing + transpose (the old per-cell Python loop
    was O(H*W*D) interpreter work and duplicated across both branches).
    '''
    head, _ = uniio.readUni(motherUniPath)
    head['dimX'] = imageWidth
    head['dimY'] = imageHeight
    head['dimZ'] = imageDepth
    # reverse z and x ranges exactly as the old loops indexed them
    src = np.asarray(input)[imageDepth - 1::-1, :imageWidth, imageHeight - 1::-1]
    # move axes (z,y,x[,c]) -> (x,y,z[,c]); a trailing channel axis is kept
    axes = (2, 1, 0) + tuple(range(3, src.ndim))
    shape = (imageHeight, imageWidth, imageDepth, 3) if is_vel else (imageHeight, imageWidth, imageDepth)
    fixedArray = np.transpose(src, axes).reshape(shape).astype('f')
    uniio.writeUni(savePath, head, fixedArray)
# ******************************************************************************
# faster functions, batch operations
#
# grid interpolation method, order: only linear tested
# for velocity, macgridbatch.shape should be [b,z,y,x,3] (in 2D, should be [b,1,ny,nx,3])
# for density , macgridsource.shape should be [z,y,x,1] (in 2D, should be [1,ny,nx,1])
def gridInterpolBatch(macgridbatch, targetshape, order=1):
    '''
    Resample a batch of grids to targetshape via scipy map_coordinates.
    macgridbatch: [b,z,y,x,c]; targetshape: [b,z,y,x,c] with identical
    batch and channel sizes (no interpolation across those axes).
    order: spline order (only linear, order=1, has been tested).
    '''
    assert (targetshape[-1] == macgridbatch.shape[-1])  # no interpolation between channels
    assert (len(targetshape) == 5 and len(macgridbatch.shape) == 5)
    # target sample positions (cell centers); batch and channel are kept fixed
    x_ = np.linspace(0.5, targetshape[3] - 0.5, targetshape[3])
    y_ = np.linspace(0.5, targetshape[2] - 0.5, targetshape[2])
    z_ = np.linspace(0.5, targetshape[1] - 0.5, targetshape[1])
    c_ = np.linspace(0, targetshape[4] - 1, targetshape[4])  # no interpolation between channels
    b_ = np.linspace(0, targetshape[0] - 1, targetshape[0])  # no interpolation between batches
    b, z, y, x, c = np.meshgrid(b_, z_, y_, x_, c_, indexing='ij')
    # source/target resolution ratio per spatial axis
    fx = float(macgridbatch.shape[3]) / targetshape[3]
    fy = float(macgridbatch.shape[2]) / targetshape[2]
    fz = float(macgridbatch.shape[1]) / targetshape[1]
    # (removed the dead, unused 'dim' computation of the original)
    return scipy.ndimage.map_coordinates(macgridbatch, [b, z * fz, y * fy, x * fx, c], order=order, mode='reflect')
# macgrid_batch shape b, z, y, x, 3
# return a matrix in size [b,z,y,x,3] ( 2D: [b,y,x,2]), last channel in z-y-x order!(y-x order for 2D)
def getMACGridCenteredBatch(macgrid_batch, is3D):
    '''
    Convert a batch of MAC (staggered) grids to cell-centered velocities
    by averaging each face value with its upper neighbor (the last value
    along each axis is duplicated at the border).
    macgrid_batch: [b,z,y,x,3].
    Returns [b,z,y,x,3] with channels in z-y-x order for 3D, or
    [b,y,x,2] in y-x order for 2D.
    '''
    _bn, _zn, _yn, _xn, _cn = macgrid_batch.shape
    def _shifted(axis, length, channel):
        # neighbor lookup: shift by one along 'axis', clamped at the far border
        idx = list(range(1, length))
        idx.append(length - 1)
        picked = macgrid_batch.take(idx, axis=axis)[:, :, :, :, channel]
        return picked.reshape([_bn, _zn, _yn, _xn, 1])
    add_x = _shifted(3, _xn, 0)
    add_y = _shifted(2, _yn, 1)
    if (is3D):
        add_z = _shifted(1, _zn, 2)
        centered = 0.5 * (macgrid_batch[:, :, :, :, ::-1] + np.concatenate((add_z, add_y, add_x), axis=-1))
        return centered.reshape([_bn, _zn, _yn, _xn, 3])
    centered = 0.5 * (macgrid_batch[:, :, :, :, -2::-1] + np.concatenate((add_y, add_x), axis=4))
    return centered.reshape([_bn, _yn, _xn, 2])
# macgrid_batch shape b, z, y, x, 3 ( b,1,y,x,3 for 2D )
# return the re-sampling positions as a matrix, in size of [b,z,y,x,3] ( 2D: [b,y,x,2])
def getSemiLagrPosBatch(macgrid_batch, dt, cube_len_output=-1): # check interpolation later
    '''
    Compute semi-Lagrangian back-tracing positions for a batch of MAC grids.

    macgrid_batch: [b,z,y,x,3] (b,1,y,x,3 for 2D)
    dt: time step; may be an array broadcastable over the batch
    cube_len_output: edge length of the output grid; -1 keeps the input
                     resolution (otherwise velocities are interpolated first)
    Returns positions shaped [b,z,y,x,3] (2D: [b,y,x,2]).
    '''
    assert (len(macgrid_batch.shape) == 5)
    _bn, _zn, _yn, _xn, _cn = macgrid_batch.shape
    assert (_cn == 3)
    is3D = (_zn > 1)
    if (cube_len_output == -1): cube_len_output = _xn
    # input/output resolution ratio
    factor = float(_xn) / cube_len_output
    # cell-center coordinates of the output grid
    x_ = np.linspace(0.5, int(_xn / factor + 0.5) - 0.5, int(_xn / factor + 0.5))
    y_ = np.linspace(0.5, int(_yn / factor + 0.5) - 0.5, int(_yn / factor + 0.5))
    interp_shape = [_bn, int(_zn / factor + 0.5), int(_yn / factor + 0.5), int(_xn / factor + 0.5), 3]
    if (not is3D): interp_shape[1] = 1
    if (is3D):
        z_ = np.linspace(0.5, int(_zn / factor + 0.5) - 0.5, int(_zn / factor + 0.5))
        z, y, x = np.meshgrid(z_, y_, x_, indexing='ij')
        posArray = np.stack((z, y, x), axis=-1) # shape, z,y,x,3
        tarshape = [1, int(_zn / factor + 0.5), int(_yn / factor + 0.5), int(_xn / factor + 0.5), 3]
    else:
        y, x = np.meshgrid(y_, x_, indexing='ij')
        posArray = np.stack((y, x), axis=-1) # shape, y,x,2
        tarshape = [1, int(_yn / factor + 0.5), int(_xn / factor + 0.5), 2]
    posArray = posArray.reshape(tarshape)
    if (cube_len_output == _xn):
        # full resolution: trace back directly with the centered velocities
        return (posArray - getMACGridCenteredBatch(macgrid_batch, is3D) * dt)
    # otherwise interpolate the velocities to the output resolution first,
    # rescaling them into output-grid cell units
    inter_mac_batch = gridInterpolBatch(macgrid_batch, interp_shape, 1)
    inter_mac_batch = getMACGridCenteredBatch(inter_mac_batch, is3D) / factor
    return (posArray - (inter_mac_batch) * dt)
# error! must not shuffle for fluid data loader!
def selectRandomTempoTiles(self, selectionSize, isTraining=True, augment=False, n_t=3, dt=0.5, adv_flag = 1.0):
    '''
    Main method to create temporally coherent batches.
    Args:
        selectionSize: requested total number of tiles; rounded down to a
            multiple of n_t (at least one sequence is produced).
        isTraining: forwarded to selectRandomTiles (train vs. test split).
        augment: forwarded to selectRandomTiles (data augmentation).
        n_t: number of consecutive time steps per sequence.
        dt: time-step size used for the semi-Lagrangian positions.
        adv_flag: when truthy, additionally compute advection re-sampling
            positions from the velocity channels.
    Return:
        (batch_low, batch_high, vel_pos_high_inter); the first two flattened from
        shape: [n_t * selectionSize//n_t, z, y, x, channels]
        if 2D z = 1
        channels: density, [vel x, vel y, vel z], [pos x, pos y, pos z]
        vel_pos_high_inter is None when adv_flag is falsy.
    '''
    # At least one sequence of n_t consecutive frames.
    batch_sz = int( max( 1, selectionSize // n_t) )
    batch_low, batch_high = self.selectRandomTiles(batch_sz, isTraining, augment, tile_t = n_t)
    real_batch_sz = batch_sz * n_t
    # Unpack the time dimension (folded into the channel axis) and move it
    # next to the batch dimension, then merge it into the batch.
    ori_input_shape = batch_low.reshape((batch_sz, self.tileSizeLow[0], self.tileSizeLow[1], self.tileSizeLow[2], n_t, -1))
    ori_input_shape = np.transpose(ori_input_shape, (0,4,1,2,3,5))
    ori_input_shape = ori_input_shape.reshape((real_batch_sz, self.tileSizeLow[0], self.tileSizeLow[1], self.tileSizeLow[2], -1))
    vel_pos_high_inter = None
    if adv_flag:
        # TODO check velocity channels and 3D
        macgrid_input = ori_input_shape[:, :, :, :, self.c_lists[DATA_KEY_LOW][C_KEY_VELOCITY][0]]
        macgrid_input = macgrid_input.reshape( (real_batch_sz, self.tileSizeLow[0], self.tileSizeLow[1], self.tileSizeLow[2], 3))
        # Per-frame time offsets: n_t//2 * dt down to just past -n_t//2 * dt,
        # repeated for every sequence in the batch.
        dtArray = np.array([i * dt for i in range(n_t // 2, -n_t // 2, -1)] * batch_sz, dtype=np.float32)
        if (self.dim == 2):
            dtArray = dtArray.reshape((-1, 1, 1, 1))
        else:
            dtArray = dtArray.reshape((-1, 1, 1, 1, 1))
        vel_pos_high_inter = getSemiLagrPosBatch(macgrid_input, dtArray, self.tileSizeHigh[1]).reshape((real_batch_sz, -1))
    # return reshape_input, selectedOutputs, vel_pos_high_inter
    batch_high = batch_high.reshape((batch_sz, self.tileSizeHigh[0], self.tileSizeHigh[1], self.tileSizeHigh[2], n_t, -1))
    batch_high = np.transpose(batch_high, (0,4,1,2,3,5))
    return ori_input_shape.reshape((real_batch_sz, -1)), batch_high.reshape((real_batch_sz, -1)), vel_pos_high_inter
# Monkey-patch the temporal batch sampler onto TileCreator.
TileCreator.selectRandomTempoTiles = selectRandomTempoTiles
def pngs_to_gif(path, start_idx=0, end_idx=199, step=1, fps=20, mask="img_%04d.png"):
    """Assemble numbered PNG frames under *path* into an animated gif.

    Reads ``mask % i`` for i in [start_idx, end_idx] (stepping by *step*) and
    writes ``path + 'step<step>.gif'`` at the given frame rate.
    """
    print("creating gif from {} to {} with {} fps".format(mask % start_idx, mask % end_idx, fps))
    gif_path = path + 'step%d.gif' % step
    with imageio.get_writer(gif_path, mode='I', fps=fps) as writer:
        for frame_idx in range(start_idx - 1, end_idx, step):
            frame = imageio.imread(path + (mask % (frame_idx + 1)))
            writer.append_data(frame)
| [
"numpy.rollaxis",
"numpy.array",
"numpy.rot90",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arange",
"imageio.get_writer",
"uniio.readUni",
"numpy.less",
"numpy.flip",
"numpy.reshape",
"uniio.writeUni",
"numpy.isscalar",
"numpy.asarray",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"... | [((1200, 1208), 'random.seed', 'seed', (['(42)'], {}), '(42)\n', (1204, 1208), False, 'from random import seed, random, randrange\n'), ((719, 748), 'imp.find_module', 'imp.find_module', (['"""matplotlib"""'], {}), "('matplotlib')\n", (734, 748), False, 'import imp\n'), ((38705, 38722), 'numpy.asarray', 'np.asarray', (['tiles'], {}), '(tiles)\n', (38715, 38722), True, 'import numpy as np\n'), ((40641, 40675), 'numpy.split', 'np.split', (['data', 'data.shape[-1]', '(-1)'], {}), '(data, data.shape[-1], -1)\n', (40649, 40675), True, 'import numpy as np\n'), ((41872, 41904), 'numpy.split', 'np.split', (['data', 'num_channels', '(-1)'], {}), '(data, num_channels, -1)\n', (41880, 41904), True, 'import numpy as np\n'), ((42565, 42582), 'numpy.asarray', 'np.asarray', (['tiles'], {}), '(tiles)\n', (42575, 42582), True, 'import numpy as np\n'), ((43210, 43227), 'numpy.array', 'np.array', (['img_all'], {}), '(img_all)\n', (43218, 43227), True, 'import numpy as np\n'), ((43240, 43271), 'numpy.concatenate', 'np.concatenate', (['img_all'], {'axis': '(0)'}), '(img_all, axis=0)\n', (43254, 43271), True, 'import numpy as np\n'), ((43284, 43301), 'numpy.array', 'np.array', (['img_all'], {}), '(img_all)\n', (43292, 43301), True, 'import numpy as np\n'), ((43562, 43590), 'uniio.readUni', 'uniio.readUni', (['motherUniPath'], {}), '(motherUniPath)\n', (43575, 43590), False, 'import uniio\n'), ((44232, 44274), 'uniio.writeUni', 'uniio.writeUni', (['savePath', 'head', 'fixedArray'], {}), '(savePath, head, fixedArray)\n', (44246, 44274), False, 'import uniio\n'), ((44943, 44997), 'numpy.linspace', 'np.linspace', (['(0.5)', '(targetshape[3] - 0.5)', 'targetshape[3]'], {}), '(0.5, targetshape[3] - 0.5, targetshape[3])\n', (44954, 44997), True, 'import numpy as np\n'), ((45005, 45059), 'numpy.linspace', 'np.linspace', (['(0.5)', '(targetshape[2] - 0.5)', 'targetshape[2]'], {}), '(0.5, targetshape[2] - 0.5, targetshape[2])\n', (45016, 45059), True, 'import numpy as np\n'), ((45067, 
45121), 'numpy.linspace', 'np.linspace', (['(0.5)', '(targetshape[1] - 0.5)', 'targetshape[1]'], {}), '(0.5, targetshape[1] - 0.5, targetshape[1])\n', (45078, 45121), True, 'import numpy as np\n'), ((45129, 45179), 'numpy.linspace', 'np.linspace', (['(0)', '(targetshape[4] - 1)', 'targetshape[4]'], {}), '(0, targetshape[4] - 1, targetshape[4])\n', (45140, 45179), True, 'import numpy as np\n'), ((45224, 45274), 'numpy.linspace', 'np.linspace', (['(0)', '(targetshape[0] - 1)', 'targetshape[0]'], {}), '(0, targetshape[0] - 1, targetshape[0])\n', (45235, 45274), True, 'import numpy as np\n'), ((45332, 45378), 'numpy.meshgrid', 'np.meshgrid', (['b_', 'z_', 'y_', 'x_', 'c_'], {'indexing': '"""ij"""'}), "(b_, z_, y_, x_, c_, indexing='ij')\n", (45343, 45378), True, 'import numpy as np\n'), ((49163, 49212), 'numpy.transpose', 'np.transpose', (['ori_input_shape', '(0, 4, 1, 2, 3, 5)'], {}), '(ori_input_shape, (0, 4, 1, 2, 3, 5))\n', (49175, 49212), True, 'import numpy as np\n'), ((50191, 50235), 'numpy.transpose', 'np.transpose', (['batch_high', '(0, 4, 1, 2, 3, 5)'], {}), '(batch_high, (0, 4, 1, 2, 3, 5))\n', (50203, 50235), True, 'import numpy as np\n'), ((2633, 2657), 'numpy.isscalar', 'np.isscalar', (['tileSizeLow'], {}), '(tileSizeLow)\n', (2644, 2657), True, 'import numpy as np\n'), ((2941, 2969), 'numpy.asarray', 'np.asarray', (['self.tileSizeLow'], {}), '(self.tileSizeLow)\n', (2951, 2969), True, 'import numpy as np\n'), ((2989, 3012), 'numpy.isscalar', 'np.isscalar', (['simSizeLow'], {}), '(simSizeLow)\n', (3000, 3012), True, 'import numpy as np\n'), ((3291, 3318), 'numpy.asarray', 'np.asarray', (['self.simSizeLow'], {}), '(self.simSizeLow)\n', (3301, 3318), True, 'import numpy as np\n'), ((6224, 6296), 'numpy.append', 'np.append', (['self.tileSizeLow', "[self.data_flags[DATA_KEY_LOW]['channels']]"], {}), "(self.tileSizeLow, [self.data_flags[DATA_KEY_LOW]['channels']])\n", (6233, 6296), True, 'import numpy as np\n'), ((6323, 6394), 'numpy.append', 'np.append', 
(['self.simSizeLow', "[self.data_flags[DATA_KEY_LOW]['channels']]"], {}), "(self.simSizeLow, [self.data_flags[DATA_KEY_LOW]['channels']])\n", (6332, 6394), True, 'import numpy as np\n'), ((10157, 10172), 'numpy.asarray', 'np.asarray', (['low'], {}), '(low)\n', (10167, 10172), True, 'import numpy as np\n'), ((10183, 10199), 'numpy.asarray', 'np.asarray', (['high'], {}), '(high)\n', (10193, 10199), True, 'import numpy as np\n'), ((14156, 14176), 'numpy.isscalar', 'np.isscalar', (['strides'], {}), '(strides)\n', (14167, 14176), True, 'import numpy as np\n'), ((15008, 15023), 'numpy.array', 'np.array', (['tiles'], {}), '(tiles)\n', (15016, 15023), True, 'import numpy as np\n'), ((15150, 15168), 'numpy.asarray', 'np.asarray', (['offset'], {}), '(offset)\n', (15160, 15168), True, 'import numpy as np\n'), ((15184, 15205), 'numpy.asarray', 'np.asarray', (['tileShape'], {}), '(tileShape)\n', (15194, 15205), True, 'import numpy as np\n'), ((18745, 18773), 'numpy.copy', 'np.copy', (['self.tile_shape_low'], {}), '(self.tile_shape_low)\n', (18752, 18773), True, 'import numpy as np\n'), ((18797, 18826), 'numpy.copy', 'np.copy', (['self.tile_shape_high'], {}), '(self.tile_shape_high)\n', (18804, 18826), True, 'import numpy as np\n'), ((21015, 21036), 'numpy.asarray', 'np.asarray', (['low.shape'], {}), '(low.shape)\n', (21025, 21036), True, 'import numpy as np\n'), ((21258, 21273), 'numpy.ceil', 'np.ceil', (['bounds'], {}), '(bounds)\n', (21265, 21273), True, 'import numpy as np\n'), ((21352, 21398), 'numpy.array', 'np.array', (['[self.upres, self.upres, self.upres]'], {}), '([self.upres, self.upres, self.upres])\n', (21360, 21398), True, 'import numpy as np\n'), ((25046, 25082), 'numpy.split', 'np.split', (['datum', 'datum.shape[-1]', '(-1)'], {}), '(datum, datum.shape[-1], -1)\n', (25054, 25082), True, 'import numpy as np\n'), ((25848, 25876), 'numpy.concatenate', 'np.concatenate', (['channels', '(-1)'], {}), '(channels, -1)\n', (25862, 25876), True, 'import numpy as np\n'), 
((26506, 26542), 'numpy.split', 'np.split', (['datum', 'datum.shape[-1]', '(-1)'], {}), '(datum, datum.shape[-1], -1)\n', (26514, 26542), True, 'import numpy as np\n'), ((26724, 26752), 'numpy.concatenate', 'np.concatenate', (['channels', '(-1)'], {}), '(channels, -1)\n', (26738, 26752), True, 'import numpy as np\n'), ((27630, 27666), 'numpy.split', 'np.split', (['datum', 'datum.shape[-1]', '(-1)'], {}), '(datum, datum.shape[-1], -1)\n', (27638, 27666), True, 'import numpy as np\n'), ((27884, 27912), 'numpy.concatenate', 'np.concatenate', (['channels', '(-1)'], {}), '(channels, -1)\n', (27898, 27912), True, 'import numpy as np\n'), ((29081, 29117), 'numpy.split', 'np.split', (['datum', 'datum.shape[-1]', '(-1)'], {}), '(datum, datum.shape[-1], -1)\n', (29089, 29117), True, 'import numpy as np\n'), ((29276, 29304), 'numpy.concatenate', 'np.concatenate', (['channels', '(-1)'], {}), '(channels, -1)\n', (29290, 29304), True, 'import numpy as np\n'), ((29661, 29755), 'numpy.array', 'np.array', (['[[1, 0, 0, offset[0]], [0, 1, 0, offset[1]], [0, 0, 1, offset[2]], [0, 0, 0, 1]\n ]'], {}), '([[1, 0, 0, offset[0]], [0, 1, 0, offset[1]], [0, 0, 1, offset[2]],\n [0, 0, 0, 1]])\n', (29669, 29755), True, 'import numpy as np\n'), ((29771, 29869), 'numpy.array', 'np.array', (['[[1, 0, 0, -offset[0]], [0, 1, 0, -offset[1]], [0, 0, 1, -offset[2]], [0, 0,\n 0, 1]]'], {}), '([[1, 0, 0, -offset[0]], [0, 1, 0, -offset[1]], [0, 0, 1, -offset[2\n ]], [0, 0, 0, 1]])\n', (29779, 29869), True, 'import numpy as np\n'), ((29960, 29983), 'numpy.rollaxis', 'np.rollaxis', (['data', '(3)', '(0)'], {}), '(data, 3, 0)\n', (29971, 29983), True, 'import numpy as np\n'), ((30271, 30302), 'numpy.stack', 'np.stack', (['channel_data'], {'axis': '(-1)'}), '(channel_data, axis=-1)\n', (30279, 30302), True, 'import numpy as np\n'), ((31149, 31171), 'numpy.asarray', 'np.asarray', (['tileBorder'], {}), '(tileBorder)\n', (31159, 31171), True, 'import numpy as np\n'), ((31761, 31790), 'numpy.concatenate', 
'np.concatenate', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (31775, 31790), True, 'import numpy as np\n'), ((37547, 37570), 'numpy.arange', 'np.arange', (['low.shape[4]'], {}), '(low.shape[4])\n', (37556, 37570), True, 'import numpy as np\n'), ((37681, 37705), 'numpy.arange', 'np.arange', (['high.shape[4]'], {}), '(high.shape[4])\n', (37690, 37705), True, 'import numpy as np\n'), ((39049, 39076), 'numpy.concatenate', 'np.concatenate', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (39063, 39076), True, 'import numpy as np\n'), ((39132, 39155), 'numpy.rollaxis', 'np.rollaxis', (['img', '(-1)', '(0)'], {}), '(img, -1, 0)\n', (39143, 39155), True, 'import numpy as np\n'), ((39983, 40004), 'numpy.split', 'np.split', (['tile', '(4)', '(-1)'], {}), '(tile, 4, -1)\n', (39991, 40004), True, 'import numpy as np\n'), ((41058, 41110), 'numpy.concatenate', 'np.concatenate', (['[channels[v[0]], channels[v[1]]]', '(-1)'], {}), '([channels[v[0]], channels[v[1]]], -1)\n', (41072, 41110), True, 'import numpy as np\n'), ((41124, 41148), 'numpy.average', 'np.average', (['vavg'], {'axis': '(0)'}), '(vavg, axis=0)\n', (41134, 41148), True, 'import numpy as np\n'), ((43021, 43034), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (43029, 43034), True, 'import numpy as np\n'), ((43044, 43071), 'numpy.concatenate', 'np.concatenate', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (43058, 43071), True, 'import numpy as np\n'), ((43092, 43105), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (43100, 43105), True, 'import numpy as np\n'), ((43150, 43172), 'numpy.rollaxis', 'np.rollaxis', (['img', '(0)', '(0)'], {}), '(img, 0, 0)\n', (43161, 43172), True, 'import numpy as np\n'), ((43711, 43769), 'numpy.zeros', 'np.zeros', (['(imageHeight, imageWidth, imageDepth)'], {'dtype': '"""f"""'}), "((imageHeight, imageWidth, imageDepth), dtype='f')\n", (43719, 43769), True, 'import numpy as np\n'), ((43980, 44041), 'numpy.zeros', 'np.zeros', (['(imageHeight, imageWidth, imageDepth, 
3)'], {'dtype': '"""f"""'}), "((imageHeight, imageWidth, imageDepth, 3), dtype='f')\n", (43988, 44041), True, 'import numpy as np\n'), ((47736, 47774), 'numpy.meshgrid', 'np.meshgrid', (['z_', 'y_', 'x_'], {'indexing': '"""ij"""'}), "(z_, y_, x_, indexing='ij')\n", (47747, 47774), True, 'import numpy as np\n'), ((47789, 47817), 'numpy.stack', 'np.stack', (['(z, y, x)'], {'axis': '(-1)'}), '((z, y, x), axis=-1)\n', (47797, 47817), True, 'import numpy as np\n'), ((47950, 47984), 'numpy.meshgrid', 'np.meshgrid', (['y_', 'x_'], {'indexing': '"""ij"""'}), "(y_, x_, indexing='ij')\n", (47961, 47984), True, 'import numpy as np\n'), ((47999, 48024), 'numpy.stack', 'np.stack', (['(y, x)'], {'axis': '(-1)'}), '((y, x), axis=-1)\n', (48007, 48024), True, 'import numpy as np\n'), ((50601, 50666), 'imageio.get_writer', 'imageio.get_writer', (["(path + 'step%d.gif' % step)"], {'mode': '"""I"""', 'fps': 'fps'}), "(path + 'step%d.gif' % step, mode='I', fps=fps)\n", (50619, 50666), False, 'import imageio\n'), ((3556, 3571), 'numpy.asarray', 'np.asarray', (['[1]'], {}), '([1])\n', (3566, 3571), True, 'import numpy as np\n'), ((3595, 3610), 'numpy.asarray', 'np.asarray', (['[1]'], {}), '([1])\n', (3605, 3610), True, 'import numpy as np\n'), ((6475, 6549), 'numpy.append', 'np.append', (['self.tileSizeHigh', "[self.data_flags[DATA_KEY_HIGH]['channels']]"], {}), "(self.tileSizeHigh, [self.data_flags[DATA_KEY_HIGH]['channels']])\n", (6484, 6549), True, 'import numpy as np\n'), ((6578, 6651), 'numpy.append', 'np.append', (['self.simSizeHigh', "[self.data_flags[DATA_KEY_HIGH]['channels']]"], {}), "(self.simSizeHigh, [self.data_flags[DATA_KEY_HIGH]['channels']])\n", (6587, 6651), True, 'import numpy as np\n'), ((15567, 15604), 'numpy.array_equal', 'np.array_equal', (['tile.shape', 'tileShape'], {}), '(tile.shape, tileShape)\n', (15581, 15604), True, 'import numpy as np\n'), ((16958, 16979), 'numpy.asarray', 'np.asarray', (['batch_low'], {}), '(batch_low)\n', (16968, 16979), True, 'import 
numpy as np\n'), ((16981, 17003), 'numpy.asarray', 'np.asarray', (['batch_high'], {}), '(batch_high)\n', (16991, 17003), True, 'import numpy as np\n'), ((18075, 18086), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (18083, 18086), True, 'import numpy as np\n'), ((18456, 18497), 'numpy.random.choice', 'np.random.choice', (['self.cube_rot[self.dim]'], {}), '(self.cube_rot[self.dim])\n', (18472, 18497), True, 'import numpy as np\n'), ((18607, 18626), 'numpy.random.choice', 'np.random.choice', (['(4)'], {}), '(4)\n', (18623, 18626), True, 'import numpy as np\n'), ((19458, 19490), 'random.randrange', 'randrange', (['(0)', 'self.setBorders[0]'], {}), '(0, self.setBorders[0])\n', (19467, 19490), False, 'from random import seed, random, randrange\n'), ((19513, 19562), 'random.randrange', 'randrange', (['self.setBorders[0]', 'self.setBorders[1]'], {}), '(self.setBorders[0], self.setBorders[1])\n', (19522, 19562), False, 'from random import seed, random, randrange\n'), ((19621, 19654), 'random.randrange', 'randrange', (['(0)', '(self.dim_t - tile_t)'], {}), '(0, self.dim_t - tile_t)\n', (19630, 19654), False, 'from random import seed, random, randrange\n'), ((20888, 20916), 'numpy.copy', 'np.copy', (['self.tile_shape_low'], {}), '(self.tile_shape_low)\n', (20895, 20916), True, 'import numpy as np\n'), ((21546, 21572), 'numpy.amin', 'np.amin', (['(end - start)[:3]'], {}), '((end - start)[:3])\n', (21553, 21572), True, 'import numpy as np\n'), ((28345, 28379), 'numpy.array', 'np.array', (['data[DATA_KEY_LOW].shape'], {}), '(data[DATA_KEY_LOW].shape)\n', (28353, 28379), True, 'import numpy as np\n'), ((28448, 28469), 'numpy.append', 'np.append', (['[1]', 'scale'], {}), '([1], scale)\n', (28457, 28469), True, 'import numpy as np\n'), ((29613, 29641), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0]'], {}), '([0.5, 0.5, 0.5, 0])\n', (29621, 29641), True, 'import numpy as np\n'), ((29891, 29930), 'numpy.dot', 'np.dot', (['offset_matrix', 'transform_matrix'], {}), 
'(offset_matrix, transform_matrix)\n', (29897, 29930), True, 'import numpy as np\n'), ((40044, 40065), 'numpy.split', 'np.split', (['tile', '(2)', '(-1)'], {}), '(tile, 2, -1)\n', (40052, 40065), True, 'import numpy as np\n'), ((40867, 40919), 'numpy.concatenate', 'np.concatenate', (['[channels[v[0]], channels[v[1]]]', '(-1)'], {}), '([channels[v[0]], channels[v[1]]], -1)\n', (40881, 40919), True, 'import numpy as np\n'), ((41259, 41311), 'numpy.concatenate', 'np.concatenate', (['[channels[v[0]], channels[v[2]]]', '(-1)'], {}), '([channels[v[0]], channels[v[2]]], -1)\n', (41273, 41311), True, 'import numpy as np\n'), ((41326, 41350), 'numpy.average', 'np.average', (['vavg'], {'axis': '(1)'}), '(vavg, axis=1)\n', (41336, 41350), True, 'import numpy as np\n'), ((41426, 41478), 'numpy.concatenate', 'np.concatenate', (['[channels[v[1]], channels[v[2]]]', '(-1)'], {}), '([channels[v[1]], channels[v[2]]], -1)\n', (41440, 41478), True, 'import numpy as np\n'), ((41493, 41517), 'numpy.average', 'np.average', (['vavg'], {'axis': '(2)'}), '(vavg, axis=2)\n', (41503, 41517), True, 'import numpy as np\n'), ((42064, 42132), 'numpy.concatenate', 'np.concatenate', (['[channels[i[0]], channels[i[1]], channels[i[2]]]', '(-1)'], {}), '([channels[i[0]], channels[i[1]], channels[i[2]]], -1)\n', (42078, 42132), True, 'import numpy as np\n'), ((46775, 46813), 'numpy.concatenate', 'np.concatenate', (['(add_y, add_x)'], {'axis': '(4)'}), '((add_y, add_x), axis=4)\n', (46789, 46813), True, 'import numpy as np\n'), ((50737, 50774), 'imageio.imread', 'imageio.imread', (['(path + mask % (i + 1))'], {}), '(path + mask % (i + 1))\n', (50751, 50774), False, 'import imageio\n'), ((3744, 3786), 'numpy.less', 'np.less', (['self.simSizeLow', 'self.tileSizeLow'], {}), '(self.simSizeLow, self.tileSizeLow)\n', (3751, 3786), True, 'import numpy as np\n'), ((15246, 15297), 'numpy.less', 'np.less', (['data.shape[:3]', '(tileShape[:3] + offset[:3])'], {}), '(data.shape[:3], tileShape[:3] + offset[:3])\n', 
(15253, 15297), True, 'import numpy as np\n'), ((17724, 17761), 'numpy.ceil', 'np.ceil', (['(self.tile_shape_low * factor)'], {}), '(self.tile_shape_low * factor)\n', (17731, 17761), True, 'import numpy as np\n'), ((18910, 18968), 'numpy.array_equal', 'np.array_equal', (['data[DATA_KEY_LOW].shape', 'target_shape_low'], {}), '(data[DATA_KEY_LOW].shape, target_shape_low)\n', (18924, 18968), True, 'import numpy as np\n'), ((20242, 20321), 'numpy.copy', 'np.copy', (['self.data[DATA_KEY_LOW][index // self.dim_t][:, :, :, begin_ch:end_ch]'], {}), '(self.data[DATA_KEY_LOW][index // self.dim_t][:, :, :, begin_ch:end_ch])\n', (20249, 20321), True, 'import numpy as np\n'), ((20318, 20408), 'numpy.copy', 'np.copy', (['self.data[DATA_KEY_HIGH][index // self.dim_t][:, :, :, begin_ch_y:end_c_h_y]'], {}), '(self.data[DATA_KEY_HIGH][index // self.dim_t][:, :, :, begin_ch_y:\n end_c_h_y])\n', (20325, 20408), True, 'import numpy as np\n'), ((20419, 20498), 'numpy.copy', 'np.copy', (['self.data[DATA_KEY_LOW][index // self.dim_t][:, :, :, begin_ch:end_ch]'], {}), '(self.data[DATA_KEY_LOW][index // self.dim_t][:, :, :, begin_ch:end_ch])\n', (20426, 20498), True, 'import numpy as np\n'), ((20495, 20549), 'numpy.copy', 'np.copy', (['self.data[DATA_KEY_HIGH][index // self.dim_t]'], {}), '(self.data[DATA_KEY_HIGH][index // self.dim_t])\n', (20502, 20549), True, 'import numpy as np\n'), ((21314, 21324), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (21321, 21324), True, 'import numpy as np\n'), ((23316, 23339), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (23333, 23339), True, 'import numpy as np\n'), ((23597, 23621), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(4)'}), '(size=4)\n', (23613, 23621), True, 'import numpy as np\n'), ((23633, 23653), 'numpy.linalg.norm', 'np.linalg.norm', (['quat'], {}), '(quat)\n', (23647, 23653), True, 'import numpy as np\n'), ((23712, 23942), 'numpy.array', 'np.array', (['[[1 - q[2, 2] - q[3, 3], q[1, 2] - 
q[3, 0], q[1, 3] + q[2, 0], 0], [q[1, 2] +\n q[3, 0], 1 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0], [q[1, 3] - q[2, \n 0], q[2, 3] + q[1, 0], 1 - q[1, 1] - q[2, 2], 0], [0, 0, 0, 1]]'], {}), '([[1 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0],\n [q[1, 2] + q[3, 0], 1 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0], [q[1,\n 3] - q[2, 0], q[2, 3] + q[1, 0], 1 - q[1, 1] - q[2, 2], 0], [0, 0, 0, 1]])\n', (23720, 23942), True, 'import numpy as np\n'), ((25350, 25390), 'numpy.reshape', 'np.reshape', (['vel[0]', 'channels[v[2]].shape'], {}), '(vel[0], channels[v[2]].shape)\n', (25360, 25390), True, 'import numpy as np\n'), ((25413, 25453), 'numpy.reshape', 'np.reshape', (['vel[1]', 'channels[v[1]].shape'], {}), '(vel[1], channels[v[1]].shape)\n', (25423, 25453), True, 'import numpy as np\n'), ((25476, 25516), 'numpy.reshape', 'np.reshape', (['vel[2]', 'channels[v[0]].shape'], {}), '(vel[2], channels[v[0]].shape)\n', (25486, 25516), True, 'import numpy as np\n'), ((25548, 25600), 'numpy.concatenate', 'np.concatenate', (['[channels[v[1]], channels[v[0]]]', '(-1)'], {}), '([channels[v[1]], channels[v[0]]], -1)\n', (25562, 25600), True, 'import numpy as np\n'), ((25664, 25688), 'numpy.reshape', 'np.reshape', (['vel', '(-1, 2)'], {}), '(vel, (-1, 2))\n', (25674, 25688), True, 'import numpy as np\n'), ((25753, 25773), 'numpy.split', 'np.split', (['vel', '(2)', '(-1)'], {}), '(vel, 2, -1)\n', (25761, 25773), True, 'import numpy as np\n'), ((26251, 26286), 'numpy.rot90', 'np.rot90', (['data[data_key]'], {'axes': 'axes'}), '(data[data_key], axes=axes)\n', (26259, 26286), True, 'import numpy as np\n'), ((27063, 27079), 'numpy.asarray', 'np.asarray', (['axes'], {}), '(axes)\n', (27073, 27079), True, 'import numpy as np\n'), ((27082, 27101), 'numpy.ones', 'np.ones', (['axes.shape'], {}), '(axes.shape)\n', (27089, 27101), True, 'import numpy as np\n'), ((29586, 29606), 'numpy.array', 'np.array', (['data.shape'], {}), '(data.shape)\n', (29594, 29606), True, 'import numpy as 
np\n'), ((31701, 31737), 'numpy.concatenate', 'np.concatenate', (['frame_slices'], {'axis': '(1)'}), '(frame_slices, axis=1)\n', (31715, 31737), True, 'import numpy as np\n'), ((32070, 32098), 'numpy.split', 'np.split', (['tile', '[1]'], {'axis': '(-1)'}), '(tile, [1], axis=-1)\n', (32078, 32098), True, 'import numpy as np\n'), ((37235, 37266), 'numpy.average', 'np.average', (['high'], {'axis': '(axis + 1)'}), '(high, axis=axis + 1)\n', (37245, 37266), True, 'import numpy as np\n'), ((37286, 37316), 'numpy.average', 'np.average', (['low'], {'axis': '(axis + 1)'}), '(low, axis=axis + 1)\n', (37296, 37316), True, 'import numpy as np\n'), ((38966, 39030), 'numpy.concatenate', 'np.concatenate', (['tiles[offset:offset + tiles_in_image[1]]'], {'axis': '(1)'}), '(tiles[offset:offset + tiles_in_image[1]], axis=1)\n', (38980, 39030), True, 'import numpy as np\n'), ((40947, 40989), 'numpy.concatenate', 'np.concatenate', (['[vavg, channels[v[2]]]', '(-1)'], {}), '([vavg, channels[v[2]]], -1)\n', (40961, 40989), True, 'import numpy as np\n'), ((42938, 43002), 'numpy.concatenate', 'np.concatenate', (['tiles[offset:offset + tiles_in_image[1]]'], {'axis': '(2)'}), '(tiles[offset:offset + tiles_in_image[1]], axis=2)\n', (42952, 43002), True, 'import numpy as np\n'), ((46612, 46658), 'numpy.concatenate', 'np.concatenate', (['(add_z, add_y, add_x)'], {'axis': '(-1)'}), '((add_z, add_y, add_x), axis=-1)\n', (46626, 46658), True, 'import numpy as np\n'), ((11976, 12022), 'numpy.array_equal', 'np.array_equal', (['low_shape', 'self.tile_shape_low'], {}), '(low_shape, self.tile_shape_low)\n', (11990, 12022), True, 'import numpy as np\n'), ((12030, 12078), 'numpy.array_equal', 'np.array_equal', (['high_shape', 'self.tile_shape_high'], {}), '(high_shape, self.tile_shape_high)\n', (12044, 12078), True, 'import numpy as np\n'), ((12536, 12596), 'numpy.array_equal', 'np.array_equal', (['single_frame_low_shape', 'self.frame_shape_low'], {}), '(single_frame_low_shape, self.frame_shape_low)\n', 
(12550, 12596), True, 'import numpy as np\n'), ((12604, 12666), 'numpy.array_equal', 'np.array_equal', (['single_frame_high_shape', 'self.frame_shape_high'], {}), '(single_frame_high_shape, self.frame_shape_high)\n', (12618, 12666), True, 'import numpy as np\n'), ((17616, 17675), 'numpy.random.uniform', 'np.random.uniform', (['self.scaleFactor[0]', 'self.scaleFactor[1]'], {}), '(self.scaleFactor[0], self.scaleFactor[1])\n', (17633, 17675), True, 'import numpy as np\n'), ((18142, 18176), 'numpy.array', 'np.array', (['data[DATA_KEY_LOW].shape'], {}), '(data[DATA_KEY_LOW].shape)\n', (18150, 18176), True, 'import numpy as np\n'), ((18976, 19036), 'numpy.array_equal', 'np.array_equal', (['data[DATA_KEY_HIGH].shape', 'target_shape_high'], {}), '(data[DATA_KEY_HIGH].shape, target_shape_high)\n', (18990, 19036), True, 'import numpy as np\n'), ((21803, 21830), 'random.randrange', 'randrange', (['start[0]', 'end[0]'], {}), '(start[0], end[0])\n', (21812, 21830), False, 'from random import seed, random, randrange\n'), ((21832, 21859), 'random.randrange', 'randrange', (['start[1]', 'end[1]'], {}), '(start[1], end[1])\n', (21841, 21859), False, 'from random import seed, random, randrange\n'), ((21861, 21888), 'random.randrange', 'randrange', (['start[2]', 'end[2]'], {}), '(start[2], end[2])\n', (21870, 21888), False, 'from random import seed, random, randrange\n'), ((23667, 23687), 'numpy.outer', 'np.outer', (['quat', 'quat'], {}), '(quat, quat)\n', (23675, 23687), True, 'import numpy as np\n'), ((27248, 27277), 'numpy.flip', 'np.flip', (['data[data_key]', 'axis'], {}), '(data[data_key], axis)\n', (27255, 27277), True, 'import numpy as np\n'), ((28300, 28334), 'numpy.array', 'np.array', (['data[DATA_KEY_LOW].shape'], {}), '(data[DATA_KEY_LOW].shape)\n', (28308, 28334), True, 'import numpy as np\n'), ((31186, 31197), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (31194, 31197), True, 'import numpy as np\n'), ((31614, 31674), 'numpy.concatenate', 'np.concatenate', 
(['tiles[offset:offset + frameShape[2]]'], {'axis': '(2)'}), '(tiles[offset:offset + frameShape[2]], axis=2)\n', (31628, 31674), True, 'import numpy as np\n'), ((41011, 41023), 'numpy.max', 'np.max', (['vavg'], {}), '(vavg)\n', (41017, 41023), True, 'import numpy as np\n'), ((14937, 14966), 'numpy.pad', 'np.pad', (['currTile', 'pad', '"""edge"""'], {}), "(currTile, pad, 'edge')\n", (14943, 14966), True, 'import numpy as np\n'), ((23402, 23415), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (23408, 23415), True, 'import numpy as np\n'), ((23453, 23466), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (23459, 23466), True, 'import numpy as np\n'), ((23468, 23481), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (23474, 23481), True, 'import numpy as np\n'), ((42005, 42033), 'numpy.ones_like', 'np.ones_like', (['channels[i[0]]'], {}), '(channels[i[0]])\n', (42017, 42033), True, 'import numpy as np\n'), ((23418, 23431), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (23424, 23431), True, 'import numpy as np\n')] |
'''
analysis
~~~~~~~~
This module collects all useful functions to be used
by the worker when performing a fuzzing analysis
'''
import os
import random
import string
from config import Config
import logging as logger
import time
import worker_timeout as wt
import shutil
import seeds_generator as sg
import ast
import numpy as np
import sklearn.feature_selection as fs
from jni_extractor.method_abstraction import SootMethod
# Module-wide configuration object.
conf = Config()
# Root directory on the emulator where all fuzzing artifacts live.
WORKSPACE = '/data/local/tmp/workspace'
# On-emulator directory for the target native libraries (.so files).
LIBS = os.path.join(WORKSPACE, 'libs')
# On-emulator directory holding the AFL seed input files.
INPUT = os.path.join(WORKSPACE, 'inputs')
# On-emulator directory where AFL writes its findings.
OUTPUT = os.path.join(WORKSPACE, 'outputs')
# Full on-emulator path of the fuzzing harness binary.
FUZZ_ME = os.path.join(WORKSPACE, conf.fuzz_me_bin)
class NotBootedError(Exception):
    """Raised when an operation requires a booted emulator but it is not booted."""
    # No override needed: Exception.__init__ already stores the message,
    # so the previous pass-through __init__ was redundant.
class AnalysisCompleted(Exception):
    """Signals that a fuzzing analysis run has finished.

    Attributes:
        dry_run: True when the completed run was only a dry run.
    """

    def __init__(self, message, dry_run=False):
        # Keep the dry-run flag so callers can distinguish run types.
        self.dry_run = dry_run
        super().__init__(message)
class AnalysisAborted(Exception):
    """Raised when the fuzzing analysis cannot proceed.

    The class-level constants enumerate the machine-readable abort codes
    carried in the ``code`` attribute.
    """

    ONLY_INTEGER = 'only_int'
    TYPE_NOT_SUPPORTED = 'type_not_supported'
    ISA_NOT_FOUND = 'isa_not_found'
    DRY_RUN_ERROR = 'dry_run_error'
    EMULATOR_FAILED = 'emulator_failure'

    def __init__(self, message, code):
        # Store the abort code so callers can react programmatically.
        self.code = code
        super().__init__(message)
class AflDryRunError(Exception):
    """Raised when the AFL dry run fails."""
    # No override needed: Exception.__init__ already stores the message,
    # so the previous pass-through __init__ was redundant.
class AflDryRunSeedError(AflDryRunError):
    """Raised when the AFL dry run fails because of a bad seed input."""
    # No override needed: the inherited Exception.__init__ already stores
    # the message, so the previous pass-through __init__ was redundant.
def _check_booted(emu):
if not emu.booted:
logger.error("emulator {} was not booted".format(emu.name))
raise NotBootedError("emulator {} was not booted".format(emu.name))
def gen_input_from_signature(signature):
    """Generate random seed inputs for *signature*.

    Returns whatever ``seeds_generator.generate_seeds`` produces; per the
    caller in ``init_emulator``, that is a sequence of seed strings, or
    None when the signature's parameter types are unsupported.
    """
    # TODO (clod) this method has to be thought through properly as it is
    # crucial to have proper input. For now I just use a lame random
    # implementation
    return sg.generate_seeds(signature)
def init_emulator(emu, apk_id, signature, isa):
    """Initialize *emu* so it can fuzz *signature* for the *isa* library build.

    Pushes the AFL binary and the fuzzing harness to the emulator workspace,
    writes the target signature, generates seed input files, and pushes the
    native libraries for the given ISA.

    **NOTE** The emulator should be booted, or `NotBootedError` is thrown.

    Args:
        (AndroidEmulator) emu: the emulator we want to initialize
        (str) apk_id: id of the APK whose native libraries are fuzzed
        (str) signature: signature that needs to be fuzzed
        (str) isa: isa of the library and emulator we try to extract signature from

    Raises:
        NotBootedError: if `emu` is not booted.
        AnalysisAborted: if seed generation does not support the signature's
            types, or the `isa` build of the library is missing on the server.
    """
    _check_booted(emu)
    # Create the workspace and push the ISA-specific fuzzing tool-chain.
    emu.adb.run_shell_command("mkdir {}".format(WORKSPACE)) \
        .push_file_to_emu(conf.afl_path + '_' + isa, WORKSPACE) \
        .push_file_to_emu(conf.fuzz_me + '_' + isa, WORKSPACE) \
        .run_shell_command("mkdir {}".format(INPUT)) \
        .run_shell_command("mkdir {}".format(OUTPUT))
    # set up signature
    emu.adb.run_shell_command(
        "echo '{}' >> {}/signatures.txt".format(signature, WORKSPACE))
    # TODO(clod): this currently only deals with integers
    # Also, I generate all the possible combinations of (positive, negative, 0)
    # this should probably change for too long signatures as we are generating
    # 3^(#param) seed files.
    seeds = gen_input_from_signature(signature)
    if seeds is None:
        raise AnalysisAborted(
            "We currently support only Strings and integer like signatures", code=AnalysisAborted.TYPE_NOT_SUPPORTED)
    # Write one seed file per generated seed (in0, in1, ...).
    for i in range(len(seeds)):
        seed = seeds[i]
        emu.adb.run_shell_command(
            "echo '{}' >> {}/in{}".format(seed, INPUT, i))
    # set up libraries (.so)#
    server_libs_path = os.path.join(conf.libraries, apk_id, isa)
    if not os.path.isdir(server_libs_path):
        logger.error(
            "{} version of the library not found. Analysis aborted".format(isa))
        raise AnalysisAborted("{} version of the library not found. Analysis aborted".format(
            isa), code=AnalysisAborted.ISA_NOT_FOUND)
    lib_names = os.listdir(server_libs_path)
    # Record the on-emulator path of every library, one per line.
    lib_names_emu_path = list(
        map(lambda x: os.path.join(LIBS, isa, x), lib_names))
    lib_names_str = "\n".join(lib_names_emu_path)
    # Push the libraries, write the library list, and reset the fuzzed
    # parameter index to 0.
    emu.adb.push_file_to_emu(os.path.join(conf.libraries, apk_id, isa), os.path.join(LIBS, isa)) \
        .run_shell_command("echo -n '' >> {}/libraries.txt".format(WORKSPACE)) \
        .run_shell_command("echo '{}' >> {}/libraries.txt".format(lib_names_str, WORKSPACE)) \
        .run_shell_command("echo '0' > {}/parameter".format(WORKSPACE))
    logger.info("initialized emulator {} [yes]".format(emu.name))
def clean_emulator(emu):
    """Reset the emulator to a clean state by deleting its workspace folder.

    **NOTE** The emulator must already be booted, or `NotBootedError` is thrown.

    Param:
        (AndroidEmulator) emu: the emulator to clean
    """
    _check_booted(emu)
    cleanup_cmd = "rm -rf {}".format(WORKSPACE)
    emu.adb.run_shell_command(cleanup_cmd)
    logger.info("cleaned emulator {}:{} [yes]".format(emu.name, emu.port))
@wt.timeout
def _afl_analysis(db, job_id, emu, timeout=60):
    """Drive one fuzzing run: rotate the fuzzed parameter and report progress.

    Blocks until the `wt.timeout` decorator fires (the loop itself never
    exits), updating the job's `progress` field in the database every 10
    seconds and telling the on-emulator fuzzer which parameter to fuzz.

    Param:
        db: pymongo connection holding the `jobs` collection (may be None)
        job_id: id of the job document to update (may be None)
        (AndroidEmulator) emu: emulator where the fuzzer is running
        timeout (int): total length of the analysis in seconds
    """
    job = db.jobs.find_one({'_id': job_id})
    soot_method = SootMethod(job['data']['signature'])
    n_param = len(soot_method.get_parameter_types())
    # Split the run into (n_param + 1) equal slots so that the last slot can
    # fuzz all inputs at the same time.  Guard against timeouts shorter than
    # the number of slots, which would make the modulus below divide by zero.
    switch_interval = max(1, timeout // (n_param + 1))
    progress = 0
    switch_param = 0
    while True:
        if (progress % switch_interval) == 0:
            # tell the fuzzer which parameter to work on next
            emu.adb.run_shell_command(
                "echo '{}' > {}/parameter".format(switch_param, WORKSPACE))
            switch_param += 1
        if (progress % 10) == 0:
            # every 10 seconds update the progress shown to the user
            percentage = float(progress)/timeout * 100
            if db is not None and job_id is not None:
                db.jobs.find_one_and_update(
                    {'_id': job_id}, {'$set': {'progress': percentage}})
        progress += 1
        # wait until the analysis is completed (i.e. the timeout is triggered)
        time.sleep(1)
def start_afl_analysis(emu, db, job_id, timeout, isa):
    """Start the afl fuzzing analysis on the provided emulator.

    The analysis blocks the execution for `timeout` seconds.  After the
    timeout an `AnalysisCompleted` is raised, indicating the end of the
    analysis.  Unrecoverable failures raise `AnalysisAborted` instead.
    The analysis updates a progress in the database, which can be used to
    notify the user of the current status.

    **NOTE** The emulator should be booted, or `NotBootedError` is thrown

    Param:
        (AndroidEmulator) `emu`: emulator where the analysis should run
        `db` (pymongo connection): a connection to the database where `job_id` is stored in the jobs collection
        `job_id` (str): id of the job to update
        `timeout` (int): length in seconds of the analysis
        `isa` (str): which ISA flavour of the afl binary/libraries to run
    """
    _check_booted(emu)
    # Launch afl non-blocking inside the workspace; stdout is redirected to
    # afl.log so _check_afl_dry_run can poll it.
    emu.adb.run_shell_command(
        'cd {}; export LD_LIBRARY_PATH={}/{}; {} -i {} -o {} -t 60000 -m 50MB -n {} > {}'.format(
            WORKSPACE, LIBS, isa, './'+conf.afl_bin + '_' + isa, INPUT, OUTPUT, FUZZ_ME + '_' + isa, WORKSPACE+'/afl.log'),
        blocking=False)
    logger.info("analysis started on {} [yes]".format(emu.name))
    # TODO (clod) do a smarter timeout based on the parameters of the signature
    # under examination. However, sometimes it may require longer for some
    # reasons. Therefore, let's stick to 10mins for now
    try:
        _check_afl_dry_run(emu, timeout=600)
    except AflDryRunSeedError:
        # the seed inputs were probably causing a crash.
        # it is in principle still interesting, so we save the results;
        # we raise an AnalysisCompleted meaning the analysis is finished
        logger.info(
            "completed with dry-run error emulator {}:{}".format(emu.name, emu.port))
        logger.warning(
            "completed with dry-run error. It usually indicates that seeds crashed or hanged the program")
        raise AnalysisCompleted(
            "completed with dry-run error emulator {}:{}".format(emu.name, emu.port), dry_run=True)
    except(AflDryRunError, wt.TimedOutExc) as ex:
        # something went badly wrong, so we need to stop the analysis completely
        logger.exception(ex)
        logger.error("analysis aborted on emulator {}:{}".format(
            emu.name, emu.port))
        raise AnalysisAborted(
            "analysis aborted on emulator {}:{}".format(emu.name, emu.port), code=AnalysisAborted.DRY_RUN_ERROR)
    try:
        _afl_analysis(db, job_id, emu, timeout=timeout)
    except wt.TimedOutExc:
        # _afl_analysis is expected to end via its timeout decorator
        logger.info("analysis completed on emulator {}:{}".format(
            emu.name, emu.port))
        raise AnalysisCompleted(
            "analysis completed on emulator {}:{}".format(emu.name, emu.port))
def extract_afl_analysis_result(emu, destination):
    """Pull the afl workspace from the emulator into `destination` and zip it.

    Params:
        (AndroidEmulator) `emu`: emulator to pull the workspace from
        (str) `destination`: location where to extract the results
    Raises:
        ValueError: if `destination` is not an existing folder.
    """
    if not os.path.isdir(destination):
        raise ValueError(
            "destination `{}` must be an existing folder!".format(destination))
    emu.adb.pull_file_from_emu(WORKSPACE, destination)
    workspace_copy = os.path.join(destination, 'workspace')
    # produce <destination>/workspace.zip next to the extracted folder
    shutil.make_archive(workspace_copy, 'zip', workspace_copy)
@wt.timeout
def _check_afl_dry_run(emu, timeout=60):
    '''Poll afl's log until the dry run has completed successfully.

    afl's stdout is redirected to afl.log in the workspace; we repeatedly read
    that file and look for the status strings afl prints.  A seed crashing or
    hanging the target makes afl abort ("PROGRAM ABORT") and raises
    AflDryRunSeedError; "SYSTEM ERROR" raises AflDryRunError.  To be on the
    safe side the wt.timeout decorator acts as an upper bound: a good timeout
    (seconds) comes from assuming it should not take longer than 1min (the
    hang timeout we set in AFL) per input.
    '''
    read_log_cmd = 'cat {}'.format(WORKSPACE+'/afl.log')
    while True:
        out = emu.adb.run_and_read_shell_command(read_log_cmd)
        if "PROGRAM ABORT" in out:
            logger.warning(
                "seeds lead to afl dry run failure on {}:{}".format(emu.name, emu.port))
            raise AflDryRunSeedError(
                "seeds lead to afl dry run failure on {}:{}".format(emu.name, emu.port))
        elif "SYSTEM ERROR" in out:
            logger.error("afl dry run failure on {}:{}".format(
                emu.name, emu.port))
            raise AflDryRunError(
                "afl dry run failure on {}:{}".format(emu.name, emu.port))
        if "All set and ready to roll!" in out:
            break
    logger.info("dry run emulator {}:{} [yes]".format(emu.name, emu.port))
    return True
def _generate_taint_model(io_path):
    '''Parse the fuzzer's io.txt records and estimate per-parameter influence.

    Each line of `io.txt` is a Python literal list whose first element is the
    index of the fuzzed parameter and whose last element is the observed
    return value; the slice in between holds the parameter values.  Duplicate
    records are dropped, then the mutual information between the parameter
    matrix and the return values is computed, squashed through `sigmoid`,
    rounded to 2 decimals and printed.

    **NOTE** This should be called only after AnalysisCompleted has been
    triggered, with `io_path` pointing at the `io.txt` produced at the
    location used for `extract_afl_analysis_result`.

    Param:
        str io_path: path to the `io.txt` file.
    '''
    param_rows = []
    return_values = []
    processed = set()
    # `with` guarantees the file is closed even if a line fails to parse
    with open(io_path, 'r') as f:
        for entry in f:
            afl_entry = ast.literal_eval(entry.strip())
            # we do not want duplicates
            afl_tuple = tuple(afl_entry)
            if afl_tuple in processed:
                continue
            processed.add(afl_tuple)
            # afl_entry[0] is the index of the fuzzed param and afl_entry[-1]
            # is the return value; the middle holds the parameter values
            param_rows.append(afl_entry[1:len(afl_entry)-1])
            return_values.append(afl_entry[-1])
    # Build the matrices in one shot: repeated np.vstack in the loop was
    # O(n^2), and a single record now yields a proper 2-D (1, n) matrix.
    X = np.array(param_rows)
    Y = np.array(return_values)
    result = fs.mutual_info_classif(X, Y)
    result = [round(sigmoid(x), 2) for x in result]
    print(result)
def sigmoid(x):
return 1 / (1 + np.exp(-x)) | [
"sklearn.feature_selection.mutual_info_classif",
"os.listdir",
"config.Config",
"seeds_generator.generate_seeds",
"os.path.join",
"logging.warning",
"time.sleep",
"logging.exception",
"numpy.append",
"numpy.array",
"jni_extractor.method_abstraction.SootMethod",
"os.path.isdir",
"numpy.exp",
... | [((436, 444), 'config.Config', 'Config', ([], {}), '()\n', (442, 444), False, 'from config import Config\n'), ((493, 524), 'os.path.join', 'os.path.join', (['WORKSPACE', '"""libs"""'], {}), "(WORKSPACE, 'libs')\n", (505, 524), False, 'import os\n'), ((533, 566), 'os.path.join', 'os.path.join', (['WORKSPACE', '"""inputs"""'], {}), "(WORKSPACE, 'inputs')\n", (545, 566), False, 'import os\n'), ((576, 610), 'os.path.join', 'os.path.join', (['WORKSPACE', '"""outputs"""'], {}), "(WORKSPACE, 'outputs')\n", (588, 610), False, 'import os\n'), ((621, 662), 'os.path.join', 'os.path.join', (['WORKSPACE', 'conf.fuzz_me_bin'], {}), '(WORKSPACE, conf.fuzz_me_bin)\n', (633, 662), False, 'import os\n'), ((2271, 2299), 'seeds_generator.generate_seeds', 'sg.generate_seeds', (['signature'], {}), '(signature)\n', (2288, 2299), True, 'import seeds_generator as sg\n'), ((3933, 3974), 'os.path.join', 'os.path.join', (['conf.libraries', 'apk_id', 'isa'], {}), '(conf.libraries, apk_id, isa)\n', (3945, 3974), False, 'import os\n'), ((4288, 4316), 'os.listdir', 'os.listdir', (['server_libs_path'], {}), '(server_libs_path)\n', (4298, 4316), False, 'import os\n'), ((5361, 5397), 'jni_extractor.method_abstraction.SootMethod', 'SootMethod', (["job['data']['signature']"], {}), "(job['data']['signature'])\n", (5371, 5397), False, 'from jni_extractor.method_abstraction import SootMethod\n'), ((9571, 9609), 'os.path.join', 'os.path.join', (['destination', '"""workspace"""'], {}), "(destination, 'workspace')\n", (9583, 9609), False, 'import os\n'), ((12395, 12423), 'sklearn.feature_selection.mutual_info_classif', 'fs.mutual_info_classif', (['X', 'Y'], {}), '(X, Y)\n', (12417, 12423), True, 'import sklearn.feature_selection as fs\n'), ((3987, 4018), 'os.path.isdir', 'os.path.isdir', (['server_libs_path'], {}), '(server_libs_path)\n', (4000, 4018), False, 'import os\n'), ((6268, 6281), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6278, 6281), False, 'import time\n'), ((9363, 9389), 
'os.path.isdir', 'os.path.isdir', (['destination'], {}), '(destination)\n', (9376, 9389), False, 'import os\n'), ((9678, 9716), 'os.path.join', 'os.path.join', (['destination', '"""workspace"""'], {}), "(destination, 'workspace')\n", (9690, 9716), False, 'import os\n'), ((8136, 8254), 'logging.warning', 'logger.warning', (['"""complted with dry-run error. It usually indicates that seeds crashed or hanged the program"""'], {}), "(\n 'complted with dry-run error. It usually indicates that seeds crashed or hanged the program'\n )\n", (8150, 8254), True, 'import logging as logger\n'), ((8529, 8549), 'logging.exception', 'logger.exception', (['ex'], {}), '(ex)\n', (8545, 8549), True, 'import logging as logger\n'), ((12242, 12253), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (12250, 12253), True, 'import numpy as np\n'), ((12270, 12281), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (12278, 12281), True, 'import numpy as np\n'), ((12316, 12333), 'numpy.vstack', 'np.vstack', (['(X, x)'], {}), '((X, x))\n', (12325, 12333), True, 'import numpy as np\n'), ((12350, 12365), 'numpy.append', 'np.append', (['Y', 'y'], {}), '(Y, y)\n', (12359, 12365), True, 'import numpy as np\n'), ((12566, 12576), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (12572, 12576), True, 'import numpy as np\n'), ((4370, 4396), 'os.path.join', 'os.path.join', (['LIBS', 'isa', 'x'], {}), '(LIBS, isa, x)\n', (4382, 4396), False, 'import os\n'), ((4490, 4531), 'os.path.join', 'os.path.join', (['conf.libraries', 'apk_id', 'isa'], {}), '(conf.libraries, apk_id, isa)\n', (4502, 4531), False, 'import os\n'), ((4533, 4556), 'os.path.join', 'os.path.join', (['LIBS', 'isa'], {}), '(LIBS, isa)\n', (4545, 4556), False, 'import os\n')] |
import torch
import torch.nn as nn
import numpy as np
from sklearn.linear_model import LogisticRegression as Logit
from scipy.optimize import brentq
from models.layers import StochasticLinear as SLinear
from models.layers import NotStochasticLinear as Linear
from models.layers import BoundedStochasticModel
class SMiniWikiModel(BoundedStochasticModel):
    """Single stochastic bias-free linear layer on unit-magnitude inputs."""

    def __init__(self, input_dim, output_dim, radius):
        super().__init__(radius, mag_input=1)
        self.names = ('layer1',)
        self.layer1 = SLinear(input_dim, output_dim, bias=False)

    def forward(self, x):
        # the noise scale is provided by the bounded stochastic base class
        return self.layer1(x, self.get_scale())
class MiniWikiModel(BoundedStochasticModel):
    """Deterministic counterpart of SMiniWikiModel: one bias-free linear layer."""

    def __init__(self, input_dim, output_dim, radius):
        super().__init__(radius, mag_input=1)
        self.names = ('layer1',)
        self.layer1 = Linear(input_dim, output_dim, bias=False)

    def forward(self, x):
        # scale comes from the bounded stochastic base class
        return self.layer1(x, self.get_scale())
class LogReg(nn.Module):
    """Logistic-regression scorer: a single bias-free linear map."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # keep the dimensions around for callers that introspect the model
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.linear = nn.Linear(input_dim, output_dim, bias=False)

    def forward(self, x):
        """Map a batch of inputs to raw class scores."""
        return self.linear(x)
class ConstrainedLogit(Logit):
    """Logistic regression whose coefficient norm is pinned to `radius`.

    `fit` uses Brent's root finding over log10(C) in `interval` to locate the
    regularisation strength for which the fitted coefficient vector has
    Euclidean norm exactly `radius`.
    """
    def __init__(self, radius=1.0, interval=[-4, 4], **kwargs):
        # NOTE(review): the sklearn base __init__ is deliberately deferred to
        # `logit` (where C is known), so the estimator is only fully
        # initialised after `fit` has been called — confirm this is intended.
        self.radius = radius
        self.interval = interval
        self.kwargs = kwargs
    def logit(self, logC, X, Y):
        """Fit at C = 10**logC and return ||coef|| - radius (brentq's root target)."""
        super().__init__(C=10 ** logC, fit_intercept=False, **self.kwargs, max_iter=1000)
        super().fit(X, Y)
        return np.linalg.norm(self.coef_) - self.radius
    def fit(self, X, Y):
        """Search `interval` for the logC whose coefficient norm equals `radius`."""
        brentq(self.logit, *self.interval, args=(X, Y), full_output=True)
| [
"scipy.optimize.brentq",
"models.layers.StochasticLinear",
"torch.nn.Linear",
"numpy.linalg.norm",
"models.layers.NotStochasticLinear"
] | [((512, 554), 'models.layers.StochasticLinear', 'SLinear', (['input_dim', 'output_dim'], {'bias': '(False)'}), '(input_dim, output_dim, bias=False)\n', (519, 554), True, 'from models.layers import StochasticLinear as SLinear\n'), ((855, 896), 'models.layers.NotStochasticLinear', 'Linear', (['input_dim', 'output_dim'], {'bias': '(False)'}), '(input_dim, output_dim, bias=False)\n', (861, 896), True, 'from models.layers import NotStochasticLinear as Linear\n'), ((1201, 1251), 'torch.nn.Linear', 'torch.nn.Linear', (['input_dim', 'output_dim'], {'bias': '(False)'}), '(input_dim, output_dim, bias=False)\n', (1216, 1251), False, 'import torch\n'), ((1737, 1802), 'scipy.optimize.brentq', 'brentq', (['self.logit', '*self.interval'], {'args': '(X, Y)', 'full_output': '(True)'}), '(self.logit, *self.interval, args=(X, Y), full_output=True)\n', (1743, 1802), False, 'from scipy.optimize import brentq\n'), ((1662, 1688), 'numpy.linalg.norm', 'np.linalg.norm', (['self.coef_'], {}), '(self.coef_)\n', (1676, 1688), True, 'import numpy as np\n')] |
import numpy as np
class surface:
    """A triangular, partially reflective surface for simple ray tracing.

    On construction several quantities are cached: the surface normal (scaled
    by 1/|n|^2, not unit length), the vertex indices of the longest edge, and
    the projection of the remaining vertex onto that edge.
    """

    def __init__(self, vertices=None, reflectivity=1.):
        """Params:
            vertices: list of three 3D points; defaults to the unit right
                triangle in the z=0 plane.
            reflectivity: fraction of intensity a reflected ray keeps.
        Raises:
            ValueError: if `vertices` is not a list of exactly three
                3-element points.
        """
        # Build the default afresh on every call: a mutable default argument
        # would be shared between all invocations.
        if vertices is None:
            vertices = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]
        if (type(vertices) != list):
            raise ValueError("vertices must be of type list")
        if (len(vertices) != 3):
            raise ValueError("Surface must have exactly 3 vertices")
        if (len(vertices[0]) != 3) or (len(vertices[1]) != 3) or (len(vertices[2]) != 3):
            raise ValueError("Vertices must be a 3D vector (length 3)")
        self.vertices = np.array(vertices)
        self.reflectivity = reflectivity
        # Cross product of two edges, rescaled by 1/|n|^2.
        self.normalVector = np.cross(self.vertices[1]-self.vertices[0], self.vertices[2]-self.vertices[0])
        self.normalVector *= 1./np.dot(self.normalVector, self.normalVector)
        # Indices for longest line on surface
        self.longestEdge = self.longestEdgeIndeces()
        print("Longest edge from vertex {} ({}) to vertex {} ({})".format(self.longestEdge[0], self.vertices[self.longestEdge[0]], self.longestEdge[1], self.vertices[self.longestEdge[1]]))
        # Leftover index which is not on longest line
        self.vertexShortIndex = [x for x in range(3) if x not in self.longestEdge][0]
        self.vertexShort = self.vertices[self.vertexShortIndex]
        print("Vertex not on longest edge: {} ({})".format(self.vertexShortIndex, self.vertexShort))
        # Determine leftover vertex position relative to longest line
        # (0 or 1 for right-angle triangles).
        self.vertexShortTranslationFactor = self.translationFactor(self.vertexShort)
        print("Vertex translation factor for vertex {}: {}".format(self.vertexShortIndex, self.vertexShortTranslationFactor))
        self.vertexShortNormalIntersection = self.vertices[self.longestEdge[0]] + self.vertexShortTranslationFactor*(self.vertices[self.longestEdge[1]]-self.vertices[self.longestEdge[0]])
        self.vertexShortNormalVector = self.vertexShort - self.vertexShortNormalIntersection

    def dot(self, vector1, vector2):
        "Dot product for the inner-most array elements"
        return np.sum(vector1*vector2, axis=-1)

    def mag(self, vector):
        "Euclidean length along the inner-most axis"
        return np.sqrt(self.dot(vector, vector))

    def longestEdgeIndeces(self):
        """Return [start, end] vertex indices of the triangle's longest edge.

        Edge i runs from vertex i to vertex (i+1) % 3.
        """
        edgeVectors = self.vertices - np.roll(self.vertices, -1, axis=0)
        edgeLengths = np.array([self.dot(v, v) for v in edgeVectors])
        argMax = np.argmax(edgeLengths)
        return [argMax, (argMax+1)%3]

    def translationFactor(self, coord):
        """Project `coord` onto the longest edge: 0 at its start, 1 at its end."""
        x = self.vertices[self.longestEdge[0]]
        y = self.vertices[self.longestEdge[1]]
        z = coord
        return self.dot(y-x, z-x)/self.dot(y-x, y-x)

    def getNormalVector(self, vectorStart, vectorFinish, coord):
        """Vector from `coord`'s projection onto segment start->finish to `coord`.

        Works for a single 3D `coord` or a batch of shape (n, 3).
        """
        x = vectorStart
        y = vectorFinish
        z = coord
        translationFactor = self.dot(y-x, z-x)/self.dot(y-x, y-x)
        if type(translationFactor) != np.ndarray:
            coordIntersect = x + translationFactor*(y-x)
        else:
            # batched coords: broadcast the per-point factor over the edge
            coordIntersect = x + translationFactor[:,None]*(y-x)[None,:]
        return coord - coordIntersect

    def intersectSurface(self, vectorOrigin, vectorDirection, intensity):
        '''
        Reflect the incident rays that intersect this surface.

        vectorOrigin and vectorDirection must be of the 2D form:
        [[a1,a2,a3],[b1,b2,b3],[b1,b2,b3],...,[h1,h2,h3]]
        All three arrays are updated in place for the intersecting rays and
        also returned: origins move to the intersection point, directions are
        mirrored about the normal, intensity is scaled by `reflectivity`.
        '''
        if vectorOrigin.ndim == 1:
            raise ValueError("Input vectors must be 2D")
        indices = np.arange(len(vectorOrigin))
        intersectionPoints, conditionIntersect = self.intersect(vectorOrigin, vectorDirection)
        indices = indices[conditionIntersect]
        intersectionPoints = intersectionPoints[conditionIntersect]
        if len(indices) > 0:
            # Mirror the incoming direction about the surface normal; the sign
            # keeps the reflection on the side the ray arrived from.
            vectorDirectionNew = vectorDirection[conditionIntersect] + 2*self.normalVector[None,:]*np.sign(self.dot(-self.normalVector, vectorDirection[conditionIntersect]))[:,None]
            vectorOrigin[indices] = intersectionPoints
            vectorDirection[indices] = vectorDirectionNew
            intensity[indices] *= self.reflectivity
        return vectorOrigin, vectorDirection, intensity

    def intersectionPointOnPlane(self, vectorOrigin, vectorDirection):
        '''
        This surface is defined on a 2D plane. For a given vector, see where
        the intersection point is with the plane.
        This can be used to determine whether the intersection point is on the
        actual surface.  Rays parallel to the plane are marked NaN in place
        and reported as non-intersecting.
        '''
        conditionParallel = self.dot(vectorDirection, self.normalVector) == 0.
        vectorOrigin[conditionParallel] *= np.nan
        vectorDirection[conditionParallel] *= np.nan
        translationFactor = self.dot(self.vertices[0]-vectorOrigin, self.normalVector)/self.dot(vectorDirection, self.normalVector)
        return vectorOrigin + translationFactor[...,None]*vectorDirection, np.invert(conditionParallel)

    def intersect(self, vectorOrigin, vectorDirection):
        """Return the plane intersection points and a mask of rays hitting the triangle.

        A point is inside the triangle iff, for each vertex, it lies on the
        same side of the opposite edge as that vertex (compared via the
        edge-normal vectors below).
        """
        intersectionPoints, condition0 = self.intersectionPointOnPlane(vectorOrigin, vectorDirection)
        vec0 = self.getNormalVector(self.vertices[1], self.vertices[2], self.vertices[0])
        vec1 = self.getNormalVector(self.vertices[0], self.vertices[2], self.vertices[1])
        vec2 = self.getNormalVector(self.vertices[1], self.vertices[0], self.vertices[2])
        vecIntersectionPoint0 = self.getNormalVector(self.vertices[1], self.vertices[2], intersectionPoints)
        vecIntersectionPoint1 = self.getNormalVector(self.vertices[0], self.vertices[2], intersectionPoints)
        vecIntersectionPoint2 = self.getNormalVector(self.vertices[1], self.vertices[0], intersectionPoints)
        condition1 = self.dot(vecIntersectionPoint0, vec0) >= 0
        condition2 = self.dot(vecIntersectionPoint1, vec1) >= 0
        condition3 = self.dot(vecIntersectionPoint2, vec2) >= 0
        return intersectionPoints, condition0*condition1*condition2*condition3
"numpy.roll",
"numpy.cross",
"numpy.argmax",
"numpy.invert",
"numpy.sum",
"numpy.array",
"numpy.dot"
] | [((550, 568), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (558, 568), True, 'import numpy as np\n'), ((650, 737), 'numpy.cross', 'np.cross', (['(self.vertices[1] - self.vertices[0])', '(self.vertices[2] - self.vertices[0])'], {}), '(self.vertices[1] - self.vertices[0], self.vertices[2] - self.\n vertices[0])\n', (658, 737), True, 'import numpy as np\n'), ((2175, 2209), 'numpy.sum', 'np.sum', (['(vector1 * vector2)'], {'axis': '(-1)'}), '(vector1 * vector2, axis=-1)\n', (2181, 2209), True, 'import numpy as np\n'), ((2659, 2681), 'numpy.argmax', 'np.argmax', (['edgeLengths'], {}), '(edgeLengths)\n', (2668, 2681), True, 'import numpy as np\n'), ((762, 806), 'numpy.dot', 'np.dot', (['self.normalVector', 'self.normalVector'], {}), '(self.normalVector, self.normalVector)\n', (768, 806), True, 'import numpy as np\n'), ((2446, 2480), 'numpy.roll', 'np.roll', (['self.vertices', '(-1)'], {'axis': '(0)'}), '(self.vertices, -1, axis=0)\n', (2453, 2480), True, 'import numpy as np\n'), ((5631, 5659), 'numpy.invert', 'np.invert', (['conditionParallel'], {}), '(conditionParallel)\n', (5640, 5659), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Brief: 实现模型分类的网络,MAML与网络结构无关,重点在训练过程
from tensorflow.keras import layers, models, losses
import tensorflow as tf
import numpy as np
class MAML:
    def __init__(self, input_shape, num_classes):
        """
        MAML wrapper.  MAML needs the weights in two roles: the real meta
        weights theta and the task-adapted weights theta'.  Because we must
        not disturb the meta model's weights and optimizer state while
        computing theta', a single Keras model is reused and its weights are
        saved/restored around every task (see `train_on_batch`).
        :param input_shape: input shape of the model
        :param num_classes: number of output classes
        """
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.meta_model = self.get_maml_model()
        # separate tensorboard step counters for inner/outer loop logging
        self.inner_writer_step = 0
        self.outer_writer_step = 0

    def get_maml_model(self):
        """
        Build the standard 4-conv-block MAML classifier.
        :return: maml model
        """
        model = models.Sequential([
            layers.Conv2D(filters=64, kernel_size=3, padding='same', activation="relu",
                          input_shape=self.input_shape),
            layers.BatchNormalization(),
            layers.MaxPool2D(pool_size=2, strides=2),
            layers.Conv2D(filters=64, kernel_size=3, padding='same', activation="relu"),
            layers.BatchNormalization(),
            layers.MaxPool2D(pool_size=2, strides=2),
            layers.Conv2D(filters=64, kernel_size=3, padding='same', activation="relu"),
            layers.BatchNormalization(),
            layers.MaxPool2D(pool_size=2, strides=2),
            layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"),
            layers.BatchNormalization(),
            layers.MaxPool2D(pool_size=2, strides=2),
            layers.Flatten(),
            layers.Dense(self.num_classes, activation='softmax'),
        ])
        return model

    def train_on_batch(self, train_data, inner_optimizer, inner_step, outer_optimizer=None, writer=None):
        """
        Run one MAML training step over a batch of tasks.
        :param train_data: iterator yielding one batch of tasks
        :param inner_optimizer: optimizer used on the support set
        :param inner_step: number of inner-loop update steps
        :param outer_optimizer: optimizer for the query set; if None the
            meta gradient is not applied
        :param writer: optional summary writer for tensorboard
        :return: (mean query loss, mean query accuracy) over the batch
        """
        batch_acc = []
        batch_loss = []
        meta_support_image, meta_support_label, meta_query_image, meta_query_label = next(train_data)
        for support_image, support_label, query_image, query_label in zip(meta_support_image, meta_support_label,
                                                                          meta_query_image, meta_query_label):
            # save the meta weights theta so they can be restored after the
            # task-specific (inner) updates
            meta_weights = self.meta_model.get_weights()
            for _ in range(inner_step):
                with tf.GradientTape() as tape:
                    logits = self.meta_model(support_image, training=True)
                    loss = losses.sparse_categorical_crossentropy(support_label, logits)
                    loss = tf.reduce_mean(loss)
                    # Bug fix: support accuracy must be measured against the
                    # support labels (it previously compared with query_label,
                    # inconsistent with the loss and the 'support_accuracy' tag).
                    acc = (np.argmax(logits, -1) == support_label).astype(np.int32).mean()
                grads = tape.gradient(loss, self.meta_model.trainable_variables)
                inner_optimizer.apply_gradients(zip(grads, self.meta_model.trainable_variables))
                if writer:
                    with writer.as_default():
                        tf.summary.scalar('support_loss', loss, step=self.inner_writer_step)
                        tf.summary.scalar('support_accuracy', acc, step=self.inner_writer_step)
                        self.inner_writer_step += 1
            # with the adapted weights theta' still loaded, compute the loss
            # on the query set
            with tf.GradientTape() as tape:
                logits = self.meta_model(query_image, training=True)
                loss = losses.sparse_categorical_crossentropy(query_label, logits)
                loss = tf.reduce_mean(loss)
                batch_loss.append(loss)
                acc = (np.argmax(logits, -1) == query_label).astype(np.int32).mean()
                batch_acc.append(acc)
            # restore the initial meta weights; the query gradient (computed
            # at theta') is then applied to theta
            self.meta_model.set_weights(meta_weights)
            if outer_optimizer:
                grads = tape.gradient(loss, self.meta_model.trainable_variables)
                outer_optimizer.apply_gradients(zip(grads, self.meta_model.trainable_variables))
            if writer:
                with writer.as_default():
                    tf.summary.scalar('query_loss', loss, step=self.outer_writer_step)
                    tf.summary.scalar('query_accuracy', acc, step=self.outer_writer_step)
                    self.outer_writer_step += 1
        return np.array(batch_loss).mean(), np.array(batch_acc).mean()
| [
"tensorflow.keras.layers.Conv2D",
"numpy.argmax",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.GradientTape",
"numpy.array",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.reduce_mean",
"tensorflow.keras.layers.Flatten",
"tensor... | [((774, 883), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': 'self.input_shape'}), "(filters=64, kernel_size=3, padding='same', activation='relu',\n input_shape=self.input_shape)\n", (787, 883), False, 'from tensorflow.keras import layers, models, losses\n'), ((919, 946), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (944, 946), False, 'from tensorflow.keras import layers, models, losses\n'), ((960, 1000), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (976, 1000), False, 'from tensorflow.keras import layers, models, losses\n'), ((1015, 1090), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, padding='same', activation='relu')\n", (1028, 1090), False, 'from tensorflow.keras import layers, models, losses\n'), ((1104, 1131), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1129, 1131), False, 'from tensorflow.keras import layers, models, losses\n'), ((1145, 1185), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (1161, 1185), False, 'from tensorflow.keras import layers, models, losses\n'), ((1200, 1275), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, padding='same', activation='relu')\n", (1213, 1275), False, 'from tensorflow.keras import layers, models, losses\n'), ((1289, 1316), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1314, 1316), False, 'from tensorflow.keras 
import layers, models, losses\n'), ((1330, 1370), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (1346, 1370), False, 'from tensorflow.keras import layers, models, losses\n'), ((1385, 1460), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, padding='same', activation='relu')\n", (1398, 1460), False, 'from tensorflow.keras import layers, models, losses\n'), ((1474, 1501), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1499, 1501), False, 'from tensorflow.keras import layers, models, losses\n'), ((1515, 1555), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (1531, 1555), False, 'from tensorflow.keras import layers, models, losses\n'), ((1570, 1586), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1584, 1586), False, 'from tensorflow.keras import layers, models, losses\n'), ((1600, 1652), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""softmax"""'}), "(self.num_classes, activation='softmax')\n", (1612, 1652), False, 'from tensorflow.keras import layers, models, losses\n'), ((3561, 3578), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3576, 3578), True, 'import tensorflow as tf\n'), ((3680, 3739), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'losses.sparse_categorical_crossentropy', (['query_label', 'logits'], {}), '(query_label, logits)\n', (3718, 3739), False, 'from tensorflow.keras import layers, models, losses\n'), ((3763, 3783), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (3777, 3783), True, 'import tensorflow as tf\n'), ((2662, 2679), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), 
'()\n', (2677, 2679), True, 'import tensorflow as tf\n'), ((2791, 2852), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'losses.sparse_categorical_crossentropy', (['support_label', 'logits'], {}), '(support_label, logits)\n', (2829, 2852), False, 'from tensorflow.keras import layers, models, losses\n'), ((2880, 2900), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2894, 2900), True, 'import tensorflow as tf\n'), ((4584, 4604), 'numpy.array', 'np.array', (['batch_loss'], {}), '(batch_loss)\n', (4592, 4604), True, 'import numpy as np\n'), ((4613, 4632), 'numpy.array', 'np.array', (['batch_acc'], {}), '(batch_acc)\n', (4621, 4632), True, 'import numpy as np\n'), ((3268, 3336), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""support_loss"""', 'loss'], {'step': 'self.inner_writer_step'}), "('support_loss', loss, step=self.inner_writer_step)\n", (3285, 3336), True, 'import tensorflow as tf\n'), ((3361, 3432), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""support_accuracy"""', 'acc'], {'step': 'self.inner_writer_step'}), "('support_accuracy', acc, step=self.inner_writer_step)\n", (3378, 3432), True, 'import tensorflow as tf\n'), ((4355, 4421), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""query_loss"""', 'loss'], {'step': 'self.outer_writer_step'}), "('query_loss', loss, step=self.outer_writer_step)\n", (4372, 4421), True, 'import tensorflow as tf\n'), ((4446, 4515), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""query_accuracy"""', 'acc'], {'step': 'self.outer_writer_step'}), "('query_accuracy', acc, step=self.outer_writer_step)\n", (4463, 4515), True, 'import tensorflow as tf\n'), ((3848, 3869), 'numpy.argmax', 'np.argmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (3857, 3869), True, 'import numpy as np\n'), ((2929, 2950), 'numpy.argmax', 'np.argmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (2938, 2950), True, 'import numpy as np\n')] |
import os
import numpy as np
import json
import random
from PIL import Image
from PIL import ImageDraw
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
class DatasetBase(Dataset):
"""Base dataset for VITON-GAN.
"""
def __init__(self, opt, mode, data_list, train=True):
super(DatasetBase, self).__init__()
self.data_path = os.path.join(opt.data_root, mode)
self.train = train
self.fine_height = opt.fine_height
self.fine_width = opt.fine_width
self.radius = opt.radius
self.transform = transforms.Compose([
transforms.ToTensor(), # [0,255] to [0,1]
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # [0,1] to [-1,1]
])
person_names = []
cloth_names = []
with open(os.path.join(opt.data_root, data_list), 'r') as f:
for line in f.readlines():
person_name, cloth_name = line.strip().split()
person_names.append(person_name)
cloth_names.append(cloth_name)
self.person_names = person_names
self.cloth_names = cloth_names
    def __len__(self):
        # One dataset item per (person, cloth) pair read from the data list.
        return len(self.person_names)
def _get_mask_arrays(self, person_parse):
"""Split person_parse array into mask channels
"""
shape = (person_parse > 0).astype(np.float32)
head = (person_parse == 1).astype(np.float32) + \
(person_parse == 2).astype(np.float32) + \
(person_parse == 4).astype(np.float32) + \
(person_parse == 13).astype(np.float32) # Hat, Hair, Sunglasses, Face
head = (head > 0).astype(np.float32)
cloth = (person_parse == 5).astype(np.float32) + \
(person_parse == 6).astype(np.float32) + \
(person_parse == 7).astype(np.float32) # Upper-clothes, Dress, Coat
cloth = (cloth > 0).astype(np.float32)
body = (person_parse == 1).astype(np.float32) + \
(person_parse == 2).astype(np.float32) + \
(person_parse == 3).astype(np.float32) + \
(person_parse == 4).astype(np.float32) + \
(person_parse > 7).astype(np.float32) # Neither cloth nor background
body = (body > 0).astype(np.float32)
return shape, head, cloth, body # [0,1]
def _downsample(self, im):
im = im.resize((self.fine_width//16, self.fine_height//16), Image.BILINEAR)
return im.resize((self.fine_width, self.fine_height), Image.BILINEAR)
def _load_pose(self, pose_name):
"""Load pose json file
"""
with open(os.path.join(self.data_path, 'pose', pose_name), 'r') as f:
pose_label = json.load(f)
pose_data = pose_label['people'][0]['pose_keypoints']
pose_data = np.array(pose_data)
pose_data = pose_data.reshape((-1,3))
point_num = pose_data.shape[0]
feature_pose_tensor = torch.zeros(point_num, self.fine_height, self.fine_width) # 18 channels
r = self.radius
pose_im = Image.new('L', (self.fine_width, self.fine_height)) # For visualization
pose_draw = ImageDraw.Draw(pose_im)
for i in range(point_num):
one_map = Image.new('L', (self.fine_width, self.fine_height))
draw = ImageDraw.Draw(one_map)
pointx = pose_data[i,0]
pointy = pose_data[i,1]
if pointx > 1 and pointy > 1:
draw.rectangle((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')
pose_draw.rectangle((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')
one_map = self.transform(one_map)
feature_pose_tensor[i] = one_map[0]
pose_tensor = self.transform(pose_im) # [-1,1]
return feature_pose_tensor, pose_tensor
def _get_item_base(self, index):
# Person
person_name = self.person_names[index]
person_im = Image.open(os.path.join(self.data_path, 'person', person_name))
person_tensor = self.transform(person_im) # [-1,1]
# Person-parse
parse_name = person_name.replace('.jpg', '.png')
person_parse = Image.open(os.path.join(self.data_path, 'person-parse', parse_name))
person_parse = np.array(person_parse) # shape: (256,192,3)
shape_mask, head_mask, cloth_mask, body_mask = self._get_mask_arrays(person_parse)
shape_im = Image.fromarray((shape_mask*255).astype(np.uint8))
feature_shape_tensor = self.transform(self._downsample(shape_im)) # [-1,1]
head_mask_tensor = torch.from_numpy(head_mask) # [0,1]
feature_head_tensor = person_tensor * head_mask_tensor - (1 - head_mask_tensor) # [-1,1], fill -1 for other parts
cloth_mask_tensor = torch.from_numpy(cloth_mask) # [0,1]
cloth_parse_tensor = person_tensor * cloth_mask_tensor + (1 - cloth_mask_tensor) # [-1,1], fill 1 for other parts
body_mask_tensor = torch.from_numpy(body_mask).unsqueeze(0) # Tensor [0,1]
# Pose keypoints
pose_name = person_name.replace('.jpg', '_keypoints.json')
feature_pose_tensor, pose_tensor = self._load_pose(pose_name)
# Cloth-agnostic representation
feature_tensor = torch.cat([feature_shape_tensor, feature_head_tensor, feature_pose_tensor], 0)
data = {
'person_name': person_name, # For visualization or ground truth
'person': person_tensor, # For visualization or ground truth
'feature': feature_tensor, # For input
'pose': pose_tensor, # For visualization
'head': feature_head_tensor, # For visualization
'shape': feature_shape_tensor, # For visualization
'cloth_parse': cloth_parse_tensor, # For ground truth
'body_mask': body_mask_tensor # For ground truth
}
return data
def binarized_tensor(arr):
    """Threshold an image array at 128 into a binary {0, 1} float tensor.

    Pixels with value >= 128 map to 1.0, all others to 0.0. A leading
    channel axis is added, so the result has shape (1, *arr.shape).
    """
    return torch.from_numpy((arr >= 128).astype('float32')).unsqueeze(0)
def random_horizontal_flip(data):
    """Mirror every tensor in ``data`` along the width axis half the time.

    Entries whose key contains 'name' (file-name strings) are skipped.
    The dict is modified in place and returned in both cases.
    """
    if random.random() < 0.5:
        return data
    for key in data:
        if 'name' not in key:
            data[key] = torch.flip(data[key], [2])  # dim 2 = width
    return data
class GMMDataset(DatasetBase):
    """Dataset for the geometric matching (GMM) stage: adds the in-shop
    cloth, its binary mask and a reference grid image to the base sample."""

    def __getitem__(self, index):
        name = self.cloth_names[index]
        cloth_path = os.path.join(self.data_path, 'cloth', name)
        mask_path = os.path.join(self.data_path, 'cloth-mask', name)
        cloth = self.transform(Image.open(cloth_path))  # [-1,1]
        cloth_mask = binarized_tensor(np.array(Image.open(mask_path)))
        grid = self.transform(Image.open('grid.png'))  # warped for visualization
        sample = self._get_item_base(index)
        sample['cloth_name'] = name        # For visualization or input
        sample['cloth'] = cloth            # For visualization or input
        sample['cloth_mask'] = cloth_mask  # For input
        sample['grid'] = grid              # For visualization
        if self.train:
            sample = random_horizontal_flip(sample)  # Data augmentation
        return sample
class TOMDataset(DatasetBase):
    """Dataset for the try-on module (TOM) stage: adds the GMM-warped
    cloth and its binary mask to the base sample."""

    def __getitem__(self, index):
        name = self.cloth_names[index]
        warp_path = os.path.join(self.data_path, 'warp-cloth', name)
        mask_path = os.path.join(self.data_path, 'warp-cloth-mask', name)
        cloth = self.transform(Image.open(warp_path))  # [-1,1]
        cloth_mask = binarized_tensor(np.array(Image.open(mask_path)))
        sample = self._get_item_base(index)
        sample['cloth_name'] = name        # For visualization or input
        sample['cloth'] = cloth            # For visualization or input
        sample['cloth_mask'] = cloth_mask  # For input
        if self.train:
            sample = random_horizontal_flip(sample)  # Data augmentation
        return sample
"PIL.Image.open",
"PIL.Image.new",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"PIL.ImageDraw.Draw",
"torch.flip",
"torchvision.transforms.Normalize",
"json.load",
"random.random",
"torchvision.transforms.ToTensor",
"torch.zeros",
"torch.cat"
] | [((6153, 6168), 'random.random', 'random.random', ([], {}), '()\n', (6166, 6168), False, 'import random\n'), ((408, 441), 'os.path.join', 'os.path.join', (['opt.data_root', 'mode'], {}), '(opt.data_root, mode)\n', (420, 441), False, 'import os\n'), ((3027, 3084), 'torch.zeros', 'torch.zeros', (['point_num', 'self.fine_height', 'self.fine_width'], {}), '(point_num, self.fine_height, self.fine_width)\n', (3038, 3084), False, 'import torch\n'), ((3141, 3192), 'PIL.Image.new', 'Image.new', (['"""L"""', '(self.fine_width, self.fine_height)'], {}), "('L', (self.fine_width, self.fine_height))\n", (3150, 3192), False, 'from PIL import Image\n'), ((3233, 3256), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['pose_im'], {}), '(pose_im)\n', (3247, 3256), False, 'from PIL import ImageDraw\n'), ((4349, 4371), 'numpy.array', 'np.array', (['person_parse'], {}), '(person_parse)\n', (4357, 4371), True, 'import numpy as np\n'), ((4664, 4691), 'torch.from_numpy', 'torch.from_numpy', (['head_mask'], {}), '(head_mask)\n', (4680, 4691), False, 'import torch\n'), ((4850, 4878), 'torch.from_numpy', 'torch.from_numpy', (['cloth_mask'], {}), '(cloth_mask)\n', (4866, 4878), False, 'import torch\n'), ((5328, 5406), 'torch.cat', 'torch.cat', (['[feature_shape_tensor, feature_head_tensor, feature_pose_tensor]', '(0)'], {}), '([feature_shape_tensor, feature_head_tensor, feature_pose_tensor], 0)\n', (5337, 5406), False, 'import torch\n'), ((6841, 6863), 'PIL.Image.open', 'Image.open', (['"""grid.png"""'], {}), "('grid.png')\n", (6851, 6863), False, 'from PIL import Image\n'), ((2785, 2797), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2794, 2797), False, 'import json\n'), ((2888, 2907), 'numpy.array', 'np.array', (['pose_data'], {}), '(pose_data)\n', (2896, 2907), True, 'import numpy as np\n'), ((3314, 3365), 'PIL.Image.new', 'Image.new', (['"""L"""', '(self.fine_width, self.fine_height)'], {}), "('L', (self.fine_width, self.fine_height))\n", (3323, 3365), False, 'from PIL import Image\n'), 
((3385, 3408), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['one_map'], {}), '(one_map)\n', (3399, 3408), False, 'from PIL import ImageDraw\n'), ((4041, 4092), 'os.path.join', 'os.path.join', (['self.data_path', '"""person"""', 'person_name'], {}), "(self.data_path, 'person', person_name)\n", (4053, 4092), False, 'import os\n'), ((4268, 4324), 'os.path.join', 'os.path.join', (['self.data_path', '"""person-parse"""', 'parse_name'], {}), "(self.data_path, 'person-parse', parse_name)\n", (4280, 4324), False, 'import os\n'), ((6063, 6085), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (6079, 6085), False, 'import torch\n'), ((6553, 6602), 'os.path.join', 'os.path.join', (['self.data_path', '"""cloth"""', 'cloth_name'], {}), "(self.data_path, 'cloth', cloth_name)\n", (6565, 6602), False, 'import os\n'), ((6697, 6751), 'os.path.join', 'os.path.join', (['self.data_path', '"""cloth-mask"""', 'cloth_name'], {}), "(self.data_path, 'cloth-mask', cloth_name)\n", (6709, 6751), False, 'import os\n'), ((6798, 6821), 'numpy.array', 'np.array', (['cloth_mask_im'], {}), '(cloth_mask_im)\n', (6806, 6821), True, 'import numpy as np\n'), ((7455, 7509), 'os.path.join', 'os.path.join', (['self.data_path', '"""warp-cloth"""', 'cloth_name'], {}), "(self.data_path, 'warp-cloth', cloth_name)\n", (7467, 7509), False, 'import os\n'), ((7604, 7663), 'os.path.join', 'os.path.join', (['self.data_path', '"""warp-cloth-mask"""', 'cloth_name'], {}), "(self.data_path, 'warp-cloth-mask', cloth_name)\n", (7616, 7663), False, 'import os\n'), ((7710, 7733), 'numpy.array', 'np.array', (['cloth_mask_im'], {}), '(cloth_mask_im)\n', (7718, 7733), True, 'import numpy as np\n'), ((648, 669), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (667, 669), True, 'import torchvision.transforms as transforms\n'), ((706, 760), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', 
(726, 760), True, 'import torchvision.transforms as transforms\n'), ((877, 915), 'os.path.join', 'os.path.join', (['opt.data_root', 'data_list'], {}), '(opt.data_root, data_list)\n', (889, 915), False, 'import os\n'), ((2700, 2747), 'os.path.join', 'os.path.join', (['self.data_path', '"""pose"""', 'pose_name'], {}), "(self.data_path, 'pose', pose_name)\n", (2712, 2747), False, 'import os\n'), ((5036, 5063), 'torch.from_numpy', 'torch.from_numpy', (['body_mask'], {}), '(body_mask)\n', (5052, 5063), False, 'import torch\n'), ((6359, 6381), 'torch.flip', 'torch.flip', (['value', '[2]'], {}), '(value, [2])\n', (6369, 6381), False, 'import torch\n')] |
"""
This is for Kaggle's Northeastern SMILE Lab - Recognizing Faces in the Wild playground competition:
https://www.kaggle.com/c/recognizing-faces-in-the-wild
The general model will be to create feature vectors of each face, then
compare their Euclidean distance to get a value.
I will use a second NN to make the final prediction of the feature vector differences.
It will determine the "distance" that two faces must be to be kin.
Written by <NAME>
bl<EMAIL>.ch
"""
import os
import sys
import csv
import time
import random
from itertools import combinations
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from keras import regularizers
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.python.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Dropout
from keras_vggface.vggface import VGGFace # Installed from pip via git, not direct from pip.
# Paths, file names and image/model constants. ------------------
dataset_dir = os.path.join('E:\\', 'datasets', 'kaggle-recognizing-faces-in-the-wild')  # local copy of the Kaggle dataset
test_files = os.path.join(dataset_dir, 'test')
train_files = os.path.join(dataset_dir, 'train')
submission_pairs_file = os.path.join(dataset_dir, 'sample_submission.csv')  # pairs to score
train_relationships_file = os.path.join(dataset_dir, 'train_relationships.csv')  # known kin pairs
output_file = '.\\predictions.csv'  # submission file written by save_predictions
model_file = '.\\comparison_model_weights.h5'  # trained comparison-NN weights
img_rows = 224  # input image size; presumably matches VGGFace's expected 224x224x3 input -- confirm
img_cols = 224
img_channels = 3
num_categories = 2 # Kin, or not-kin
# --------------------------------------------------------------
def recall(y_true, y_pred):
    """Keras-backend recall metric: TP / (TP + FN).

    Counts are obtained by clipping to [0, 1] and rounding, so the
    metric also works with soft (probability) predictions.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon guards against division by zero when no positives exist
    return tp / (actual_positives + K.epsilon())
def precision(y_true, y_pred):
    """Keras-backend precision metric: TP / (TP + FP).

    Counts are obtained by clipping to [0, 1] and rounding, so the
    metric also works with soft (probability) predictions.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon guards against division by zero when nothing is predicted positive
    return tp / (predicted_positives + K.epsilon())
def f1(y_true, y_pred):
    """F1 score: harmonic mean of the precision and recall metrics above."""
    prec = precision(y_true, y_pred)
    rec = recall(y_true, y_pred)
    return 2 * ((prec * rec) / (prec + rec + K.epsilon()))
def read_model(filename):
    """Load a previously saved Keras model, or return None if absent.

    The custom metric functions must be supplied via ``custom_objects``
    or Keras cannot deserialize a model compiled with them.
    """
    print('Reading previous model...')
    if not os.path.exists(filename):
        print('Previous model not found.\n')
        return None
    print()
    custom_obs = {'recall': recall, 'precision': precision, 'f1': f1}
    return load_model(filename, custom_objects=custom_obs)
def read_csv_file(filename):
    """Read a CSV file into a DataFrame; return None if the file is missing."""
    print('Reading data from ' + filename + '\n')
    if os.path.exists(filename):
        return pd.read_csv(filename)
    return None
def picture_to_tensor(filename):
    """Load one image file as a batch-of-one array: (1, rows, cols, channels)."""
    pixels = load_img(filename)          # PIL image
    pixels = img_to_array(pixels)        # 3D numpy array (rows, cols, channels)
    return pixels.reshape([1, img_rows, img_cols, img_channels])
def get_feature_vectors(model, file_paths, full_path_as_key):
    """Run every image through ``model`` and collect its feature vector.

    The dict key is either '<family><sep><person><sep><pic>' (training
    data, full_path_as_key=True) or just the picture file name (test
    data). Returns (dict of key -> vector, vector length; -1 if no files).
    """
    print('Creating feature vectors...')
    t0 = time.time()
    vectors = {}
    vec_len = -1
    for path in file_paths:
        vec = model.predict(picture_to_tensor(path))[0]  # batch of one -> first row
        vec_len = len(vec)
        split_path = path.split(os.sep)
        family, person, pic = split_path[len(split_path) - 3:]
        if full_path_as_key:
            key = os.path.join(family, person, pic)
        else:
            key = pic
        vectors[key] = vec
    print('Calculation time elapsed: ' + str(int(time.time() - t0)) + ' seconds.\n')
    return vectors, vec_len
def merge_feature_vectors(feature_vectors, num_features):
    """Average each person's per-image feature vectors and save them to CSV.

    ``feature_vectors`` maps '<family><sep><person><sep><pic>' to an
    ndarray of features. All vectors belonging to the same
    '<family><sep><person>' are averaged and written to
    VGGFace_avg_feature_vectors_training.csv in the dataset directory.

    Bug fix: accumulation now starts from a *copy* of the first vector.
    The previous code popped the first ndarray off the per-person list
    and did ``vect_sum += feat_vec`` on it in place — but that array was
    still referenced by the caller's ``feature_vectors`` dict, so the
    first image of every person ended up holding the person's summed
    vector, silently corrupting the data used later for training.
    """
    # Aggregate all vectors by person.
    grouped = {}
    for f in feature_vectors.keys():
        person = os.path.dirname(f)  # Gets <family>\<person>
        grouped.setdefault(person, []).append(feature_vectors[f])
    # Average each person's vectors without mutating the caller's arrays.
    averaged_vectors = {}
    for person, vecs in grouped.items():
        vect_sum = vecs[0].copy()  # copy so += cannot clobber the original
        for feat_vec in vecs[1:]:
            vect_sum += feat_vec
        averaged_vectors[person] = vect_sum / len(vecs)
    filename = os.path.join(dataset_dir, 'VGGFace_avg_feature_vectors_training.csv')
    save_feature_vectors(averaged_vectors, num_features, filename)
def save_feature_vectors(feature_vectors, num_features, filename):
    """Write feature vectors to CSV: a header row, then one row per image."""
    print('Saving feature vectors to file...\n')
    header = ['ImageFileName'] + ['Feature' + str(i) for i in range(num_features)]
    with open(filename, 'w', newline='') as fh:
        writer = csv.writer(fh)
        writer.writerow(header)
        for key, vec in feature_vectors.items():
            writer.writerow([key] + vec.tolist())
def prep_model(num_features):
    """Build and compile the small classifier that maps a feature-difference
    vector to a kin / not-kin prediction.

    Architecture: Dense(50, relu, L2-regularized) -> Dropout(0.05)
    -> Dense(num_categories, softmax).
    """
    print('Prepping model...\n')
    model = Sequential()
    model.add(Dense(
        50,
        activation='relu',
        kernel_regularizer=regularizers.l2(0.01),
        input_shape=(num_features,)
    ))
    model.add(Dropout(0.05))
    # Output layer: probabilities over {not-kin, kin}.
    model.add(Dense(
        num_categories,
        activation='softmax'
    ))
    # accuracy is a poor metric here (classes are heavily imbalanced),
    # so recall / precision / f1 are tracked instead.
    # TODO : the values for these metrics are always the same...
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=[recall, precision, f1]
    )
    return model
def train_model(model, feature_dict, num_features, kin_combinations):
    """Train the comparison model on feature-vector differences.

    There are far too many non-kin pairs to hold at once, so training
    runs in iterations: each takes the next slice of non-kin pairs,
    mixes in ALL kin pairs (shuffled), labels each pair 1/0 by looking
    it up in ``kin_combinations``, and fits for a few epochs with class
    weights compensating the heavy kin/non-kin imbalance.

    Args:
        model: compiled Keras model (from prep_model).
        feature_dict: person key -> feature ndarray.
        num_features: length of each feature vector (unused here).
        kin_combinations: dict whose keys are (p1, p2) kin tuples.
    Returns the trained model.
    """
    print('Training model...\n')
    all_combinations = list(combinations(feature_dict.keys(), 2)) # Generate all pairwise people combinations.
    # Dict-comprehension trick: every kin pair collapses onto the single
    # key None, which is then deleted, leaving only the non-kin pairs.
    # NOTE(review): del raises KeyError if no pair is kin -- confirm acceptable.
    non_kin_combinations = { (pair if pair not in kin_combinations else None):False for pair in all_combinations}
    del non_kin_combinations[None], all_combinations
    non_kin_combinations = list(non_kin_combinations)
    kin_combinations_lst = list(kin_combinations.keys())
    num_kin_relations = len(kin_combinations_lst)
    pairs_per_iteration = 100000 # num_kin_relations * 2
    num_non_kin_per_iteration = pairs_per_iteration - num_kin_relations
    num_iterations = 1 + (len(non_kin_combinations) // num_non_kin_per_iteration)
    validation_split = 0.1 # 10%
    # Halve the learning rate when validation F1 plateaus.
    reduce_lr = ReduceLROnPlateau( # CHANGED
        monitor='val_f1',
        factor=0.5,
        patience=5
    )
    start = time.time()
    for iteration in range(num_iterations):
        print('\n\tIteration ' + str(iteration + 1) + '/' + str(num_iterations))
        print('\t\tPrepping data...')
        # Get the selection of images for this iteration.
        current_pairs = non_kin_combinations[iteration*num_non_kin_per_iteration:(iteration+1)*num_non_kin_per_iteration]
        '''# Non-kin relationship labels.
        valid_pairs = current_pairs[int(len(current_pairs)*(1-validation_split)):len(current_pairs)]
        del current_pairs[int(len(current_pairs)*(1-validation_split)):len(current_pairs)]
        # Kin relationship labels.'''
        random.shuffle(kin_combinations_lst)
        current_pairs.extend(kin_combinations_lst)
        '''valid_pairs.extend(kin_combinations_lst[int(len(kin_combinations_lst)*(1-validation_split)):len(kin_combinations_lst)])
        del kin_combinations_lst[int(len(kin_combinations_lst)*(1-validation_split)):len(kin_combinations_lst)]'''
        random.shuffle(current_pairs)
        # Create the list of relatives, and the difference vector list.
        diff_vectors = []
        relations = []
        for pair in current_pairs:
            # Skip pairs whose vectors were not computed.
            if pair[0] not in feature_dict or pair[1] not in feature_dict:
                continue
            diff_vectors.append(feature_dict[pair[0]] - feature_dict[pair[1]])
            # Kinship is symmetric, so check both orderings of the pair.
            if kin_combinations.get((pair[0], pair[1]), False) or kin_combinations.get((pair[1], pair[0]), False):
                relations.append(1)
            else:
                relations.append(0)
        '''valid_vectors = []
        valid_rels = []
        for pair in valid_pairs:
            if pair[0] not in feature_dict or pair[1] not in feature_dict:
                continue
            valid_vectors.append(feature_dict[pair[0]] - feature_dict[pair[1]])
            if kin_combinations.get((pair[0], pair[1]), False) or kin_combinations.get((pair[1], pair[0]), False):
                valid_rels.append(1)
            else:
                valid_rels.append(0)'''
        diff_vectors = np.stack(diff_vectors)
        relations = to_categorical(relations, num_categories)
        '''valid_vectors = np.stack(valid_vectors)
        valid_rels = to_categorical(valid_rels, num_categories)'''
        # Up-weight the rare kin class proportionally to the imbalance.
        class_weight = {
            0:1.0,
            1:(len(current_pairs)-num_kin_relations)/num_kin_relations
        }
        # Run training.
        print('\t\tTraining...')
        model.fit(
            diff_vectors,
            relations,
            batch_size = 100,
            epochs = 10,
            class_weight = class_weight,
            callbacks=[reduce_lr],
            # validation_data = (valid_vectors, valid_rels)
            validation_split = validation_split
        )
    total = int(time.time() - start)
    print('\nTraining time elapsed: ' + str(total) + ' seconds.')
    print('Training time per iteration: ' + str(total / num_iterations) + ' seconds.')
    return model
def save_model(model, filename):
    """Persist the model to ``filename`` unless that file already exists.

    NOTE: an existing file is never overwritten, so a freshly retrained
    model is silently skipped when old weights are already on disk.
    """
    print('Saving model to file...\n')
    if os.path.exists(filename):
        return  # keep existing weights untouched
    model.save(filename)
def make_predictions(model, feature_vectors, image_pairs):
    """Predict a kinship probability for each 'img1-img2' pair.

    Returns a DataFrame with the Kaggle submission columns 'img_pair'
    and 'is_related', where is_related is the softmax probability of
    the kin class (the non-kin probability is discarded).
    """
    print('Making predictions...\n')
    pair_col = []
    prob_col = []
    for pair in image_pairs:
        left, right = pair.split('-')
        diff = feature_vectors[left] - feature_vectors[right]
        probs = model.predict(diff.reshape((1, len(diff))))  # [[p_not_kin, p_kin]]
        pair_col.append(pair)
        prob_col.append(probs[0][1])  # kin-class probability only
    return pd.DataFrame({'img_pair': pair_col, 'is_related': prob_col})
def save_predictions(preds):
    """Write the submission DataFrame to ``output_file`` without the index column."""
    print('Saving predictions...\n')
    preds.to_csv(output_file, index=False)
if __name__ == '__main__':
    '''
    Initial plan:
    1. Use pre-trained face model - keras_vggface
    2. Iterate through all training faces to create their feature vectors (save these)
    3. Create smaller NN where the inputs are the raw differences of the feature vectors
    4. Train NN with ALL facial combinations, noting which are actually related
        -there are ~76 million combinations...how can I reduce this?
        -what if I just use one from each person instead of all photos?
        -what if I average the vectors from each person? (~2.6 million combinations)
    5. Run all test images through CNN to generate their feature vectors
    6. Run all test feature vectors/distances through NN model to predict kinship
    '''
    # Suppress some tensorflow INFO, WARNING, and ERROR messages
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # Load pre-trained facial model. Step 1.
    vggface = VGGFace() # Uses default vgg16 model.
    print()
    # Get and prep training images and create feature vectors. Step 2.
    train_relationships = read_csv_file(train_relationships_file)
    train_rels_dict = {}
    print('Gathering kin relationships...\n')
    for i, row in train_relationships.iterrows(): # Store kin relationships for quick reference.
        # Normalize the CSV's forward slashes to this OS's separator so
        # keys match the paths produced by get_feature_vectors.
        p1 = row['p1'].replace('/', os.sep)
        p2 = row['p2'].replace('/', os.sep)
        train_rels_dict[(p1, p2)] = True
        # train_rels_dict[(p2, p1)] = True # CHANGED
    # Check to see if the feature vectors have already been calculated.
    feature_vector_file = os.path.join(dataset_dir, 'VGGFace_avg_feature_vectors_training.csv')
    feature_vectors = read_csv_file(feature_vector_file)
    num_features = -1
    if feature_vectors is None:
        print('Training feature vector file not found.\n')
        training_image_paths = []
        for folder in os.walk(train_files): # Get all training image paths.
            training_image_paths.extend([os.path.join(folder[0], file) for file in folder[2]])
        feature_vectors, num_features = get_feature_vectors(vggface, training_image_paths, True)
        # NOTE(review): the averaged vectors are only written to disk here;
        # training below still uses the per-image vectors -- confirm intended.
        merge_feature_vectors(feature_vectors, num_features)
        del training_image_paths
    else: # Convert to dict.
        print('Reading stored training feature vectors...\n')
        labels = feature_vectors['ImageFileName']
        num_features = len(feature_vectors.columns) - 1
        temp_vectors = feature_vectors.drop('ImageFileName', axis=1).to_numpy()
        feature_vectors = {labels[i]: temp_vectors[i] for i in range(len(temp_vectors))}
        del temp_vectors, labels
    # Create comparison model. Step 3.
    comp_model = read_model(model_file)
    if comp_model is None:
        # Train comparison model. Step 4.
        comp_model = prep_model(num_features)
        comp_model = train_model(comp_model, feature_vectors, num_features, train_rels_dict)
        # Free the training data before loading the test set.
        del feature_vectors
        del train_rels_dict
        save_model(comp_model, model_file)
    # Get and prep test images and create feature vectors. Step 5.
    feature_vector_file = os.path.join(dataset_dir, 'VGGFace_feature_vectors_test.csv')
    feature_vectors = read_csv_file(feature_vector_file)
    if feature_vectors is None:
        print('Test feature vector file not found.\n')
        test_image_paths = []
        for folder in os.walk(test_files): # Get all training image paths.
            test_image_paths.extend([os.path.join(folder[0], file) for file in folder[2]])
        feature_vectors, num_features = get_feature_vectors(vggface, test_image_paths, False)
        save_feature_vectors(feature_vectors, num_features, feature_vector_file)
        del test_image_paths
    else:
        print('Reading stored test feature vectors...\n')
        labels = feature_vectors['ImageFileName']
        num_features = len(feature_vectors.columns) - 1
        temp_vectors = feature_vectors.drop('ImageFileName', axis=1).to_numpy()
        feature_vectors = {labels[i]: temp_vectors[i] for i in range(len(temp_vectors))}
        del temp_vectors, labels
    # Make predictions. Step 6.
    pairs = read_csv_file(submission_pairs_file)['img_pair']
    preds = make_predictions(comp_model, feature_vectors, pairs)
    save_predictions(preds)
| [
"tensorflow.keras.backend.epsilon",
"pandas.read_csv",
"tensorflow.python.keras.preprocessing.image.load_img",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"os.walk",
"os.path.exists",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.stack",
"pandas.DataFrame",
"t... | [((1253, 1325), 'os.path.join', 'os.path.join', (['"""E:\\\\"""', '"""datasets"""', '"""kaggle-recognizing-faces-in-the-wild"""'], {}), "('E:\\\\', 'datasets', 'kaggle-recognizing-faces-in-the-wild')\n", (1265, 1325), False, 'import os\n'), ((1339, 1372), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test"""'], {}), "(dataset_dir, 'test')\n", (1351, 1372), False, 'import os\n'), ((1387, 1421), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train"""'], {}), "(dataset_dir, 'train')\n", (1399, 1421), False, 'import os\n'), ((1446, 1496), 'os.path.join', 'os.path.join', (['dataset_dir', '"""sample_submission.csv"""'], {}), "(dataset_dir, 'sample_submission.csv')\n", (1458, 1496), False, 'import os\n'), ((1524, 1576), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_relationships.csv"""'], {}), "(dataset_dir, 'train_relationships.csv')\n", (1536, 1576), False, 'import os\n'), ((2509, 2533), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2523, 2533), False, 'import os\n'), ((2920, 2941), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (2931, 2941), True, 'import pandas as pd\n'), ((3235, 3246), 'time.time', 'time.time', ([], {}), '()\n', (3244, 3246), False, 'import time\n'), ((4682, 4751), 'os.path.join', 'os.path.join', (['dataset_dir', '"""VGGFace_avg_feature_vectors_training.csv"""'], {}), "(dataset_dir, 'VGGFace_avg_feature_vectors_training.csv')\n", (4694, 4751), False, 'import os\n'), ((5318, 5330), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5328, 5330), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((7042, 7101), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_f1"""', 'factor': '(0.5)', 'patience': '(5)'}), "(monitor='val_f1', factor=0.5, patience=5)\n", (7059, 7101), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau\n'), ((7156, 7167), 'time.time', 'time.time', ([], {}), 
'()\n', (7165, 7167), False, 'import time\n'), ((11018, 11037), 'pandas.DataFrame', 'pd.DataFrame', (['preds'], {}), '(preds)\n', (11030, 11037), True, 'import pandas as pd\n'), ((12078, 12087), 'keras_vggface.vggface.VGGFace', 'VGGFace', ([], {}), '()\n', (12085, 12087), False, 'from keras_vggface.vggface import VGGFace\n'), ((12718, 12787), 'os.path.join', 'os.path.join', (['dataset_dir', '"""VGGFace_avg_feature_vectors_training.csv"""'], {}), "(dataset_dir, 'VGGFace_avg_feature_vectors_training.csv')\n", (12730, 12787), False, 'import os\n'), ((14240, 14301), 'os.path.join', 'os.path.join', (['dataset_dir', '"""VGGFace_feature_vectors_test.csv"""'], {}), "(dataset_dir, 'VGGFace_feature_vectors_test.csv')\n", (14252, 14301), False, 'import os\n'), ((2637, 2684), 'tensorflow.keras.models.load_model', 'load_model', (['filename'], {'custom_objects': 'custom_obs'}), '(filename, custom_objects=custom_obs)\n', (2647, 2684), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((2862, 2886), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2876, 2886), False, 'import os\n'), ((3005, 3023), 'tensorflow.python.keras.preprocessing.image.load_img', 'load_img', (['filename'], {}), '(filename)\n', (3013, 3023), False, 'from tensorflow.python.keras.preprocessing.image import img_to_array, load_img\n'), ((4140, 4158), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (4155, 4158), False, 'import os\n'), ((5013, 5032), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (5023, 5032), False, 'import csv\n'), ((5604, 5617), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.05)'], {}), '(0.05)\n', (5611, 5617), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((5644, 5687), 'tensorflow.keras.layers.Dense', 'Dense', (['num_categories'], {'activation': '"""softmax"""'}), "(num_categories, activation='softmax')\n", (5649, 5687), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((7810, 7846), 
'random.shuffle', 'random.shuffle', (['kin_combinations_lst'], {}), '(kin_combinations_lst)\n', (7824, 7846), False, 'import random\n'), ((8152, 8181), 'random.shuffle', 'random.shuffle', (['current_pairs'], {}), '(current_pairs)\n', (8166, 8181), False, 'import random\n'), ((9249, 9271), 'numpy.stack', 'np.stack', (['diff_vectors'], {}), '(diff_vectors)\n', (9257, 9271), True, 'import numpy as np\n'), ((9292, 9333), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['relations', 'num_categories'], {}), '(relations, num_categories)\n', (9306, 9333), False, 'from tensorflow.keras.utils import to_categorical\n'), ((10253, 10277), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (10267, 10277), False, 'import os\n'), ((13014, 13034), 'os.walk', 'os.walk', (['train_files'], {}), '(train_files)\n', (13021, 13034), False, 'import os\n'), ((14498, 14517), 'os.walk', 'os.walk', (['test_files'], {}), '(test_files)\n', (14505, 14517), False, 'import os\n'), ((1874, 1903), 'tensorflow.keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1880, 1903), True, 'from tensorflow.keras import backend as K\n'), ((1945, 1965), 'tensorflow.keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (1951, 1965), True, 'from tensorflow.keras import backend as K\n'), ((2020, 2031), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2029, 2031), True, 'from tensorflow.keras import backend as K\n'), ((2119, 2148), 'tensorflow.keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (2125, 2148), True, 'from tensorflow.keras import backend as K\n'), ((2191, 2211), 'tensorflow.keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (2197, 2211), True, 'from tensorflow.keras import backend as K\n'), ((2270, 2281), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2279, 2281), True, 
'from tensorflow.keras import backend as K\n'), ((3617, 3650), 'os.path.join', 'os.path.join', (['family', 'person', 'pic'], {}), '(family, person, pic)\n', (3629, 3650), False, 'import os\n'), ((9974, 9985), 'time.time', 'time.time', ([], {}), '()\n', (9983, 9985), False, 'import time\n'), ((2420, 2431), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2429, 2431), True, 'from tensorflow.keras import backend as K\n'), ((5430, 5451), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5445, 5451), False, 'from keras import regularizers\n'), ((13110, 13139), 'os.path.join', 'os.path.join', (['folder[0]', 'file'], {}), '(folder[0], file)\n', (13122, 13139), False, 'import os\n'), ((14589, 14618), 'os.path.join', 'os.path.join', (['folder[0]', 'file'], {}), '(folder[0], file)\n', (14601, 14618), False, 'import os\n'), ((3778, 3789), 'time.time', 'time.time', ([], {}), '()\n', (3787, 3789), False, 'import time\n')] |
import mmcv
import numpy as np
import pytest
from os import path as osp
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import (LoadAnnotations3D, LoadPointsFromFile,
LoadPointsFromMultiSweeps)
def test_load_points_from_indoor_file():
    """LoadPointsFromFile on indoor fixtures (SUN RGB-D and ScanNet).

    Both fixtures contain 100 points; with shift_height=True each loaded
    point carries 4 values (presumably x, y, z plus a height channel --
    confirm against LoadPointsFromFile's implementation).
    """
    sunrgbd_info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')
    sunrgbd_load_points_from_file = LoadPointsFromFile(6, shift_height=True)
    sunrgbd_results = dict()
    data_path = './tests/data/sunrgbd'
    sunrgbd_info = sunrgbd_info[0]
    sunrgbd_results['pts_filename'] = osp.join(data_path,
                                              sunrgbd_info['pts_path'])
    sunrgbd_results = sunrgbd_load_points_from_file(sunrgbd_results)
    sunrgbd_point_cloud = sunrgbd_results['points']
    assert sunrgbd_point_cloud.shape == (100, 4)
    scannet_info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')
    scannet_load_data = LoadPointsFromFile(shift_height=True)
    scannet_results = dict()
    data_path = './tests/data/scannet'
    scannet_info = scannet_info[0]
    scannet_results['pts_filename'] = osp.join(data_path,
                                              scannet_info['pts_path'])
    scannet_results = scannet_load_data(scannet_results)
    scannet_point_cloud = scannet_results['points']
    # repr must reflect both the explicit argument and the defaults.
    repr_str = repr(scannet_load_data)
    expected_repr_str = 'LoadPointsFromFile(shift_height=True, ' \
                        'file_client_args={\'backend\': \'disk\'}), ' \
                        'load_dim=6, use_dim=[0, 1, 2])'
    assert repr_str == expected_repr_str
    assert scannet_point_cloud.shape == (100, 4)
def test_load_points_from_outdoor_file():
    """LoadPointsFromFile on a KITTI-style binary point cloud.

    ``use_dim`` may be given either as an int (4) or as an explicit
    index list ([0, 1, 2, 3]); both must load identical points. A
    use_dim exceeding load_dim must be rejected at construction time.
    """
    data_path = 'tests/data/kitti/a.bin'
    load_points_from_file = LoadPointsFromFile(4, 4)
    results = dict()
    results['pts_filename'] = data_path
    results = load_points_from_file(results)
    points = results['points']
    assert points.shape == (50, 4)
    assert np.allclose(points.sum(), 2637.479)
    load_points_from_file = LoadPointsFromFile(4, [0, 1, 2, 3])
    results = dict()
    results['pts_filename'] = data_path
    results = load_points_from_file(results)
    new_points = results['points']
    assert new_points.shape == (50, 4)
    # Fixed: check the *new* points' sum (previously re-checked `points`).
    assert np.allclose(new_points.sum(), 2637.479)
    # Fixed: the element-wise comparison must actually be asserted
    # (previously the np.equal result was computed and discarded).
    assert np.all(np.equal(points, new_points))
    # use_dim must not exceed load_dim.
    with pytest.raises(AssertionError):
        LoadPointsFromFile(4, 5)
def test_load_annotations3D():
    """LoadAnnotations3D on ScanNet sample data: 3D boxes, labels and
    both instance and semantic point masks, plus the pipeline's repr."""
    # Test scannet LoadAnnotations3D
    scannet_info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
    scannet_load_annotations3D = LoadAnnotations3D(
        with_bbox_3d=True,
        with_label_3d=True,
        with_mask_3d=True,
        with_seg_3d=True)
    scannet_results = dict()
    data_path = './tests/data/scannet'
    if scannet_info['annos']['gt_num'] != 0:
        scannet_gt_bboxes_3d = scannet_info['annos']['gt_boxes_upright_depth']
        scannet_gt_labels_3d = scannet_info['annos']['class']
    else:
        # no annotated objects in this frame: fall back to one dummy box
        scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
        scannet_gt_labels_3d = np.zeros((1, ))
    # prepare input of loading pipeline
    scannet_results['ann_info'] = dict()
    scannet_results['ann_info']['pts_instance_mask_path'] = osp.join(
        data_path, scannet_info['pts_instance_mask_path'])
    scannet_results['ann_info']['pts_semantic_mask_path'] = osp.join(
        data_path, scannet_info['pts_semantic_mask_path'])
    # axis-aligned depth-frame boxes (6 dims, no yaw)
    scannet_results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(
        scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)
    scannet_results['ann_info']['gt_labels_3d'] = scannet_gt_labels_3d
    # field registries the loading pipeline appends loaded keys into
    scannet_results['bbox3d_fields'] = []
    scannet_results['pts_mask_fields'] = []
    scannet_results['pts_seg_fields'] = []
    scannet_results = scannet_load_annotations3D(scannet_results)
    scannet_gt_boxes = scannet_results['gt_bboxes_3d']
    scannet_gt_lbaels = scannet_results['gt_labels_3d']
    scannet_pts_instance_mask = scannet_results['pts_instance_mask']
    scannet_pts_semantic_mask = scannet_results['pts_semantic_mask']
    repr_str = repr(scannet_load_annotations3D)
    expected_repr_str = 'LoadAnnotations3D(\n    with_bbox_3d=True, ' \
                        'with_label_3d=True, with_mask_3d=True, ' \
                        'with_seg_3d=True, with_bbox=False, ' \
                        'with_label=False, with_mask=False, ' \
                        'with_seg=False, poly2mask=True)'
    assert repr_str == expected_repr_str
    # 27 annotated objects; 100 points in the down-sampled sample cloud
    assert scannet_gt_boxes.tensor.shape == (27, 7)
    assert scannet_gt_lbaels.shape == (27, )
    assert scannet_pts_instance_mask.shape == (100, )
    assert scannet_pts_semantic_mask.shape == (100, )
def test_load_points_from_multi_sweeps():
    """LoadPointsFromMultiSweeps merges the key frame with one nuScenes sweep."""
    load_points_from_multi_sweeps = LoadPointsFromMultiSweeps()
    # one recorded sweep with its lidar extrinsics relative to the key frame
    sweep = dict(
        data_path='./tests/data/nuscenes/sweeps/LIDAR_TOP/'
        'n008-2018-09-18-12-07-26-0400__LIDAR_TOP__1537287083900561.pcd.bin',
        timestamp=1537290014899034,
        sensor2lidar_translation=[-0.02344713, -3.88266051, -0.17151584],
        sensor2lidar_rotation=np.array(
            [[9.99979347e-01, 3.99870769e-04, 6.41441690e-03],
             [-4.42034222e-04, 9.99978299e-01, 6.57316197e-03],
             [-6.41164929e-03, -6.57586161e-03, 9.99957824e-01]]))
    # key-frame input: three dummy 5-dim points plus the sweep above
    results = dict(
        points=np.array([[1., 2., 3., 4., 5.], [1., 2., 3., 4., 5.],
                         [1., 2., 3., 4., 5.]]),
        timestamp=1537290014899034,
        sweeps=[sweep])
    results = load_points_from_multi_sweeps(results)
    points = results['points']
    repr_str = repr(load_points_from_multi_sweeps)
    expected_repr_str = 'LoadPointsFromMultiSweeps(sweeps_num=10)'
    assert repr_str == expected_repr_str
    # merged cloud: 3 key-frame points + 400 sweep points, 4 dims kept
    assert points.shape == (403, 4)
| [
"mmdet3d.datasets.pipelines.LoadPointsFromMultiSweeps",
"os.path.join",
"numpy.equal",
"numpy.array",
"numpy.zeros",
"pytest.raises",
"mmdet3d.core.bbox.DepthInstance3DBoxes",
"mmcv.load",
"mmdet3d.datasets.pipelines.LoadAnnotations3D",
"mmdet3d.datasets.pipelines.LoadPointsFromFile"
] | [((332, 383), 'mmcv.load', 'mmcv.load', (['"""./tests/data/sunrgbd/sunrgbd_infos.pkl"""'], {}), "('./tests/data/sunrgbd/sunrgbd_infos.pkl')\n", (341, 383), False, 'import mmcv\n'), ((420, 460), 'mmdet3d.datasets.pipelines.LoadPointsFromFile', 'LoadPointsFromFile', (['(6)'], {'shift_height': '(True)'}), '(6, shift_height=True)\n', (438, 460), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((602, 647), 'os.path.join', 'osp.join', (['data_path', "sunrgbd_info['pts_path']"], {}), "(data_path, sunrgbd_info['pts_path'])\n", (610, 647), True, 'from os import path as osp\n'), ((885, 936), 'mmcv.load', 'mmcv.load', (['"""./tests/data/scannet/scannet_infos.pkl"""'], {}), "('./tests/data/scannet/scannet_infos.pkl')\n", (894, 936), False, 'import mmcv\n'), ((961, 998), 'mmdet3d.datasets.pipelines.LoadPointsFromFile', 'LoadPointsFromFile', ([], {'shift_height': '(True)'}), '(shift_height=True)\n', (979, 998), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((1141, 1186), 'os.path.join', 'osp.join', (['data_path', "scannet_info['pts_path']"], {}), "(data_path, scannet_info['pts_path'])\n", (1149, 1186), True, 'from os import path as osp\n'), ((1781, 1805), 'mmdet3d.datasets.pipelines.LoadPointsFromFile', 'LoadPointsFromFile', (['(4)', '(4)'], {}), '(4, 4)\n', (1799, 1805), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((2054, 2089), 'mmdet3d.datasets.pipelines.LoadPointsFromFile', 'LoadPointsFromFile', (['(4)', '[0, 1, 2, 3]'], {}), '(4, [0, 1, 2, 3])\n', (2072, 2089), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((2321, 2349), 'numpy.equal', 'np.equal', (['points', 'new_points'], {}), '(points, new_points)\n', (2329, 2349), True, 'import numpy as np\n'), ((2601, 2698), 
'mmdet3d.datasets.pipelines.LoadAnnotations3D', 'LoadAnnotations3D', ([], {'with_bbox_3d': '(True)', 'with_label_3d': '(True)', 'with_mask_3d': '(True)', 'with_seg_3d': '(True)'}), '(with_bbox_3d=True, with_label_3d=True, with_mask_3d=True,\n with_seg_3d=True)\n', (2618, 2698), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((3248, 3307), 'os.path.join', 'osp.join', (['data_path', "scannet_info['pts_instance_mask_path']"], {}), "(data_path, scannet_info['pts_instance_mask_path'])\n", (3256, 3307), True, 'from os import path as osp\n'), ((3377, 3436), 'os.path.join', 'osp.join', (['data_path', "scannet_info['pts_semantic_mask_path']"], {}), "(data_path, scannet_info['pts_semantic_mask_path'])\n", (3385, 3436), True, 'from os import path as osp\n'), ((3496, 3565), 'mmdet3d.core.bbox.DepthInstance3DBoxes', 'DepthInstance3DBoxes', (['scannet_gt_bboxes_3d'], {'box_dim': '(6)', 'with_yaw': '(False)'}), '(scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)\n', (3516, 3565), False, 'from mmdet3d.core.bbox import DepthInstance3DBoxes\n'), ((4825, 4852), 'mmdet3d.datasets.pipelines.LoadPointsFromMultiSweeps', 'LoadPointsFromMultiSweeps', ([], {}), '()\n', (4850, 4852), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((2360, 2389), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2373, 2389), False, 'import pytest\n'), ((2399, 2423), 'mmdet3d.datasets.pipelines.LoadPointsFromFile', 'LoadPointsFromFile', (['(4)', '(5)'], {}), '(4, 5)\n', (2417, 2423), False, 'from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile, LoadPointsFromMultiSweeps\n'), ((2513, 2564), 'mmcv.load', 'mmcv.load', (['"""./tests/data/scannet/scannet_infos.pkl"""'], {}), "('./tests/data/scannet/scannet_infos.pkl')\n", (2522, 2564), False, 'import mmcv\n'), ((3024, 3058), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {'dtype': 
'np.float32'}), '((1, 6), dtype=np.float32)\n', (3032, 3058), True, 'import numpy as np\n'), ((3090, 3104), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (3098, 3104), True, 'import numpy as np\n'), ((5149, 5308), 'numpy.array', 'np.array', (['[[0.999979347, 0.000399870769, 0.0064144169], [-0.000442034222, 0.999978299,\n 0.00657316197], [-0.00641164929, -0.00657586161, 0.999957824]]'], {}), '([[0.999979347, 0.000399870769, 0.0064144169], [-0.000442034222, \n 0.999978299, 0.00657316197], [-0.00641164929, -0.00657586161, 0.999957824]]\n )\n', (5157, 5308), True, 'import numpy as np\n'), ((5388, 5484), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 2.0, 3.0, 4.0,\n 5.0]]'], {}), '([[1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 2.0, \n 3.0, 4.0, 5.0]])\n', (5396, 5484), True, 'import numpy as np\n')] |
import numpy as np
from collections import deque
import random
class Buffer:
    """Uniform experience-replay buffer backed by a bounded deque.

    Each stored entry is ``[s, a, r, s1, a1, ano]``; :meth:`sample` returns
    the first five columns as float32 arrays, :meth:`sample_` returns all
    six columns as plain lists.
    """

    def __init__(self, max_size=1000, seed=None):
        self.buffer = deque(maxlen=max_size)  # oldest entries evicted first
        self.max_size = max_size
        random.seed(seed)  # seeds the module-level RNG used for sampling

    @property
    def size(self):
        """Number of transitions currently held."""
        return len(self.buffer)

    def sample(self, ct):
        """Draw up to ``ct`` transitions uniformly, without replacement."""
        picked = random.sample(self.buffer, min(ct, self.size))
        s, a, r, s1, a1 = (
            np.float32([entry[col] for entry in picked]) for col in range(5)
        )
        return s, a, r, s1, a1

    def sample_(self, ct):
        """Like :meth:`sample` but returns raw lists, including ``ano``."""
        picked = random.sample(self.buffer, min(ct, self.size))
        s, a, r, s1, a1, ano = (
            [entry[col] for entry in picked] for col in range(6)
        )
        return s, a, r, s1, a1, ano

    def add(self, s, a, r, s1, a1=None, ano=None):
        """Append one transition; evicts the oldest entry once full."""
        self.buffer.append([s, a, r, s1, a1, ano])
class PriortizedReplay(Buffer):
    """Prioritized experience replay.

    ``sample`` draws proportionally to ``(td + eps) ** beta``; ``sample_``
    draws rank-based, i.e. proportionally to ``(1 / rank) ** beta`` where
    rank 1 is the highest-priority (largest TD) transition.
    """

    def __init__(self, max_size=1000, seed=None, beta=1., eps=0.1):
        super(PriortizedReplay, self).__init__(max_size, seed)
        self.beta = beta  # priority exponent (0 = uniform, 1 = full prioritization)
        self.probs = deque(maxlen=self.max_size)  # one priority per stored entry
        self.rg = np.random.RandomState(seed)  # private RNG for reproducible draws
        self.eps = eps  # keeps zero-TD transitions sampleable

    def add(self, s, a, r, s1, a1=None, ano=None, td=0):
        """Store one transition together with its TD-error based priority."""
        arr = [s, a, r, s1, a1, ano]
        self.probs.append(td + self.eps)
        self.buffer.append(arr)

    def sample(self, ct):
        """Proportional prioritized sampling (with replacement)."""
        ct = min(ct, self.size)
        probs = np.array(self.probs)
        probs = probs ** self.beta
        probs = probs / probs.sum()
        idx = [self.rg.choice(self.size, p=probs) for _ in range(ct)]
        s = np.float32([self.buffer[i][0] for i in idx])
        a = np.float32([self.buffer[i][1] for i in idx])
        r = np.float32([self.buffer[i][2] for i in idx])
        s1 = np.float32([self.buffer[i][3] for i in idx])
        a1 = np.float32([self.buffer[i][4] for i in idx])
        return s, a, r, s1, a1

    def sample_(self, ct):
        """Rank-based prioritized sampling (with replacement).

        BUG FIX: the old code used ``priorities.argsort() + 1`` as if it
        were each element's rank, but argsort returns the *indices that
        would sort* the array, not ranks, so weights were assigned to the
        wrong transitions. Ranks are obtained by argsorting twice; sorting
        by descending priority makes rank 1 the largest-TD transition.
        """
        ct = min(ct, self.size)
        priorities = np.array(self.probs)
        ranks = (-priorities).argsort().argsort() + 1
        probs = (1.0 / ranks) ** self.beta
        probs = probs / probs.sum()
        idx = [self.rg.choice(self.size, p=probs) for _ in range(ct)]
        s = [self.buffer[i][0] for i in idx]
        a = [self.buffer[i][1] for i in idx]
        r = [self.buffer[i][2] for i in idx]
        s1 = [self.buffer[i][3] for i in idx]
        a1 = [self.buffer[i][4] for i in idx]
        ano = [self.buffer[i][5] for i in idx]
        return s, a, r, s1, a1, ano
| [
"random.sample",
"collections.deque",
"random.seed",
"numpy.array",
"numpy.float32",
"numpy.random.RandomState"
] | [((151, 173), 'collections.deque', 'deque', ([], {'maxlen': 'max_size'}), '(maxlen=max_size)\n', (156, 173), False, 'from collections import deque\n'), ((215, 232), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (226, 232), False, 'import random\n'), ((383, 413), 'random.sample', 'random.sample', (['self.buffer', 'ct'], {}), '(self.buffer, ct)\n', (396, 413), False, 'import random\n'), ((426, 459), 'numpy.float32', 'np.float32', (['[x[0] for x in batch]'], {}), '([x[0] for x in batch])\n', (436, 459), True, 'import numpy as np\n'), ((472, 505), 'numpy.float32', 'np.float32', (['[x[1] for x in batch]'], {}), '([x[1] for x in batch])\n', (482, 505), True, 'import numpy as np\n'), ((518, 551), 'numpy.float32', 'np.float32', (['[x[2] for x in batch]'], {}), '([x[2] for x in batch])\n', (528, 551), True, 'import numpy as np\n'), ((565, 598), 'numpy.float32', 'np.float32', (['[x[3] for x in batch]'], {}), '([x[3] for x in batch])\n', (575, 598), True, 'import numpy as np\n'), ((612, 645), 'numpy.float32', 'np.float32', (['[x[4] for x in batch]'], {}), '([x[4] for x in batch])\n', (622, 645), True, 'import numpy as np\n'), ((755, 785), 'random.sample', 'random.sample', (['self.buffer', 'ct'], {}), '(self.buffer, ct)\n', (768, 785), False, 'import random\n'), ((1357, 1384), 'collections.deque', 'deque', ([], {'maxlen': 'self.max_size'}), '(maxlen=self.max_size)\n', (1362, 1384), False, 'from collections import deque\n'), ((1403, 1430), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1424, 1430), True, 'import numpy as np\n'), ((1695, 1715), 'numpy.array', 'np.array', (['self.probs'], {}), '(self.probs)\n', (1703, 1715), True, 'import numpy as np\n'), ((1867, 1911), 'numpy.float32', 'np.float32', (['[self.buffer[i][0] for i in idx]'], {}), '([self.buffer[i][0] for i in idx])\n', (1877, 1911), True, 'import numpy as np\n'), ((1924, 1968), 'numpy.float32', 'np.float32', (['[self.buffer[i][1] for i in idx]'], {}), 
'([self.buffer[i][1] for i in idx])\n', (1934, 1968), True, 'import numpy as np\n'), ((1981, 2025), 'numpy.float32', 'np.float32', (['[self.buffer[i][2] for i in idx]'], {}), '([self.buffer[i][2] for i in idx])\n', (1991, 2025), True, 'import numpy as np\n'), ((2039, 2083), 'numpy.float32', 'np.float32', (['[self.buffer[i][3] for i in idx]'], {}), '([self.buffer[i][3] for i in idx])\n', (2049, 2083), True, 'import numpy as np\n'), ((2097, 2141), 'numpy.float32', 'np.float32', (['[self.buffer[i][4] for i in idx]'], {}), '([self.buffer[i][4] for i in idx])\n', (2107, 2141), True, 'import numpy as np\n'), ((2249, 2269), 'numpy.array', 'np.array', (['self.probs'], {}), '(self.probs)\n', (2257, 2269), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from PIL import Image
import random
import datetime
import os
from shutil import copyfile
MATE_PROBABILTY = 0.35           # chance a parent pair is crossed over
MUTATION_PROBABILITY = 0.45      # chance an individual is mutated at all
HARD_MUTATION_PROBABILITY = 0.6  # chance a mutation resets genes to random values
ADD_GEN = 5                      # stagnant checks before a new circle is added
POPULATION = 10                  # individuals per generation
CIRCLES = 1                      # initial circles per genome (unused in visible code — linear() hardcodes 1)
FILENAME = "monalisa"            # target image name, without extension
# Reference image the GA tries to approximate, converted to an RGB array.
ref = Image.open("../img/" + FILENAME + ".jpg")
ref = np.array(ref)
black = np.zeros((256, 256, 3), np.uint8)  # scratch canvas; apparently unused at module level
copy = np.zeros((256, 256, 3), np.uint8)   # base canvas; updated by linear() as circles are committed
def image(adn):
    """Render a genome string into a 256x256 RGB image.

    The genome's characters are consumed 7 at a time as
    (x, y, radius, alpha, r, g, b), each value being one char's code point.
    All circles are drawn opaquely onto a copy of the base canvas, then the
    overlay is alpha-blended back over the base. Note that only the alpha
    of the *last* circle controls the final blend.
    """
    img = np.array(copy)
    # BUG FIX: fully-opaque fallback so an empty genome no longer raises
    # NameError when alpha is read after the loop.
    alpha = 255
    i = 0
    while i < len(adn):
        x = ord(adn[i])
        y = ord(adn[i + 1])
        radius = ord(adn[i + 2])
        alpha = ord(adn[i + 3])
        r = ord(adn[i + 4])
        g = ord(adn[i + 5])
        b = ord(adn[i + 6])
        cv2.circle(img, (x, y), radius, (r, g, b), -1)
        i += 7
    alpha = alpha / 255.0
    cv2.addWeighted(img, alpha, copy, 1.0 - alpha, 0, img)
    return img
def fitness(adn):
    """Sum of absolute per-channel differences between the reference image
    and the rendered genome; lower is better.

    Replaces the former triple Python loop over every pixel/channel with a
    single vectorized numpy expression (same result, orders of magnitude
    faster). Widening to int64 avoids uint8 wrap-around, matching the old
    per-element ``abs(int(...) - int(...))`` arithmetic exactly.
    """
    rendered = image(adn)
    return int(np.abs(ref.astype(np.int64) - rendered.astype(np.int64)).sum())
def select(samples):
    """Binary-tournament selection.

    For each slot, draw two distinct individuals at random and keep the one
    with the lower (better) fitness; ties go to the second pick. The result
    has the same length as the input population.
    """
    winners = []
    pool = len(samples)
    for _ in range(pool):
        first, second = random.sample(range(pool), 2)
        contender_a, contender_b = samples[first], samples[second]
        winners.append(contender_a if contender_a[1] < contender_b[1]
                       else contender_b)
    return winners
def mate(samples):
    """Single-point crossover over randomly paired parents.

    Each of the len(samples)//2 iterations draws two parents; with
    probability MATE_PROBABILTY their genomes are crossed at a random
    point and both children (with freshly computed fitness) are kept,
    otherwise both parents pass through unchanged.
    """
    new_samples = []
    for _ in range(len(samples) // 2):
        dads = random.sample(samples, 2)
        prob = random.uniform(0.0, 1.0)
        if prob < MATE_PROBABILTY:
            # BUG FIX: the crossover point must range over the genome
            # string dads[0][0], not the (genome, fitness) tuple dads[0]
            # whose len() is always 2 — the old code only ever cut at 0/1.
            cross = random.randint(0, len(dads[0][0]) - 1)
            son_a = dads[0][0][cross:] + dads[1][0][:cross]
            son_b = dads[1][0][cross:] + dads[0][0][:cross]
            fit_a = fitness(son_a)
            fit_b = fitness(son_b)
            new_samples.append((son_a, fit_a))
            new_samples.append((son_b, fit_b))
        else:
            new_samples.append(dads[0])
            new_samples.append(dads[1])
    return new_samples
def mutate(samples):
    """Mutate individuals in place-of with probability MUTATION_PROBABILITY.

    A mutated individual has 1..len//2 random genes changed; a "hard"
    mutation (probability HARD_MUTATION_PROBABILITY) resets each chosen
    gene to a random byte, otherwise the gene is nudged by +/-16 and
    clamped to [0, 255]. Fitness is recomputed for mutated genomes.
    """
    new_samples = []
    for sample in samples:
        prob = random.uniform(0.0, 1.0)
        if prob <= MUTATION_PROBABILITY:
            hard = random.uniform(0.0, 1.0) <= HARD_MUTATION_PROBABILITY
            mutated = list(sample[0])
            # BUG FIX: use integer division — randint() rejects float
            # bounds, so 7-gene genomes (len/2 == 3.5) raised ValueError.
            for _ in range(random.randint(1, len(sample[0]) // 2)):
                index = random.randint(0, len(sample[0]) - 1)
                if hard:
                    mutated[index] = chr(random.randint(0, 255))
                else:
                    delta = random.randint(-16, 16)
                    mutated[index] = chr(max(0, min(255, ord(mutated[index]) + delta)))
            mutated = ''.join(mutated)
            fit = fitness(mutated)
            new_samples.append((mutated, fit))
        else:
            new_samples.append(sample)
    return new_samples
def create_population(circles):
    """Build POPULATION random genomes of ``circles`` circles each.

    Every genome is 7 * circles random bytes (one 7-gene group per circle).
    Returns ``(elite, samples)`` where elite is the (genome, fitness) pair
    with the lowest fitness seen while building the population.
    """
    elite = None
    samples = []
    while len(samples) < POPULATION:
        genome = ""
        for _ in range(7 * circles):
            genome += chr(random.randint(0, 255))
        score = fitness(genome)
        samples.append((genome, score))
        if elite is None or score < elite[1]:
            elite = samples[-1]
    return elite, samples
def linear():
    """Main GA loop: evolve circle genomes toward ``ref``, saving snapshots.

    Starts with one circle; whenever the elite fitness stalls for ADD_GEN
    consecutive 10-generation checks, the current best image is committed
    to the base canvas (``copy``) and a fresh one-circle population is
    started. Runs forever; every 10th generation the elite image is saved.
    """
    circles = 1
    elite, samples = create_population(circles)
    add_elite = -1       # elite fitness seen at the last stagnation check
    add_repetitions = 1  # consecutive checks with unchanged elite fitness
    date = datetime.datetime.now().strftime("%H.%M.%S")
    folder = "%s/%s/" % (FILENAME, date)
    os.makedirs(folder)
    # NOTE(review): ``ref`` is loaded from ../img/, but this copies
    # FILENAME + ".jpg" from the working directory — confirm both exist.
    copyfile(FILENAME + ".jpg", folder + FILENAME + ".jpg")
    k = 0
    while True:
        samples = select(samples)
        samples = mate(samples)
        samples = mutate(samples)
        pseudo = None  # best individual of the current generation
        for sample in samples:
            if pseudo is None or sample[1] < pseudo[1]:
                pseudo = sample
        if elite[1] < pseudo[1]:
            # elitism: reinsert the all-time best over a random individual
            index = random.randint(0, len(samples) - 1)
            samples[index] = elite
        else:
            elite = pseudo
        if k % 10 == 0:
            best = image(elite[0])
            if elite[1] == add_elite:
                add_repetitions += 1
                if add_repetitions >= ADD_GEN:
                    # fitness stalled: bake the best image into the base
                    # canvas and restart evolution with a new circle
                    global copy
                    copy = best
                    circles += 1
                    elite, samples = create_population(1)
            else:
                add_elite = elite[1]
                add_repetitions = 1
            img = Image.fromarray(best)
            # percentage of the worst-possible pixel error recovered so far
            percentage = 100.0 - (elite[1] * 100.0 / float(256 * 256 * 3 * 255))
            img.save("%s%04d (%d - %.2f).jpg" % (folder, k, elite[1], percentage))
        k += 1
linear()
| [
"random.sample",
"PIL.Image.open",
"random.uniform",
"PIL.Image.fromarray",
"os.makedirs",
"numpy.array",
"numpy.zeros",
"cv2.addWeighted",
"shutil.copyfile",
"cv2.circle",
"datetime.datetime.now",
"random.randint"
] | [((277, 318), 'PIL.Image.open', 'Image.open', (["('../img/' + FILENAME + '.jpg')"], {}), "('../img/' + FILENAME + '.jpg')\n", (287, 318), False, 'from PIL import Image\n'), ((325, 338), 'numpy.array', 'np.array', (['ref'], {}), '(ref)\n', (333, 338), True, 'import numpy as np\n'), ((348, 381), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)', 'np.uint8'], {}), '((256, 256, 3), np.uint8)\n', (356, 381), True, 'import numpy as np\n'), ((389, 422), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)', 'np.uint8'], {}), '((256, 256, 3), np.uint8)\n', (397, 422), True, 'import numpy as np\n'), ((461, 475), 'numpy.array', 'np.array', (['copy'], {}), '(copy)\n', (469, 475), True, 'import numpy as np\n'), ((803, 857), 'cv2.addWeighted', 'cv2.addWeighted', (['img', 'alpha', 'copy', '(1.0 - alpha)', '(0)', 'img'], {}), '(img, alpha, copy, 1.0 - alpha, 0, img)\n', (818, 857), False, 'import cv2\n'), ((3565, 3584), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (3576, 3584), False, 'import os\n'), ((3589, 3644), 'shutil.copyfile', 'copyfile', (["(FILENAME + '.jpg')", "(folder + FILENAME + '.jpg')"], {}), "(FILENAME + '.jpg', folder + FILENAME + '.jpg')\n", (3597, 3644), False, 'from shutil import copyfile\n'), ((710, 756), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', 'radius', '(r, g, b)', '(-1)'], {}), '(img, (x, y), radius, (r, g, b), -1)\n', (720, 756), False, 'import cv2\n'), ((1545, 1570), 'random.sample', 'random.sample', (['samples', '(2)'], {}), '(samples, 2)\n', (1558, 1570), False, 'import random\n'), ((1586, 1610), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1600, 1610), False, 'import random\n'), ((2192, 2216), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2206, 2216), False, 'import random\n'), ((3475, 3498), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3496, 3498), False, 'import datetime\n'), ((4528, 4549), 'PIL.Image.fromarray', 'Image.fromarray', (['best'], 
{}), '(best)\n', (4543, 4549), False, 'from PIL import Image\n'), ((2277, 2301), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2291, 2301), False, 'import random\n'), ((3143, 3165), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (3157, 3165), False, 'import random\n'), ((2643, 2666), 'random.randint', 'random.randint', (['(-16)', '(16)'], {}), '(-16, 16)\n', (2657, 2666), False, 'import random\n'), ((2569, 2591), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2583, 2591), False, 'import random\n')] |
# -*- coding = utf-8 -*-
# @Time : 2022/2/3 10:17
# @Author : 戎昱
# @File : makeImageSets.py
# @Software : PyCharm
# @Contact : <EMAIL>
# @github : https://github.com/SekiroRong
import os
import random
import numpy as np
from config import kitti_root
# kitti_root = r'G:\carla'
# Directory layout: <kitti_root>/training/image_2 holds the frames whose
# IDs are written into the <kitti_root>/ImageSets/*.txt split files.
video_path = kitti_root + r'\training'
images_dir = os.path.join(video_path, 'image_2')
txt_dir = kitti_root + r'\ImageSets\train.txt'      # 80% training split
txt_dir2 = kitti_root + r'\ImageSets\test.txt'      # unused: its writer is commented out below
txt_dir3 = kitti_root + r'\ImageSets\trainval.txt'  # every frame
txt_dir4 = kitti_root + r'\ImageSets\val.txt'       # remaining 20%
def makeImageSets(renew=True):
    """Write KITTI-style ImageSets split files from the frames in image_2.

    Produces train.txt (random 80%), trainval.txt (all frames) and val.txt
    (the remaining 20%). With ``renew=True`` existing files are
    overwritten, otherwise new ids are appended.
    """
    mode = 'w' if renew else 'a'
    frame_paths = sorted(
        os.path.join(images_dir, name) for name in os.listdir(images_dir)
    )
    total = len(frame_paths)
    train_count = int(0.8 * total)
    val_count = total - train_count  # kept for symmetry with the 80/20 split
    print(total)
    train_paths = np.random.choice(frame_paths, train_count, replace=False)
    val_paths = list(set(frame_paths) ^ set(train_paths))
    print(len(train_paths))
    print(len(val_paths))

    def frame_id(path):
        # drop the directory prefix, its separator, and the file extension
        return path.replace(images_dir, '')[1:-4]

    with open(txt_dir, mode) as split_file:
        for path in train_paths:
            split_file.write(frame_id(path))
            split_file.write('\n')
    with open(txt_dir3, mode) as split_file:
        for path in frame_paths:
            split_file.write(frame_id(path))
            split_file.write('\n')
    with open(txt_dir4, mode) as split_file:
        for path in val_paths:
            split_file.write(frame_id(path))
            split_file.write('\n')
makeImageSets() | [
"numpy.random.choice",
"os.listdir",
"os.path.join"
] | [((346, 381), 'os.path.join', 'os.path.join', (['video_path', '"""image_2"""'], {}), "(video_path, 'image_2')\n", (358, 381), False, 'import os\n'), ((962, 1025), 'numpy.random.choice', 'np.random.choice', (['images_filenames', 'train_length'], {'replace': '(False)'}), '(images_filenames, train_length, replace=False)\n', (978, 1025), True, 'import numpy as np\n'), ((689, 723), 'os.path.join', 'os.path.join', (['images_dir', 'filename'], {}), '(images_dir, filename)\n', (701, 723), False, 'import os\n'), ((740, 762), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (750, 762), False, 'import os\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test for smoothing with kernels """
import numpy as np
from numpy.random import random_integers as randint
from nipy import load_image
from nipy.algorithms.kernel_smooth import LinearFilter
from nipy.core.api import Image
from nipy.core.reference.coordinate_map import AffineTransform
from nipy.algorithms.kernel_smooth import sigma2fwhm, fwhm2sigma
from nipy.testing import (assert_true, assert_equal, assert_raises,
anatfile, funcfile)
# No test here?
def test_anat_smooth():
    """Smoke test: smoothing the anatomical sample image runs without error."""
    anat_img = load_image(anatfile)
    anat_filter = LinearFilter(anat_img.coordmap, anat_img.shape)
    anat_filter.smooth(anat_img)
def test_func_smooth():
    """Smoothing a 4D functional image is unimplemented and must raise."""
    func_img = load_image(funcfile)
    func_filter = LinearFilter(func_img.coordmap, func_img.shape)
    # should work, but currently broken : sfunc = smoother.smooth(func)
    assert_raises(NotImplementedError, func_filter.smooth, func_img)
def test_sigma_fwhm():
    """fwhm2sigma and sigma2fwhm must be inverses of each other."""
    fwhm_values = np.arange(1.0, 5.0, 0.1)
    sigma_values = np.arange(1.0, 5.0, 0.1)
    assert_true(np.allclose(sigma2fwhm(fwhm2sigma(fwhm_values)), fwhm_values))
    assert_true(np.allclose(fwhm2sigma(sigma2fwhm(sigma_values)), sigma_values))
def test_kernel():
    """Smoothing a delta function must reproduce the filter's own kernel.

    For six random shapes/affines: place a unit impulse, smooth it, and
    check (a) the maximum stays at the impulse location, (b) the smoothed
    array correlates ~1 with the analytically evaluated kernel, and
    (c) 1D profiles along each axis match the kernel as well.
    """
    # Verify that convolution with a delta function gives the correct
    # answer.
    tol = 0.9999
    sdtol = 1.0e-8
    for x in range(6):
        shape = randint(30,60,(3,))
        # pos of delta
        ii, jj, kk = randint(11,17, (3,))
        # random affine coordmap (diagonal and translations)
        coordmap = AffineTransform.from_start_step('ijk', 'xyz',
                                        randint(5,20,(3,))*0.25,
                                        randint(5,10,(3,))*0.5)
        # delta function in 3D array
        signal = np.zeros(shape)
        signal[ii,jj,kk] = 1.
        signal = Image(signal, coordmap=coordmap)
        # A filter with coordmap, shape matched to image
        kernel = LinearFilter(coordmap, shape,
                              fwhm=randint(50,100)/10.)
        # smoothed normalized 3D array
        ssignal = kernel.smooth(signal).get_data()
        ssignal[:] *= kernel.norms[kernel.normalization]
        # 3 points * signal.size array
        I = np.indices(ssignal.shape)
        I.shape = (kernel.coordmap.ndims[0], np.product(shape))
        # location of maximum in smoothed array
        i, j, k = I[:, np.argmax(ssignal[:].flat)]
        # same place as we put it before smoothing?
        assert_equal((i,j,k), (ii,jj,kk))
        # get physical points position relative to position of delta
        Z = kernel.coordmap(I.T) - kernel.coordmap([i,j,k])
        _k = kernel(Z)
        _k.shape = ssignal.shape
        # evaluated kernel and smoothed impulse must agree everywhere
        assert_true((np.corrcoef(_k[:].flat, ssignal[:].flat)[0,1] > tol))
        assert_true(((_k[:] - ssignal[:]).std() < sdtol))

        # 20-voxel line through (i, j, k) along the given axis
        def _indices(i,j,k,axis):
            I = np.zeros((3,20))
            I[0] += i
            I[1] += j
            I[2] += k
            I[axis] += np.arange(-10,10)
            return I.T
        # 1D profiles through the maximum along z, y and x respectively
        vx = ssignal[i,j,(k-10):(k+10)]
        xformed_ijk = coordmap([i, j, k])
        vvx = coordmap(_indices(i,j,k,2)) - xformed_ijk
        assert_true((np.corrcoef(vx, kernel(vvx))[0,1] > tol))
        vy = ssignal[i,(j-10):(j+10),k]
        vvy = coordmap(_indices(i,j,k,1)) - xformed_ijk
        assert_true((np.corrcoef(vy, kernel(vvy))[0,1] > tol))
        vz = ssignal[(i-10):(i+10),j,k]
        vvz = coordmap(_indices(i,j,k,0)) - xformed_ijk
        assert_true((np.corrcoef(vz, kernel(vvz))[0,1] > tol))
| [
"nipy.algorithms.kernel_smooth.LinearFilter",
"nipy.core.api.Image",
"numpy.product",
"nipy.algorithms.kernel_smooth.fwhm2sigma",
"numpy.corrcoef",
"numpy.random.random_integers",
"nipy.load_image",
"numpy.argmax",
"numpy.indices",
"numpy.zeros",
"nipy.testing.assert_equal",
"nipy.algorithms.k... | [((637, 657), 'nipy.load_image', 'load_image', (['anatfile'], {}), '(anatfile)\n', (647, 657), False, 'from nipy import load_image\n'), ((673, 712), 'nipy.algorithms.kernel_smooth.LinearFilter', 'LinearFilter', (['anat.coordmap', 'anat.shape'], {}), '(anat.coordmap, anat.shape)\n', (685, 712), False, 'from nipy.algorithms.kernel_smooth import LinearFilter\n'), ((784, 804), 'nipy.load_image', 'load_image', (['funcfile'], {}), '(funcfile)\n', (794, 804), False, 'from nipy import load_image\n'), ((820, 859), 'nipy.algorithms.kernel_smooth.LinearFilter', 'LinearFilter', (['func.coordmap', 'func.shape'], {}), '(func.coordmap, func.shape)\n', (832, 859), False, 'from nipy.algorithms.kernel_smooth import LinearFilter\n'), ((936, 993), 'nipy.testing.assert_raises', 'assert_raises', (['NotImplementedError', 'smoother.smooth', 'func'], {}), '(NotImplementedError, smoother.smooth, func)\n', (949, 993), False, 'from nipy.testing import assert_true, assert_equal, assert_raises, anatfile, funcfile\n'), ((1101, 1125), 'numpy.arange', 'np.arange', (['(1.0)', '(5.0)', '(0.1)'], {}), '(1.0, 5.0, 0.1)\n', (1110, 1125), True, 'import numpy as np\n'), ((1138, 1162), 'numpy.arange', 'np.arange', (['(1.0)', '(5.0)', '(0.1)'], {}), '(1.0, 5.0, 0.1)\n', (1147, 1162), True, 'import numpy as np\n'), ((1475, 1496), 'numpy.random.random_integers', 'randint', (['(30)', '(60)', '(3,)'], {}), '(30, 60, (3,))\n', (1482, 1496), True, 'from numpy.random import random_integers as randint\n'), ((1539, 1560), 'numpy.random.random_integers', 'randint', (['(11)', '(17)', '(3,)'], {}), '(11, 17, (3,))\n', (1546, 1560), True, 'from numpy.random import random_integers as randint\n'), ((1874, 1889), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1882, 1889), True, 'import numpy as np\n'), ((1937, 1969), 'nipy.core.api.Image', 'Image', (['signal'], {'coordmap': 'coordmap'}), '(signal, coordmap=coordmap)\n', (1942, 1969), False, 'from nipy.core.api import Image\n'), ((2329, 
2354), 'numpy.indices', 'np.indices', (['ssignal.shape'], {}), '(ssignal.shape)\n', (2339, 2354), True, 'import numpy as np\n'), ((2578, 2615), 'nipy.testing.assert_equal', 'assert_equal', (['(i, j, k)', '(ii, jj, kk)'], {}), '((i, j, k), (ii, jj, kk))\n', (2590, 2615), False, 'from nipy.testing import assert_true, assert_equal, assert_raises, anatfile, funcfile\n'), ((2400, 2417), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (2410, 2417), True, 'import numpy as np\n'), ((2981, 2998), 'numpy.zeros', 'np.zeros', (['(3, 20)'], {}), '((3, 20))\n', (2989, 2998), True, 'import numpy as np\n'), ((3087, 3105), 'numpy.arange', 'np.arange', (['(-10)', '(10)'], {}), '(-10, 10)\n', (3096, 3105), True, 'import numpy as np\n'), ((1202, 1218), 'nipy.algorithms.kernel_smooth.fwhm2sigma', 'fwhm2sigma', (['fwhm'], {}), '(fwhm)\n', (1212, 1218), False, 'from nipy.algorithms.kernel_smooth import sigma2fwhm, fwhm2sigma\n'), ((1267, 1284), 'nipy.algorithms.kernel_smooth.sigma2fwhm', 'sigma2fwhm', (['sigma'], {}), '(sigma)\n', (1277, 1284), False, 'from nipy.algorithms.kernel_smooth import sigma2fwhm, fwhm2sigma\n'), ((1729, 1749), 'numpy.random.random_integers', 'randint', (['(5)', '(20)', '(3,)'], {}), '(5, 20, (3,))\n', (1736, 1749), True, 'from numpy.random import random_integers as randint\n'), ((1796, 1816), 'numpy.random.random_integers', 'randint', (['(5)', '(10)', '(3,)'], {}), '(5, 10, (3,))\n', (1803, 1816), True, 'from numpy.random import random_integers as randint\n'), ((2490, 2516), 'numpy.argmax', 'np.argmax', (['ssignal[:].flat'], {}), '(ssignal[:].flat)\n', (2499, 2516), True, 'import numpy as np\n'), ((2110, 2126), 'numpy.random.random_integers', 'randint', (['(50)', '(100)'], {}), '(50, 100)\n', (2117, 2126), True, 'from numpy.random import random_integers as randint\n'), ((2818, 2858), 'numpy.corrcoef', 'np.corrcoef', (['_k[:].flat', 'ssignal[:].flat'], {}), '(_k[:].flat, ssignal[:].flat)\n', (2829, 2858), True, 'import numpy as np\n')] |
import json
import logging
import os
import tempfile
import pandas as pd
import warnings
from io import StringIO
from os import getcwd
from os.path import abspath, dirname, join
from pathlib import Path
from shutil import rmtree
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_not_equal,
eq_,
ok_,
raises)
from rsmtool.convert_feature_json import convert_feature_json_file
from rsmtool.configuration_parser import (Configuration,
ConfigurationParser)
_MY_DIR = dirname(__file__)
class TestConfigurationParser:
    def setUp(self):
        """No shared fixtures are needed; each test builds its own config."""
        pass
@raises(FileNotFoundError)
def test_init_nonexistent_file(self):
non_existent_file = "/x/y.json"
_ = ConfigurationParser(non_existent_file)
@raises(OSError)
def test_init_directory_instead_of_file(self):
with tempfile.TemporaryDirectory() as tempd:
_ = ConfigurationParser(tempd)
@raises(ValueError)
def test_init_non_json_file(self):
with tempfile.NamedTemporaryFile(suffix=".txt") as tempf:
_ = ConfigurationParser(tempf.name)
def test_parse_config_from_dict_rsmtool(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression'}
# Add data to `Configuration` object
newdata = Configuration(data)
assert_equal(newdata['id_column'], 'spkitemid')
assert_equal(newdata['use_scaled_predictions'], False)
assert_equal(newdata['select_transformations'], False)
assert_array_equal(newdata['general_sections'], ['all'])
assert_equal(newdata['description'], '')
assert_equal(newdata.configdir, getcwd())
def test_parse_config_from_dict_rsmeval(self):
data = {'experiment_id': 'experiment_1',
'predictions_file': 'data/rsmtool_smTrain.csv',
'system_score_column': 'system',
'trim_min': 1,
'trim_max': 5}
# Add data to `Configuration` object
newdata = Configuration(data, context='rsmeval')
assert_equal(newdata['id_column'], 'spkitemid')
assert_array_equal(newdata['general_sections'], ['all'])
assert_equal(newdata['description'], '')
assert_equal(newdata.configdir, getcwd())
@raises(ValueError)
def test_validate_config_missing_fields(self):
data = {'experiment_id': 'test'}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_validate_config_min_responses_but_no_candidate(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'min_responses_per_candidate': 5}
_ = ConfigurationParser.validate_config(data)
def test_validate_config_unspecified_fields(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression'}
newdata = ConfigurationParser.validate_config(data)
assert_equal(newdata['id_column'], 'spkitemid')
assert_equal(newdata['use_scaled_predictions'], False)
assert_equal(newdata['select_transformations'], False)
assert_array_equal(newdata['general_sections'], ['all'])
assert_equal(newdata['description'], '')
@raises(ValueError)
def test_validate_config_unknown_fields(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'LinearRegression',
'output': 'foobar'}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_validate_config_experiment_id_1(self):
data = {'experiment_id': 'test experiment',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression'}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_validate_config_experiment_id_2(self):
data = {'experiment_id': 'test experiment',
'predictions_file': 'data/foo',
'system_score_column': 'h1',
'trim_min': 1,
'trim_max': 5}
_ = ConfigurationParser.validate_config(data, context='rsmeval')
@raises(ValueError)
def test_validate_config_experiment_id_3(self):
data = {'comparison_id': 'old vs new',
'experiment_id_old': 'old_experiment',
'experiment_dir_old': 'data/old',
'experiment_id_new': 'new_experiment',
'experiment_dir_new': 'data/new'}
_ = ConfigurationParser.validate_config(data, context='rsmcompare')
@raises(ValueError)
def test_validate_config_experiment_id_4(self):
data = {'comparison_id': 'old vs new',
'experiment_id_old': 'old experiment',
'experiment_dir_old': 'data/old',
'experiment_id_new': 'new_experiment',
'experiment_dir_new': 'data/new'}
_ = ConfigurationParser.validate_config(data, context='rsmcompare')
@raises(ValueError)
def test_validate_config_experiment_id_5(self):
data = {'experiment_id': 'this_is_a_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_long_id',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression'}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_validate_config_experiment_id_6(self):
data = {'experiment_id': 'this_is_a_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_long_id',
'predictions_file': 'data/foo',
'system_score_column': 'h1',
'trim_min': 1,
'trim_max': 5}
_ = ConfigurationParser.validate_config(data, context='rsmeval')
@raises(ValueError)
def test_validate_config_experiment_id_7(self):
data = {'comparison_id': 'this_is_a_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_long_id',
'experiment_id_old': 'old_experiment',
'experiment_dir_old': 'data/old',
'experiment_id_new': 'new_experiment',
'experiment_dir_new': 'data/new'}
_ = ConfigurationParser.validate_config(data, context='rsmcompare')
@raises(ValueError)
def test_validate_config_experiment_id_8(self):
data = {'summary_id': 'model summary',
'experiment_dirs': []}
_ = ConfigurationParser.validate_config(data, context='rsmsummarize')
@raises(ValueError)
def test_validate_config_experiment_id_9(self):
data = {'summary_id': 'this_is_a_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_really_really_really_really_'
'really_really_really_long_id',
'experiment_dirs': []}
_ = ConfigurationParser.validate_config(data, context='rsmsummarize')
@raises(ValueError)
def test_validate_config_too_many_experiment_names(self):
data = {'summary_id': 'summary',
'experiment_dirs': ["dir1", "dir2", "dir3"],
'experiment_names': ['exp1', 'exp2', 'exp3', 'exp4']}
_ = ConfigurationParser.validate_config(data, context='rsmsummarize')
@raises(ValueError)
def test_validate_config_too_few_experiment_names(self):
data = {'summary_id': 'summary',
'experiment_dirs': ["dir1", "dir2", "dir3"],
'experiment_names': ['exp1', 'exp2']}
_ = ConfigurationParser.validate_config(data, context='rsmsummarize')
def test_validate_config_numeric_subgroup_threshold(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'subgroups': ['L2', 'L1'],
'min_n_per_group': 100}
newdata = ConfigurationParser.validate_config(data)
eq_(type(newdata['min_n_per_group']), dict)
assert_equal(newdata['min_n_per_group']['L1'], 100)
assert_equal(newdata['min_n_per_group']['L2'], 100)
def test_validate_config_dictionary_subgroup_threshold(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'subgroups': ['L2', 'L1'],
'min_n_per_group': {"L1": 100,
"L2": 200}}
newdata = ConfigurationParser.validate_config(data)
eq_(type(newdata['min_n_per_group']), dict)
assert_equal(newdata['min_n_per_group']['L1'], 100)
assert_equal(newdata['min_n_per_group']['L2'], 200)
@raises(ValueError)
def test_validate_config_too_few_subgroup_keys(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'subgroups': ['L1', 'L2'],
'min_n_per_group': {"L1": 100}}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_valdiate_config_too_many_subgroup_keys(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'subgroups': ['L1', 'L2'],
'min_n_per_group': {"L1": 100,
"L2": 100,
"L4": 50}}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_validate_config_mismatched_subgroup_keys(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'subgroups': ['L1', 'L2'],
'min_n_per_group': {"L1": 100,
"L4": 50}}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_validate_config_min_n_without_subgroups(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'min_n_per_group': {"L1": 100,
"L2": 50}}
_ = ConfigurationParser.validate_config(data)
def test_validate_config_warning_feature_file_and_transformations(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'select_transformations': True,
'features': 'some_file.csv'}
with warnings.catch_warnings(record=True) as warning_list:
_ = ConfigurationParser.validate_config(data)
eq_(len(warning_list), 1)
ok_(issubclass(warning_list[0].category, UserWarning))
def test_validate_config_warning_feature_list_and_transformations(self):
# this should no show warnings
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'model': 'LinearRegression',
'select_transformations': True,
'features': ['feature1', 'feature2']}
with warnings.catch_warnings(record=True) as warning_list:
_ = ConfigurationParser.validate_config(data)
eq_(len(warning_list), 0)
def test_process_fields(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'empWt',
'use_scaled_predictions': 'True',
'subgroups': 'native language, GPA_range',
'exclude_zero_scores': 'false'}
newdata = ConfigurationParser.process_config(data)
assert_array_equal(newdata['subgroups'], ['native language', 'GPA_range'])
eq_(type(newdata['use_scaled_predictions']), bool)
eq_(newdata['use_scaled_predictions'], True)
eq_(newdata['exclude_zero_scores'], False)
@raises(ValueError)
def test_process_fields_with_non_boolean(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'empWt',
'use_scaled_predictions': 'True',
'feature_prefix': '1gram, 2gram',
'subgroups': 'native language, GPA_range',
'exclude_zero_scores': 'Yes'}
_ = ConfigurationParser.process_config(data)
@raises(ValueError)
def test_process_fields_with_integer(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'empWt',
'use_scaled_predictions': 'True',
'feature_prefix': '1gram, 2gram',
'subgroups': 'native language, GPA_range',
'exclude_zero_scores': 1}
_ = ConfigurationParser.process_config(data)
def test_process_fields_rsmsummarize(self):
data = {'summary_id': 'summary',
'experiment_dirs': 'home/dir1, home/dir2, home/dir3',
'experiment_names': 'exp1, exp2, exp3'}
newdata = ConfigurationParser.process_config(data)
assert_array_equal(newdata['experiment_dirs'], ['home/dir1',
'home/dir2',
'home/dir3'])
assert_array_equal(newdata['experiment_names'], ['exp1',
'exp2',
'exp3'])
@raises(ValueError)
def test_invalid_skll_objective(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'LinearSVR',
'skll_objective': 'squared_error'}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_wrong_skll_model_for_expected_scores(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'LinearSVR',
'predict_expected_scores': 'true'}
_ = ConfigurationParser.validate_config(data)
@raises(ValueError)
def test_builtin_model_for_expected_scores(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'NNLR',
'predict_expected_scores': 'true'}
_ = ConfigurationParser.validate_config(data)
def test_process_validate_correct_order_boolean(self):
data = {'experiment_id': 'experiment_1',
'train_file': 'data/rsmtool_smTrain.csv',
'test_file': 'data/rsmtool_smEval.csv',
'description': 'Test',
'model': 'NNLR',
'predict_expected_scores': 'false'}
configuration = Configuration(data)
eq_(configuration['predict_expected_scores'], False)
def test_process_validate_correct_order_list(self):
data = {'summary_id': 'summary',
'experiment_dirs': 'home/dir1, home/dir2, home/dir3',
'experiment_names': 'exp1, exp2, exp3'}
# Add data to `ConfigurationParser` object
configuration = Configuration(data, context='rsmsummarize')
assert_array_equal(configuration['experiment_dirs'], ['home/dir1',
'home/dir2',
'home/dir3'])
assert_array_equal(configuration['experiment_names'],
['exp1', 'exp2', 'exp3'])
class TestConfiguration:
    """Tests for the ``Configuration`` container itself: dict-like access,
    copying, flag-column checks, configdir/context properties,
    serialization via ``save()``, and the various ``get_*`` helpers."""

    def test_init_default_values(self):
        config_dict = {"experiment_id": 'my_experiment',
                       "train_file": 'path/to/train.tsv',
                       "test_file": 'path/to/test.tsv',
                       "model": 'LinearRegression'}
        config = Configuration(config_dict)
        for key in config_dict:
            if key == 'experiment_id':
                continue
            eq_(config._config[key], config_dict[key])
        eq_(config._config['experiment_id'], config_dict['experiment_id'])
        # configdir defaults to the current working directory
        eq_(config.configdir, abspath(getcwd()))

    def test_init_with_configdir_only_as_kword_argument(self):
        configdir = 'some/path'
        config_dict = {'experiment_id': 'my_experiment',
                       'train_file': 'path/to/train.tsv',
                       'test_file': 'path/to/test.tsv',
                       "model": 'LinearRegression'}
        config = Configuration(config_dict,
                               configdir=configdir)
        eq_(config._configdir, Path(configdir).resolve())

    @raises(TypeError)
    def test_init_wrong_input_type(self):
        # only dicts are accepted, not lists of tuples
        config_input = [('experiment_id', "XXX"),
                        ('train_file', 'path/to/train.tsv')]
        config = Configuration(config_input)

    def check_logging_output(self, expected, function, *args, **kwargs):
        """Call ``function(*args, **kwargs)`` while capturing root-logger
        output, assert that ``expected`` appears in that output, and
        return the function's result.

        Raises
        ------
        AssertionError
            If ``expected`` is not found in the captured logging text.
        """
        # check if the `expected` text is in the actual logging output
        root_logger = logging.getLogger()
        with StringIO() as string_io:
            # add a stream handler
            handler = logging.StreamHandler(string_io)
            root_logger.addHandler(handler)
            result = function(*args, **kwargs)
            logging_text = string_io.getvalue()
            try:
                assert expected in logging_text
            except AssertionError:
                # remove the stream handler and raise error
                root_logger.handlers = []
                raise AssertionError('`{}` not in logging output: '
                                     '`{}`.'.format(expected, logging_text))
            # remove the stream handler, even if we have no errors
            root_logger.handlers = []
            return result

    def test_pop_value(self):
        dictionary = {'experiment_id': '001',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        value = config.pop("experiment_id")
        eq_(value, '001')

    def test_pop_value_default(self):
        dictionary = {'experiment_id': '001',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        value = config.pop("foo", "bar")
        eq_(value, 'bar')

    def test_copy(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": [1, 2, 3],
                                "model": 'LinearRegression'})
        config_copy = config.copy()
        assert_not_equal(id(config), id(config_copy))
        for key in config.keys():
            # check to make sure this is a deep copy
            if key == "flag_column":
                assert_not_equal(id(config[key]), id(config_copy[key]))
            assert_equal(config[key], config_copy[key])

    def test_copy_not_deep(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": [1, 2, 3],
                                "model": 'LinearRegression'})
        config_copy = config.copy(deep=False)
        assert_not_equal(id(config), id(config_copy))
        for key in config.keys():
            # check to make sure this is a shallow copy
            if key == "flag_column":
                assert_equal(id(config[key]), id(config_copy[key]))
            assert_equal(config[key], config_copy[key])

    def test_check_flag_column(self):
        input_dict = {"advisory flag": ['0']}
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": input_dict,
                                "model": 'LinearRegression'})
        output_dict = config.check_flag_column()
        eq_(input_dict, output_dict)

    def test_check_flag_column_flag_column_test(self):
        input_dict = {"advisory flag": ['0']}
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column_test": input_dict,
                                "flag_column": input_dict,
                                "model": 'LinearRegression'})
        output_dict = config.check_flag_column("flag_column_test")
        eq_(input_dict, output_dict)

    def test_check_flag_column_keep_numeric(self):
        # numeric flag values must not be stringified
        input_dict = {"advisory flag": [1, 2, 3]}
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": input_dict,
                                "model": 'LinearRegression'})
        output_dict = config.check_flag_column()
        eq_(output_dict, {"advisory flag": [1, 2, 3]})

    def test_check_flag_column_no_values(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": None,
                                "model": 'LinearRegression'})
        flag_dict = config.check_flag_column()
        eq_(flag_dict, {})

    def test_check_flag_column_convert_to_list(self):
        # a scalar flag value is wrapped into a single-element list
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": "0"},
                                "model": 'LinearRegression'})
        flag_dict = config.check_flag_column()
        eq_(flag_dict, {"advisories": ['0']})

    def test_check_flag_column_convert_to_list_test(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": "0"},
                                "model": 'LinearRegression'})
        flag_dict = self.check_logging_output('evaluating',
                                              config.check_flag_column,
                                              partition='test')
        eq_(flag_dict, {"advisories": ['0']})

    def test_check_flag_column_convert_to_list_train(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": "0"},
                                "model": 'LinearRegression'})
        flag_dict = self.check_logging_output('training',
                                              config.check_flag_column,
                                              partition='train')
        eq_(flag_dict, {"advisories": ['0']})

    def test_check_flag_column_convert_to_list_both(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": "0"},
                                "model": 'LinearRegression'})
        flag_dict = self.check_logging_output('training and evaluating',
                                              config.check_flag_column,
                                              partition='both')
        eq_(flag_dict, {"advisories": ['0']})

    def test_check_flag_column_convert_to_list_unknown(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": "0"},
                                "model": 'LinearRegression'})
        flag_dict = self.check_logging_output('training and/or evaluating',
                                              config.check_flag_column,
                                              partition='unknown')
        eq_(flag_dict, {"advisories": ['0']})

    @raises(AssertionError)
    def test_check_flag_column_convert_to_list_test_error(self):
        # partition='test' logs 'evaluating', not 'training', so the
        # logging check itself must fail here
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": "0"},
                                "model": 'LinearRegression'})
        self.check_logging_output('training',
                                  config.check_flag_column,
                                  partition='test')

    def test_check_flag_column_convert_to_list_keep_numeric(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": 123},
                                "model": 'LinearRegression'})
        flag_dict = config.check_flag_column()
        eq_(flag_dict, {"advisories": [123]})

    def test_contains_key(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": 123},
                                "model": 'LinearRegression'})
        ok_('flag_column' in config, msg="Test 'flag_column' in config.")

    def test_does_not_contain_nested_key(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": 123},
                                "model": 'LinearRegression'})
        eq_('advisories' in config, False)

    def test_get_item(self):
        expected_item = {"advisories": 123}
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": expected_item,
                                "model": 'LinearRegression'})
        item = config['flag_column']
        eq_(item, expected_item)

    def test_set_item(self):
        expected_item = ["45", 34]
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": {"advisories": 123},
                                "model": 'LinearRegression'})
        config['other_column'] = expected_item
        eq_(config['other_column'], expected_item)

    @raises(ValueError)
    def test_check_flag_column_wrong_format(self):
        # flag_column must be a dict, not a string
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'})
        config.check_flag_column()

    @raises(ValueError)
    def test_check_flag_column_wrong_partition(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column_test": {"advisories": 123},
                                "model": 'LinearRegression'})
        config.check_flag_column(partition='eval')

    @raises(ValueError)
    def test_check_flag_column_mismatched_partition(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column_test": {"advisories": 123},
                                "model": 'LinearRegression'})
        config.check_flag_column(flag_column='flag_column_test',
                                 partition='train')

    @raises(ValueError)
    def test_check_flag_column_mismatched_partition_both(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column_test": {"advisories": 123},
                                "model": 'LinearRegression'})
        config.check_flag_column(flag_column='flag_column_test',
                                 partition='both')

    def test_str_correct(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'})
        eq_(config['flag_column'], '[advisories]')

    def test_get_configdir(self):
        configdir = '/path/to/dir/'
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'},
                               configdir=configdir)
        eq_(config.configdir, abspath(configdir))

    def test_set_configdir(self):
        configdir = '/path/to/dir/'
        new_configdir = 'path/that/is/new/'
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'},
                               configdir=configdir)
        config.configdir = new_configdir
        eq_(config.configdir, abspath(new_configdir))

    @raises(ValueError)
    def test_set_configdir_to_none(self):
        configdir = '/path/to/dir/'
        config = Configuration({"flag_column": "[advisories]"},
                               configdir=configdir)
        config.configdir = None

    def test_get_context(self):
        context = 'rsmtool'
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'},
                               context=context)
        eq_(config.context, context)

    def test_set_context(self):
        context = 'rsmtool'
        new_context = 'rsmcompare'
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'},
                               context=context)
        config.context = new_context
        eq_(config.context, new_context)

    def test_get(self):
        config = Configuration({"experiment_id": '001',
                                "train_file": "/foo/train.csv",
                                "test_file": "/foo/test.csv",
                                "trim_min": 1,
                                "trim_max": 6,
                                "flag_column": "[advisories]",
                                "model": 'LinearRegression'})
        eq_(config.get('flag_column'), "[advisories]")
        eq_(config.get('fasdfasfasdfa', 'hi'), 'hi')

    def test_to_dict(self):
        configdict = {"experiment_id": '001',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "trim_min": 1,
                      "trim_max": 6,
                      "flag_column": "abc",
                      "model": 'LinearRegression'}
        config = Configuration(configdict)
        for key in configdict:
            eq_(config[key], configdict[key])

    def test_keys(self):
        configdict = {"experiment_id": '001',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "trim_min": 1,
                      "trim_max": 6,
                      "flag_column": "abc",
                      "model": 'LinearRegression'}
        config = Configuration(configdict)
        given_keys = configdict.keys()
        computed_keys = config.keys()
        assert all([given_key in computed_keys for given_key in given_keys])

    def test_values(self):
        configdict = {"experiment_id": '001',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "trim_min": 1,
                      "trim_max": 6,
                      "flag_column": "abc",
                      "model": 'LinearRegression'}
        config = Configuration(configdict)
        given_values = configdict.values()
        computed_values = config.values()
        assert all([given_value in computed_values for given_value in given_values])

    def test_items(self):
        configdict = {"experiment_id": '001',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "trim_min": 1,
                      "trim_max": 6,
                      "flag_column": "abc",
                      "model": 'LinearRegression'}
        config = Configuration(configdict)
        given_items = configdict.items()
        computed_items = config.items()
        assert all([given_item in computed_items for given_item in given_items])

    def test_save(self):
        # save() writes `output/<experiment_id>_rsmtool.json` under the cwd
        dictionary = {'experiment_id': '001',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      "flag_column": "abc",
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        config.save()
        with open('output/001_rsmtool.json', 'r') as buff:
            config_new = json.loads(buff.read())
        rmtree('output')
        for key in dictionary:
            eq_(config_new[key], dictionary[key])

    def test_save_rsmcompare(self):
        dictionary = {"comparison_id": '001',
                      "experiment_id_old": 'foo',
                      "experiment_dir_old": 'foo',
                      "experiment_id_new": 'bar',
                      "experiment_dir_new": 'bar',
                      "description_old": "foo",
                      "description_new": "bar"}
        config = Configuration(dictionary,
                               context='rsmcompare')
        config.save()
        out_path = 'output/001_rsmcompare.json'
        with open(out_path) as buff:
            config_new = json.loads(buff.read())
        rmtree('output')
        for key in dictionary:
            eq_(config_new[key], dictionary[key])

    def test_save_rsmsummarize(self):
        dictionary = {"summary_id": '001', 'experiment_dirs': ['a', 'b', 'c']}
        config = Configuration(dictionary,
                               context='rsmsummarize')
        config.save()
        out_path = 'output/001_rsmsummarize.json'
        with open(out_path) as buff:
            config_new = json.loads(buff.read())
        rmtree('output')
        for key in dictionary:
            eq_(config_new[key], dictionary[key])

    def test_check_exclude_listwise_true(self):
        dictionary = {"experiment_id": '001',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "min_items_per_candidate": 4,
                      "candidate_column": "candidate",
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        exclude_list_wise = config.check_exclude_listwise()
        eq_(exclude_list_wise, True)

    def test_check_exclude_listwise_false(self):
        dictionary = {"experiment_id": '001',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        exclude_list_wise = config.check_exclude_listwise()
        eq_(exclude_list_wise, False)

    def test_get_trim_min_max_tolerance_none(self):
        dictionary = {'experiment_id': '001',
                      'id_column': 'A',
                      'candidate_column': 'B',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      'features': 'path/to/features.csv',
                      "model": 'LinearRegression',
                      'subgroups': ['C']}
        config = Configuration(dictionary)
        trim_min_max_tolerance = config.get_trim_min_max_tolerance()
        # 0.4998 is the built-in default trim tolerance
        eq_(trim_min_max_tolerance, (None, None, 0.4998))

    def test_get_trim_min_max_no_tolerance(self):
        dictionary = {"experiment_id": '001',
                      "trim_min": 1,
                      "trim_max": 6,
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        trim_min_max_tolerance = config.get_trim_min_max_tolerance()
        eq_(trim_min_max_tolerance, (1.0, 6.0, 0.4998))

    def test_get_trim_min_max_values_tolerance(self):
        dictionary = {"experiment_id": '001',
                      "trim_min": 1,
                      "trim_max": 6,
                      "trim_tolerance": 0.51,
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        trim_min_max_tolerance = config.get_trim_min_max_tolerance()
        eq_(trim_min_max_tolerance, (1.0, 6.0, 0.51))

    def test_get_trim_tolerance_no_min_max(self):
        dictionary = {"experiment_id": '001',
                      "trim_tolerance": 0.49,
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv",
                      "model": 'LinearRegression'}
        config = Configuration(dictionary)
        trim_min_max_tolerance = config.get_trim_min_max_tolerance()
        eq_(trim_min_max_tolerance, (None, None, 0.49))

    def test_get_rater_error_variance(self):
        # string values are converted to float
        dictionary = {"experiment_id": 'abs',
                      "rater_error_variance": "2.2525",
                      "model": 'LinearRegression',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv"}
        config = Configuration(dictionary)
        rater_error_variance = config.get_rater_error_variance()
        eq_(rater_error_variance, 2.2525)

    def test_get_rater_error_variance_none(self):
        dictionary = {"experiment_id": 'abs',
                      "model": 'LinearRegression',
                      "train_file": "/foo/train.csv",
                      "test_file": "/foo/test.csv"}
        config = Configuration(dictionary)
        rater_error_variance = config.get_rater_error_variance()
        eq_(rater_error_variance, None)

    def test_get_names_and_paths_with_feature_file(self):
        filepaths = ['path/to/train.tsv',
                     'path/to/test.tsv',
                     'path/to/features.csv']
        filenames = ['train', 'test', 'feature_specs']
        expected = (filenames, filepaths)
        dictionary = {'experiment_id': '001',
                      'id_column': 'A',
                      'candidate_column': 'B',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      'features': 'path/to/features.csv',
                      "model": 'LinearRegression',
                      'subgroups': ['C']}
        config = Configuration(dictionary)
        values_for_reader = config.get_names_and_paths(['train_file', 'test_file',
                                                        'features'],
                                                       ['train', 'test',
                                                        'feature_specs'])
        eq_(values_for_reader, expected)

    def test_get_names_and_paths_with_feature_subset(self):
        filepaths = ['path/to/train.tsv',
                     'path/to/test.tsv',
                     'path/to/feature_subset.csv']
        filenames = ['train', 'test', 'feature_subset_specs']
        expected = (filenames, filepaths)
        dictionary = {'experiment_id': '001',
                      'id_column': 'A',
                      'candidate_column': 'B',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      'feature_subset_file': 'path/to/feature_subset.csv',
                      'model': 'LinearRegression',
                      'subgroups': ['C']}
        config = Configuration(dictionary)
        values_for_reader = config.get_names_and_paths(['train_file', 'test_file',
                                                        'feature_subset_file'],
                                                       ['train', 'test',
                                                        'feature_subset_specs'])
        eq_(values_for_reader, expected)

    def test_get_names_and_paths_with_feature_list(self):
        # a literal feature *list* has no file path, so it is omitted
        filepaths = ['path/to/train.tsv',
                     'path/to/test.tsv']
        filenames = ['train', 'test']
        expected = (filenames, filepaths)
        dictionary = {'experiment_id': '001',
                      'id_column': 'A',
                      'candidate_column': 'B',
                      'train_file': 'path/to/train.tsv',
                      'test_file': 'path/to/test.tsv',
                      'features': ['FEATURE1', 'FEATURE2'],
                      'model': 'LinearRegression',
                      'subgroups': ['C']}
        config = Configuration(dictionary)
        values_for_reader = config.get_names_and_paths(['train_file', 'test_file',
                                                        'features'],
                                                       ['train', 'test',
                                                        'feature_specs'])
        eq_(values_for_reader, expected)
class TestJSONFeatureConversion:
    """Tests for ``convert_feature_json_file``, which converts a feature
    specification in JSON format into an equivalent CSV file."""

    def test_json_feature_conversion(self):
        """Converting a valid feature JSON file must match the reference CSV."""
        json_feature_file = join(_MY_DIR, 'data', 'experiments',
                                 'lr-feature-json', 'features.json')
        expected_feature_csv = join(_MY_DIR, 'data', 'experiments', 'lr', 'features.csv')

        # convert the feature json file and write to a temporary location;
        # close our handle right away since `convert_feature_json_file`
        # writes to the *path*, not to our open file object
        tempf = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
        tempf.close()
        try:
            convert_feature_json_file(json_feature_file, tempf.name, delete=False)

            # read the expected and converted files into data frames
            df_expected = pd.read_csv(expected_feature_csv)
            df_converted = pd.read_csv(tempf.name)
        finally:
            # get rid of the file even if conversion or reading fails
            os.unlink(tempf.name)

        # column order may differ between the two files, so sort columns first
        assert_frame_equal(df_expected.sort_index(axis=1),
                           df_converted.sort_index(axis=1))

    @raises(RuntimeError)
    def test_json_feature_conversion_bad_json(self):
        """A JSON file that is not a feature specification must be rejected."""
        json_feature_file = join(_MY_DIR, 'data', 'experiments', 'lr-feature-json', 'lr.json')

        # convert the feature json file and write to a temporary location;
        # unlike the original version of this test, clean up the temporary
        # file even though the conversion is expected to raise
        tempf = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
        tempf.close()
        try:
            convert_feature_json_file(json_feature_file, tempf.name, delete=False)
        finally:
            os.unlink(tempf.name)

    @raises(RuntimeError)
    def test_json_feature_conversion_bad_output_file(self):
        """A non-CSV output extension must be rejected."""
        json_feature_file = join(_MY_DIR, 'data', 'experiments',
                                 'lr-feature-json', 'features.json')

        # convert the feature json file and write to a temporary location;
        # unlike the original version of this test, clean up the temporary
        # file even though the conversion is expected to raise
        tempf = tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False)
        tempf.close()
        try:
            convert_feature_json_file(json_feature_file, tempf.name, delete=False)
        finally:
            os.unlink(tempf.name)
| [
"logging.getLogger",
"logging.StreamHandler",
"nose.tools.eq_",
"pandas.read_csv",
"nose.tools.assert_equal",
"pathlib.Path",
"nose.tools.raises",
"os.unlink",
"tempfile.NamedTemporaryFile",
"io.StringIO",
"numpy.testing.assert_array_equal",
"rsmtool.configuration_parser.ConfigurationParser",
... | [((694, 711), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (701, 711), False, 'from os.path import abspath, dirname, join\n'), ((786, 811), 'nose.tools.raises', 'raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (792, 811), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((951, 966), 'nose.tools.raises', 'raises', (['OSError'], {}), '(OSError)\n', (957, 966), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((1120, 1138), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1126, 1138), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((2587, 2605), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2593, 2605), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((2759, 2777), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2765, 2777), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((3785, 3803), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (3791, 3803), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((4199, 4217), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (4205, 4217), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((4542, 4560), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (4548, 4560), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((4900, 4918), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (4906, 4918), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((5311, 5329), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (5317, 5329), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, 
raises\n'), ((5722, 5740), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (5728, 5740), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((6332, 6350), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (6338, 6350), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((6957, 6975), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (6963, 6975), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((7640, 7658), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (7646, 7658), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((7882, 7900), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (7888, 7900), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((8393, 8411), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (8399, 8411), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((8731, 8749), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (8737, 8749), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((10286, 10304), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (10292, 10304), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((10723, 10741), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (10729, 10741), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((11254, 11272), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (11260, 11272), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((11740, 11758), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (11746, 11758), False, 'from 
nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((14120, 14138), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (14126, 14138), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((14692, 14710), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (14698, 14710), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((15937, 15955), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (15943, 15955), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((16351, 16369), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (16357, 16369), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((16779, 16797), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (16785, 16797), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((19424, 19441), 'nose.tools.raises', 'raises', (['TypeError'], {}), '(TypeError)\n', (19430, 19441), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((27779, 27801), 'nose.tools.raises', 'raises', (['AssertionError'], {}), '(AssertionError)\n', (27785, 27801), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((30571, 30589), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (30577, 30589), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((30990, 31008), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (30996, 31008), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((31438, 31456), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (31444, 31456), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((31957, 31975), 
'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (31963, 31975), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((34112, 34130), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (34118, 34130), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((47998, 48018), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (48004, 48018), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((48410, 48430), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (48416, 48430), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((906, 944), 'rsmtool.configuration_parser.ConfigurationParser', 'ConfigurationParser', (['non_existent_file'], {}), '(non_existent_file)\n', (925, 944), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((1616, 1635), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['data'], {}), '(data)\n', (1629, 1635), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((1644, 1691), 'nose.tools.assert_equal', 'assert_equal', (["newdata['id_column']", '"""spkitemid"""'], {}), "(newdata['id_column'], 'spkitemid')\n", (1656, 1691), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((1700, 1754), 'nose.tools.assert_equal', 'assert_equal', (["newdata['use_scaled_predictions']", '(False)'], {}), "(newdata['use_scaled_predictions'], False)\n", (1712, 1754), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((1763, 1817), 'nose.tools.assert_equal', 'assert_equal', (["newdata['select_transformations']", '(False)'], {}), "(newdata['select_transformations'], False)\n", (1775, 1817), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((1826, 1882), 
'numpy.testing.assert_array_equal', 'assert_array_equal', (["newdata['general_sections']", "['all']"], {}), "(newdata['general_sections'], ['all'])\n", (1844, 1882), False, 'from numpy.testing import assert_array_equal\n'), ((1891, 1931), 'nose.tools.assert_equal', 'assert_equal', (["newdata['description']", '""""""'], {}), "(newdata['description'], '')\n", (1903, 1931), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((2322, 2360), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['data'], {'context': '"""rsmeval"""'}), "(data, context='rsmeval')\n", (2335, 2360), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((2369, 2416), 'nose.tools.assert_equal', 'assert_equal', (["newdata['id_column']", '"""spkitemid"""'], {}), "(newdata['id_column'], 'spkitemid')\n", (2381, 2416), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((2425, 2481), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["newdata['general_sections']", "['all']"], {}), "(newdata['general_sections'], ['all'])\n", (2443, 2481), False, 'from numpy.testing import assert_array_equal\n'), ((2490, 2530), 'nose.tools.assert_equal', 'assert_equal', (["newdata['description']", '""""""'], {}), "(newdata['description'], '')\n", (2502, 2530), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((2711, 2752), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (2746, 2752), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((3116, 3157), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (3151, 3157), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((3441, 3482), 
'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (3476, 3482), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((3491, 3538), 'nose.tools.assert_equal', 'assert_equal', (["newdata['id_column']", '"""spkitemid"""'], {}), "(newdata['id_column'], 'spkitemid')\n", (3503, 3538), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((3547, 3601), 'nose.tools.assert_equal', 'assert_equal', (["newdata['use_scaled_predictions']", '(False)'], {}), "(newdata['use_scaled_predictions'], False)\n", (3559, 3601), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((3610, 3664), 'nose.tools.assert_equal', 'assert_equal', (["newdata['select_transformations']", '(False)'], {}), "(newdata['select_transformations'], False)\n", (3622, 3664), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((3673, 3729), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["newdata['general_sections']", "['all']"], {}), "(newdata['general_sections'], ['all'])\n", (3691, 3729), False, 'from numpy.testing import assert_array_equal\n'), ((3738, 3778), 'nose.tools.assert_equal', 'assert_equal', (["newdata['description']", '""""""'], {}), "(newdata['description'], '')\n", (3750, 3778), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((4151, 4192), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (4186, 4192), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((4494, 4535), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (4529, 4535), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((4833, 
4893), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmeval"""'}), "(data, context='rsmeval')\n", (4868, 4893), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((5241, 5304), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmcompare"""'}), "(data, context='rsmcompare')\n", (5276, 5304), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((5652, 5715), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmcompare"""'}), "(data, context='rsmcompare')\n", (5687, 5715), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((6284, 6325), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (6319, 6325), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((6890, 6950), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmeval"""'}), "(data, context='rsmeval')\n", (6925, 6950), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((7570, 7633), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmcompare"""'}), "(data, context='rsmcompare')\n", (7605, 7633), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((7810, 7875), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmsummarize"""'}), "(data, context='rsmsummarize')\n", (7845, 7875), False, 'from 
rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((8321, 8386), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmsummarize"""'}), "(data, context='rsmsummarize')\n", (8356, 8386), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((8659, 8724), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmsummarize"""'}), "(data, context='rsmsummarize')\n", (8694, 8724), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((8980, 9045), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {'context': '"""rsmsummarize"""'}), "(data, context='rsmsummarize')\n", (9015, 9045), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((9420, 9461), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (9455, 9461), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((9522, 9573), 'nose.tools.assert_equal', 'assert_equal', (["newdata['min_n_per_group']['L1']", '(100)'], {}), "(newdata['min_n_per_group']['L1'], 100)\n", (9534, 9573), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((9582, 9633), 'nose.tools.assert_equal', 'assert_equal', (["newdata['min_n_per_group']['L2']", '(100)'], {}), "(newdata['min_n_per_group']['L2'], 100)\n", (9594, 9633), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((10066, 10107), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (10101, 10107), False, 'from rsmtool.configuration_parser import Configuration, 
ConfigurationParser\n'), ((10168, 10219), 'nose.tools.assert_equal', 'assert_equal', (["newdata['min_n_per_group']['L1']", '(100)'], {}), "(newdata['min_n_per_group']['L1'], 100)\n", (10180, 10219), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((10228, 10279), 'nose.tools.assert_equal', 'assert_equal', (["newdata['min_n_per_group']['L2']", '(200)'], {}), "(newdata['min_n_per_group']['L2'], 200)\n", (10240, 10279), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((10675, 10716), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (10710, 10716), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((11206, 11247), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (11241, 11247), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((11692, 11733), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (11727, 11733), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((12134, 12175), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (12169, 12175), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((13827, 13867), 'rsmtool.configuration_parser.ConfigurationParser.process_config', 'ConfigurationParser.process_config', (['data'], {}), '(data)\n', (13861, 13867), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((13876, 13950), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["newdata['subgroups']", "['native language', 'GPA_range']"], {}), "(newdata['subgroups'], 
['native language', 'GPA_range'])\n", (13894, 13950), False, 'from numpy.testing import assert_array_equal\n'), ((14018, 14062), 'nose.tools.eq_', 'eq_', (["newdata['use_scaled_predictions']", '(True)'], {}), "(newdata['use_scaled_predictions'], True)\n", (14021, 14062), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((14071, 14113), 'nose.tools.eq_', 'eq_', (["newdata['exclude_zero_scores']", '(False)'], {}), "(newdata['exclude_zero_scores'], False)\n", (14074, 14113), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((14645, 14685), 'rsmtool.configuration_parser.ConfigurationParser.process_config', 'ConfigurationParser.process_config', (['data'], {}), '(data)\n', (14679, 14685), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((15209, 15249), 'rsmtool.configuration_parser.ConfigurationParser.process_config', 'ConfigurationParser.process_config', (['data'], {}), '(data)\n', (15243, 15249), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((15485, 15525), 'rsmtool.configuration_parser.ConfigurationParser.process_config', 'ConfigurationParser.process_config', (['data'], {}), '(data)\n', (15519, 15525), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((15535, 15626), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["newdata['experiment_dirs']", "['home/dir1', 'home/dir2', 'home/dir3']"], {}), "(newdata['experiment_dirs'], ['home/dir1', 'home/dir2',\n 'home/dir3'])\n", (15553, 15626), False, 'from numpy.testing import assert_array_equal\n'), ((15743, 15816), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["newdata['experiment_names']", "['exp1', 'exp2', 'exp3']"], {}), "(newdata['experiment_names'], ['exp1', 'exp2', 'exp3'])\n", (15761, 15816), False, 'from numpy.testing import assert_array_equal\n'), ((16303, 16344), 
'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (16338, 16344), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((16731, 16772), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (16766, 16772), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((17151, 17192), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (17186, 17192), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((17565, 17584), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['data'], {}), '(data)\n', (17578, 17584), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((17593, 17645), 'nose.tools.eq_', 'eq_', (["configuration['predict_expected_scores']", '(False)'], {}), "(configuration['predict_expected_scores'], False)\n", (17596, 17645), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((17946, 17989), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['data'], {'context': '"""rsmsummarize"""'}), "(data, context='rsmsummarize')\n", (17959, 17989), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((17998, 18095), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["configuration['experiment_dirs']", "['home/dir1', 'home/dir2', 'home/dir3']"], {}), "(configuration['experiment_dirs'], ['home/dir1',\n 'home/dir2', 'home/dir3'])\n", (18016, 18095), False, 'from numpy.testing import assert_array_equal\n'), ((18224, 18303), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["configuration['experiment_names']", "['exp1', 'exp2', 'exp3']"], {}), "(configuration['experiment_names'], 
['exp1', 'exp2', 'exp3'])\n", (18242, 18303), False, 'from numpy.testing import assert_array_equal\n'), ((18640, 18666), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['config_dict'], {}), '(config_dict)\n', (18653, 18666), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((18826, 18892), 'nose.tools.eq_', 'eq_', (["config._config['experiment_id']", "config_dict['experiment_id']"], {}), "(config._config['experiment_id'], config_dict['experiment_id'])\n", (18829, 18892), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((19280, 19327), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['config_dict'], {'configdir': 'configdir'}), '(config_dict, configdir=configdir)\n', (19293, 19327), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((19612, 19639), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['config_input'], {}), '(config_input)\n', (19625, 19639), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((19810, 19829), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (19827, 19829), False, 'import logging\n'), ((20833, 20858), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (20846, 20858), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((20911, 20928), 'nose.tools.eq_', 'eq_', (['value', '"""001"""'], {}), "(value, '001')\n", (20914, 20928), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((21194, 21219), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (21207, 21219), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((21269, 21286), 'nose.tools.eq_', 'eq_', (['value', '"""bar"""'], {}), "(value, 'bar')\n", (21272, 21286), False, 
'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((21330, 21524), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column': [1, 2, 3],\n 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 'flag_column': [1, 2, 3], 'model': 'LinearRegression'})\n", (21343, 21524), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((22105, 22299), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column': [1, 2, 3],\n 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 'flag_column': [1, 2, 3], 'model': 'LinearRegression'})\n", (22118, 22299), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((22939, 23104), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': input_dict, 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': input_dict, 'model':\n 'LinearRegression'})\n", (22952, 23104), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((23283, 23311), 'nose.tools.eq_', 'eq_', (['input_dict', 'output_dict'], {}), '(input_dict, output_dict)\n', (23286, 23311), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((23431, 23628), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': 
'001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column_test': input_dict, 'flag_column':\n input_dict, 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column_test': input_dict,\n 'flag_column': input_dict, 'model': 'LinearRegression'})\n", (23444, 23628), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((23857, 23885), 'nose.tools.eq_', 'eq_', (['input_dict', 'output_dict'], {}), '(input_dict, output_dict)\n', (23860, 23885), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((24005, 24170), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': input_dict, 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': input_dict, 'model':\n 'LinearRegression'})\n", (24018, 24170), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((24349, 24395), 'nose.tools.eq_', 'eq_', (['output_dict', "{'advisory flag': [1, 2, 3]}"], {}), "(output_dict, {'advisory flag': [1, 2, 3]})\n", (24352, 24395), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((24462, 24621), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': None, 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': None, 'model':\n 'LinearRegression'})\n", (24475, 24621), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((24798, 24816), 'nose.tools.eq_', 'eq_', (['flag_dict', '{}'], {}), '(flag_dict, {})\n', (24801, 
24816), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((24889, 25063), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': '0'}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': '0'},\n 'model': 'LinearRegression'})\n", (24902, 25063), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((25240, 25277), 'nose.tools.eq_', 'eq_', (['flag_dict', "{'advisories': ['0']}"], {}), "(flag_dict, {'advisories': ['0']})\n", (25243, 25277), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((25355, 25529), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': '0'}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': '0'},\n 'model': 'LinearRegression'})\n", (25368, 25529), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((25855, 25892), 'nose.tools.eq_', 'eq_', (['flag_dict', "{'advisories': ['0']}"], {}), "(flag_dict, {'advisories': ['0']})\n", (25858, 25892), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((25971, 26145), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': '0'}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': '0'},\n 'model': 'LinearRegression'})\n", (25984, 
26145), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((26470, 26507), 'nose.tools.eq_', 'eq_', (['flag_dict', "{'advisories': ['0']}"], {}), "(flag_dict, {'advisories': ['0']})\n", (26473, 26507), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((26585, 26759), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': '0'}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': '0'},\n 'model': 'LinearRegression'})\n", (26598, 26759), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((27098, 27135), 'nose.tools.eq_', 'eq_', (['flag_dict', "{'advisories': ['0']}"], {}), "(flag_dict, {'advisories': ['0']})\n", (27101, 27135), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((27216, 27390), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': '0'}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': '0'},\n 'model': 'LinearRegression'})\n", (27229, 27390), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((27735, 27772), 'nose.tools.eq_', 'eq_', (['flag_dict', "{'advisories': ['0']}"], {}), "(flag_dict, {'advisories': ['0']})\n", (27738, 27772), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((27884, 28058), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 
'flag_column': {'advisories': '0'}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': '0'},\n 'model': 'LinearRegression'})\n", (27897, 28058), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((28423, 28597), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (28436, 28597), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((28774, 28811), 'nose.tools.eq_', 'eq_', (['flag_dict', "{'advisories': [123]}"], {}), "(flag_dict, {'advisories': [123]})\n", (28777, 28811), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((28863, 29037), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (28876, 29037), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((29167, 29232), 'nose.tools.ok_', 'ok_', (["('flag_column' in config)"], {'msg': '"""Test \'flag_column\' in config."""'}), '(\'flag_column\' in config, msg="Test \'flag_column\' in config.")\n', (29170, 29232), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((29299, 29473), 'rsmtool.configuration_parser.Configuration', 'Configuration', 
(["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (29312, 29473), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((29603, 29637), 'nose.tools.eq_', 'eq_', (["('advisories' in config)", '(False)'], {}), "('advisories' in config, False)\n", (29606, 29637), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((29729, 29897), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': expected_item, 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': expected_item, 'model':\n 'LinearRegression'})\n", (29742, 29897), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((30064, 30088), 'nose.tools.eq_', 'eq_', (['item', 'expected_item'], {}), '(item, expected_item)\n', (30067, 30088), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((30171, 30345), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (30184, 30345), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((30522, 30564), 'nose.tools.eq_', 'eq_', (["config['other_column']", 'expected_item'], {}), "(config['other_column'], 
expected_item)\n", (30525, 30564), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((30658, 30827), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': '[advisories]', 'model': 'LinearRegression'\n }"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': '[advisories]', 'model':\n 'LinearRegression'})\n", (30671, 30827), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((31080, 31259), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column_test': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column_test': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (31093, 31259), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((31533, 31712), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column_test': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column_test': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (31546, 31712), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((32057, 32236), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column_test': {'advisories': 123}, 'model':\n 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 
'test_file': '/foo/test.csv', 'flag_column_test': {'advisories': 123},\n 'model': 'LinearRegression'})\n", (32070, 32236), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((32524, 32693), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'flag_column': '[advisories]', 'model': 'LinearRegression'\n }"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'flag_column': '[advisories]', 'model':\n 'LinearRegression'})\n", (32537, 32693), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((32823, 32865), 'nose.tools.eq_', 'eq_', (["config['flag_column']", '"""[advisories]"""'], {}), "(config['flag_column'], '[advisories]')\n", (32826, 32865), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((32954, 33179), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column':\n '[advisories]', 'model': 'LinearRegression'}"], {'configdir': 'configdir'}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 'flag_column': '[advisories]', 'model': 'LinearRegression'}, configdir=\n configdir)\n", (32967, 33179), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((33574, 33799), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column':\n '[advisories]', 'model': 'LinearRegression'}"], {'configdir': 'configdir'}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 
'flag_column': '[advisories]', 'model': 'LinearRegression'}, configdir=\n configdir)\n", (33587, 33799), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((34226, 34293), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'flag_column': '[advisories]'}"], {'configdir': 'configdir'}), "({'flag_column': '[advisories]'}, configdir=configdir)\n", (34239, 34293), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((34437, 34658), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column':\n '[advisories]', 'model': 'LinearRegression'}"], {'context': 'context'}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 'flag_column': '[advisories]', 'model': 'LinearRegression'}, context=\n context)\n", (34450, 34658), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((34877, 34905), 'nose.tools.eq_', 'eq_', (['config.context', 'context'], {}), '(config.context, context)\n', (34880, 34905), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((35019, 35240), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column':\n '[advisories]', 'model': 'LinearRegression'}"], {'context': 'context'}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 'flag_column': '[advisories]', 'model': 'LinearRegression'}, context=\n context)\n", (35032, 35240), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((35496, 35528), 'nose.tools.eq_', 'eq_', (['config.context', 
'new_context'], {}), '(config.context, new_context)\n', (35499, 35528), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((35571, 35770), 'rsmtool.configuration_parser.Configuration', 'Configuration', (["{'experiment_id': '001', 'train_file': '/foo/train.csv', 'test_file':\n '/foo/test.csv', 'trim_min': 1, 'trim_max': 6, 'flag_column':\n '[advisories]', 'model': 'LinearRegression'}"], {}), "({'experiment_id': '001', 'train_file': '/foo/train.csv',\n 'test_file': '/foo/test.csv', 'trim_min': 1, 'trim_max': 6,\n 'flag_column': '[advisories]', 'model': 'LinearRegression'})\n", (35584, 35770), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((36432, 36457), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['configdict'], {}), '(configdict)\n', (36445, 36457), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((36900, 36925), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['configdict'], {}), '(configdict)\n', (36913, 36925), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((37447, 37472), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['configdict'], {}), '(configdict)\n', (37460, 37472), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((38009, 38034), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['configdict'], {}), '(configdict)\n', (38022, 38034), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((38494, 38519), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (38507, 38519), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((38660, 38676), 'shutil.rmtree', 'rmtree', (['"""output"""'], {}), "('output')\n", (38666, 38676), False, 'from shutil import rmtree\n'), ((39156, 
39203), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {'context': '"""rsmcompare"""'}), "(dictionary, context='rsmcompare')\n", (39169, 39203), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((39400, 39416), 'shutil.rmtree', 'rmtree', (['"""output"""'], {}), "('output')\n", (39406, 39416), False, 'from shutil import rmtree\n'), ((39633, 39682), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {'context': '"""rsmsummarize"""'}), "(dictionary, context='rsmsummarize')\n", (39646, 39682), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((39881, 39897), 'shutil.rmtree', 'rmtree', (['"""output"""'], {}), "('output')\n", (39887, 39897), False, 'from shutil import rmtree\n'), ((40355, 40380), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (40368, 40380), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((40449, 40477), 'nose.tools.eq_', 'eq_', (['exclude_list_wise', '(True)'], {}), '(exclude_list_wise, True)\n', (40452, 40477), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((40748, 40773), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (40761, 40773), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((40842, 40871), 'nose.tools.eq_', 'eq_', (['exclude_list_wise', '(False)'], {}), '(exclude_list_wise, False)\n', (40845, 40871), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((41339, 41364), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (41352, 41364), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((41442, 41491), 'nose.tools.eq_', 'eq_', (['trim_min_max_tolerance', 
'(None, None, 0.4998)'], {}), '(trim_min_max_tolerance, (None, None, 0.4998))\n', (41445, 41491), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((41837, 41862), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (41850, 41862), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((41940, 41987), 'nose.tools.eq_', 'eq_', (['trim_min_max_tolerance', '(1.0, 6.0, 0.4998)'], {}), '(trim_min_max_tolerance, (1.0, 6.0, 0.4998))\n', (41943, 41987), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((42383, 42408), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (42396, 42408), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((42486, 42531), 'nose.tools.eq_', 'eq_', (['trim_min_max_tolerance', '(1.0, 6.0, 0.51)'], {}), '(trim_min_max_tolerance, (1.0, 6.0, 0.51))\n', (42489, 42531), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((42849, 42874), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (42862, 42874), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((42952, 42999), 'nose.tools.eq_', 'eq_', (['trim_min_max_tolerance', '(None, None, 0.49)'], {}), '(trim_min_max_tolerance, (None, None, 0.49))\n', (42955, 42999), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((43322, 43347), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (43335, 43347), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((43421, 43454), 'nose.tools.eq_', 'eq_', (['rater_error_variance', '(2.2525)'], {}), '(rater_error_variance, 2.2525)\n', (43424, 43454), False, 'from nose.tools import 
assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((43726, 43751), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (43739, 43751), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((43825, 43856), 'nose.tools.eq_', 'eq_', (['rater_error_variance', 'None'], {}), '(rater_error_variance, None)\n', (43828, 43856), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((44557, 44582), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (44570, 44582), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((44890, 44922), 'nose.tools.eq_', 'eq_', (['values_for_reader', 'expected'], {}), '(values_for_reader, expected)\n', (44893, 44922), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((45655, 45680), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (45668, 45680), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((46006, 46038), 'nose.tools.eq_', 'eq_', (['values_for_reader', 'expected'], {}), '(values_for_reader, expected)\n', (46009, 46038), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((46679, 46704), 'rsmtool.configuration_parser.Configuration', 'Configuration', (['dictionary'], {}), '(dictionary)\n', (46692, 46704), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((47012, 47044), 'nose.tools.eq_', 'eq_', (['values_for_reader', 'expected'], {}), '(values_for_reader, expected)\n', (47015, 47044), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((47153, 47225), 'os.path.join', 'join', (['_MY_DIR', '"""data"""', '"""experiments"""', '"""lr-feature-json"""', '"""features.json"""'], {}), "(_MY_DIR, 'data', 
'experiments', 'lr-feature-json', 'features.json')\n", (47157, 47225), False, 'from os.path import abspath, dirname, join\n'), ((47290, 47348), 'os.path.join', 'join', (['_MY_DIR', '"""data"""', '"""experiments"""', '"""lr"""', '"""features.csv"""'], {}), "(_MY_DIR, 'data', 'experiments', 'lr', 'features.csv')\n", (47294, 47348), False, 'from os.path import abspath, dirname, join\n'), ((47440, 47506), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'suffix': '""".csv"""', 'delete': '(False)'}), "(mode='w', suffix='.csv', delete=False)\n", (47467, 47506), False, 'import tempfile\n'), ((47515, 47585), 'rsmtool.convert_feature_json.convert_feature_json_file', 'convert_feature_json_file', (['json_feature_file', 'tempf.name'], {'delete': '(False)'}), '(json_feature_file, tempf.name, delete=False)\n', (47540, 47585), False, 'from rsmtool.convert_feature_json import convert_feature_json_file\n'), ((47674, 47707), 'pandas.read_csv', 'pd.read_csv', (['expected_feature_csv'], {}), '(expected_feature_csv)\n', (47685, 47707), True, 'import pandas as pd\n'), ((47731, 47754), 'pandas.read_csv', 'pd.read_csv', (['tempf.name'], {}), '(tempf.name)\n', (47742, 47754), True, 'import pandas as pd\n'), ((47850, 47871), 'os.unlink', 'os.unlink', (['tempf.name'], {}), '(tempf.name)\n', (47859, 47871), False, 'import os\n'), ((48100, 48166), 'os.path.join', 'join', (['_MY_DIR', '"""data"""', '"""experiments"""', '"""lr-feature-json"""', '"""lr.json"""'], {}), "(_MY_DIR, 'data', 'experiments', 'lr-feature-json', 'lr.json')\n", (48104, 48166), False, 'from os.path import abspath, dirname, join\n'), ((48258, 48324), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'suffix': '""".csv"""', 'delete': '(False)'}), "(mode='w', suffix='.csv', delete=False)\n", (48285, 48324), False, 'import tempfile\n'), ((48333, 48403), 'rsmtool.convert_feature_json.convert_feature_json_file', 'convert_feature_json_file', 
(['json_feature_file', 'tempf.name'], {'delete': '(False)'}), '(json_feature_file, tempf.name, delete=False)\n', (48358, 48403), False, 'from rsmtool.convert_feature_json import convert_feature_json_file\n'), ((48519, 48591), 'os.path.join', 'join', (['_MY_DIR', '"""data"""', '"""experiments"""', '"""lr-feature-json"""', '"""features.json"""'], {}), "(_MY_DIR, 'data', 'experiments', 'lr-feature-json', 'features.json')\n", (48523, 48591), False, 'from os.path import abspath, dirname, join\n'), ((48716, 48782), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'suffix': '""".txt"""', 'delete': '(False)'}), "(mode='w', suffix='.txt', delete=False)\n", (48743, 48782), False, 'import tempfile\n'), ((48791, 48861), 'rsmtool.convert_feature_json.convert_feature_json_file', 'convert_feature_json_file', (['json_feature_file', 'tempf.name'], {'delete': '(False)'}), '(json_feature_file, tempf.name, delete=False)\n', (48816, 48861), False, 'from rsmtool.convert_feature_json import convert_feature_json_file\n'), ((1031, 1060), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1058, 1060), False, 'import tempfile\n'), ((1087, 1113), 'rsmtool.configuration_parser.ConfigurationParser', 'ConfigurationParser', (['tempd'], {}), '(tempd)\n', (1106, 1113), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((1191, 1233), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".txt"""'}), "(suffix='.txt')\n", (1218, 1233), False, 'import tempfile\n'), ((1260, 1291), 'rsmtool.configuration_parser.ConfigurationParser', 'ConfigurationParser', (['tempf.name'], {}), '(tempf.name)\n', (1279, 1291), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((1972, 1980), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1978, 1980), False, 'from os import getcwd\n'), ((2571, 2579), 'os.getcwd', 'getcwd', ([], {}), '()\n', (2577, 2579), 
False, 'from os import getcwd\n'), ((12569, 12605), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (12592, 12605), False, 'import warnings\n'), ((12639, 12680), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (12674, 12680), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((13228, 13264), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (13251, 13264), False, 'import warnings\n'), ((13298, 13339), 'rsmtool.configuration_parser.ConfigurationParser.validate_config', 'ConfigurationParser.validate_config', (['data'], {}), '(data)\n', (13333, 13339), False, 'from rsmtool.configuration_parser import Configuration, ConfigurationParser\n'), ((18775, 18817), 'nose.tools.eq_', 'eq_', (['config._config[key]', 'config_dict[key]'], {}), '(config._config[key], config_dict[key])\n', (18778, 18817), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((19843, 19853), 'io.StringIO', 'StringIO', ([], {}), '()\n', (19851, 19853), False, 'from io import StringIO\n'), ((19926, 19958), 'logging.StreamHandler', 'logging.StreamHandler', (['string_io'], {}), '(string_io)\n', (19947, 19958), False, 'import logging\n'), ((22009, 22052), 'nose.tools.assert_equal', 'assert_equal', (['config[key]', 'config_copy[key]'], {}), '(config[key], config_copy[key])\n', (22021, 22052), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((22793, 22836), 'nose.tools.assert_equal', 'assert_equal', (['config[key]', 'config_copy[key]'], {}), '(config[key], config_copy[key])\n', (22805, 22836), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((33421, 33439), 'os.path.abspath', 'abspath', (['configdir'], {}), '(configdir)\n', (33428, 33439), False, 'from os.path import abspath, 
dirname, join\n'), ((34082, 34104), 'os.path.abspath', 'abspath', (['new_configdir'], {}), '(new_configdir)\n', (34089, 34104), False, 'from os.path import abspath, dirname, join\n'), ((36501, 36534), 'nose.tools.eq_', 'eq_', (['config[key]', 'configdict[key]'], {}), '(config[key], configdict[key])\n', (36504, 36534), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((38720, 38757), 'nose.tools.eq_', 'eq_', (['config_new[key]', 'dictionary[key]'], {}), '(config_new[key], dictionary[key])\n', (38723, 38757), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((39460, 39497), 'nose.tools.eq_', 'eq_', (['config_new[key]', 'dictionary[key]'], {}), '(config_new[key], dictionary[key])\n', (39463, 39497), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((39941, 39978), 'nose.tools.eq_', 'eq_', (['config_new[key]', 'dictionary[key]'], {}), '(config_new[key], dictionary[key])\n', (39944, 39978), False, 'from nose.tools import assert_equal, assert_not_equal, eq_, ok_, raises\n'), ((18931, 18939), 'os.getcwd', 'getcwd', ([], {}), '()\n', (18937, 18939), False, 'from os import getcwd\n'), ((19390, 19405), 'pathlib.Path', 'Path', (['configdir'], {}), '(configdir)\n', (19394, 19405), False, 'from pathlib import Path\n')] |
# Copyright 2017 <NAME> Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""
Wasserstein Auto-Encoder models
"""
import sys
import time
import os
import logging
from math import sqrt, cos, sin, pi
import numpy as np
import tensorflow as tf
import ops
import utils
from priors import init_gaussian_prior, init_cat_prior
from sampling_functions import sample_mixtures, sample_pz, generate_linespace
from loss_functions import matching_penalty, reconstruction_loss, moments_loss
from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot
from plot_functions import save_train, save_vizu
from model_nn import label_encoder, cat_encoder, gaussian_encoder
from model_nn import continuous_decoder, discrete_decoder
from datahandler import datashapes
import pdb
class WAE(object):
    def __init__(self, opts):
        """Build the full semi-supervised WAE TensorFlow graph.

        Wires up, in order: the session, input placeholders, the mixture
        prior, the categorical/Gaussian encoders for the labeled and
        unlabeled batches, the decoders (reconstruction and generation
        paths), the combined training objective and the optimizers/savers.

        Args:
            opts: dict of experiment options; must contain at least
                'dataset' (a key of ``datashapes``), 'nmixtures' and 'zdim'.
        """
        logging.error('Building the Tensorflow Graph')  # ERROR level so it always shows
        # --- Create session
        self.sess = tf.Session()
        self.opts = opts
        # --- Some of the parameters for future use
        assert opts['dataset'] in datashapes, 'Unknown dataset.'
        self.data_shape = datashapes[opts['dataset']]
        # --- Placeholders
        self.add_model_placeholders()
        self.add_training_placeholders()
        # Dynamic batch size of the unlabeled batch (int64 so it can be used
        # to build gather_nd indices below)
        sample_size = tf.shape(self.u_points,out_type=tf.int64)[0]
        # NOTE(review): shadows the Python builtin `range` inside this method
        range = tf.range(sample_size)
        # NOTE(review): `zero` appears unused in this constructor — confirm
        zero = tf.zeros([tf.cast(sample_size,dtype=tf.int32)],dtype=tf.int64)
        # --- Initialize prior parameters
        # Gaussian mixture parameters and categorical mixture weights pi0
        self.pz_mean, self.pz_sigma = init_gaussian_prior(opts)
        self.pi0 = init_cat_prior(opts)
        # --- Encoding inputs
        # Classifier head: per-example logits over the label classes
        probs_logit = label_encoder(self.opts, self.u_points, False,
                                                self.is_training)
        self.probs = ops.softmax(probs_logit,axis=-1)
        # Unlabeled branch: mixture-assignment logits and per-component
        # Gaussian parameters (mean, log-sigma)
        logit_pi, self.u_enc_mean, self.u_enc_logSigma = self.encoder(
                                                self.u_points,
                                                False)
        # Log-space marginalisation of the mixture logits over the
        # classifier's predicted label distribution
        log_Zpi = ops.log_sum_exp(logit_pi,axis=-1,keepdims=True)
        logit = logit_pi - log_Zpi \
                + tf.expand_dims(probs_logit,axis=-1)
        u_logit = ops.log_sum_exp(logit,axis=1,keepdims=False)
        #self.u_pi = ops.softmax(u_logit,axis=-1)
        # Mixture responsibilities for the unlabeled batch: encoder softmax
        # weighted by classifier probabilities, summed over the label axis
        u_pi = tf.multiply(ops.softmax(logit_pi,axis=-1),tf.expand_dims(self.probs,axis=-1))
        self.u_pi = tf.reduce_sum(u_pi,axis=1,keepdims=False)
        # Labeled branch: second encoder call with reuse=True shares variables
        logit_pi, self.l_enc_mean, self.l_enc_logSigma = self.encoder(
                                                self.l_points,
                                                True)
        # For each labeled example, pick the logits row of its true label
        idx_label = tf.stack([range,self.l_labels], axis=-1)
        logit = tf.gather_nd(logit_pi,idx_label)
        self.l_pi = ops.softmax(logit,axis=-1)
        # --- Sampling from encoded MoG prior
        # Reparameterised samples z ~ N(mean, exp(logSigma)) per component
        self.u_mixtures_encoded = sample_mixtures(opts, self.u_enc_mean,
                                                tf.exp(self.u_enc_logSigma),
                                                sample_size,'tensorflow')
        self.l_mixtures_encoded = sample_mixtures(opts, self.l_enc_mean,
                                                tf.exp(self.l_enc_logSigma),
                                                sample_size,'tensorflow')
        # --- Decoding encoded points (i.e. reconstruct)
        self.u_reconstructed, self.u_reconstructed_logits = self.decoder(
                                                self.u_mixtures_encoded,
                                                False)
        self.l_reconstructed, self.l_reconstructed_logits = self.decoder(
                                                self.l_mixtures_encoded,
                                                True)
        # Discrete decoder maps the fixed per-component "label noise" back
        # to label space (used in the labeled reconstruction loss below)
        self.labels_reconstructed, self.labels_reconstructed_logits = discrete_decoder(
                                                opts,
                                                self.label_noise,
                                                False,
                                                self.is_training)
        # --- Reconstructing inputs (only for visualization)
        # Sample one mixture component per example from the posterior logits
        idx = tf.reshape(tf.multinomial(tf.nn.log_softmax(u_logit),1),[-1])
        mix_idx = tf.stack([range,idx],axis=-1)
        self.encoded_point = tf.gather_nd(self.u_mixtures_encoded,mix_idx)
        self.reconstructed_point = tf.gather_nd(self.u_reconstructed,mix_idx)
        self.reconstructed_logit = tf.gather_nd(self.u_reconstructed_logits,mix_idx)
        # --- Sampling from model (only for generation)
        self.decoded, self.decoded_logits = self.decoder(self.sample_noise,
                                                True)
        # --- Objectives, losses, penalties, pretraining
        # Compute reconstruction cost
        self.l_loss_reconstruct = reconstruction_loss(opts, self.l_pi,
                                                self.l_points,
                                                self.l_reconstructed,
                                                self.l_labels,
                                                tf.argmax(self.labels_reconstructed,axis=-1))
        self.u_loss_reconstruct = reconstruction_loss(opts, self.u_pi,
                                                self.u_points,
                                                self.u_reconstructed)
        # Compute matching penalty cost
        self.kl_g, self.kl_d, self.l_cont_penalty, self.l_disc_penalty = matching_penalty(opts,
                                                self.pi0, self.l_pi,
                                                self.l_enc_mean, self.l_enc_logSigma,
                                                self.pz_mean, self.pz_sigma,
                                                self.l_sample_mix_noise, self.l_mixtures_encoded)
        # NOTE(review): kl_g / kl_d from the labeled penalty above are
        # overwritten by the unlabeled ones here — confirm this is intended
        self.kl_g, self.kl_d, self.u_cont_penalty, self.u_disc_penalty = matching_penalty(opts,
                                                self.pi0, self.u_pi,
                                                self.u_enc_mean, self.u_enc_logSigma,
                                                self.pz_mean, self.pz_sigma,
                                                self.u_sample_mix_noise, self.u_mixtures_encoded)
        # Compute Labeled obj
        self.l_loss = self.l_loss_reconstruct\
                        + self.l_lmbd * self.l_cont_penalty\
                        + self.l_beta * self.l_disc_penalty
        # Compute Unlabeled obj
        self.u_loss = self.u_loss_reconstruct\
                        + self.u_lmbd * self.u_cont_penalty\
                        + self.u_beta * self.u_disc_penalty
        # Compute wae obj
        # Total objective: unlabeled loss plus the annealed labeled loss
        self.objective = self.alpha*self.alpha_decay * self.l_loss + self.u_loss
        # Pre Training
        self.pretrain_loss()
        # --- Optimizers, savers, etc
        self.add_optimizers()
        self.add_savers()
        self.init = tf.global_variables_initializer()
def add_model_placeholders(self):
opts = self.opts
shape = self.data_shape
self.l_points = tf.placeholder(tf.float32,
[None] + shape,
name='l_points_ph')
self.l_labels = tf.placeholder(tf.int64,
[None,],
name='l_labels_ph')
self.l_sample_mix_noise = tf.placeholder(tf.float32,
[None] + [opts['nmixtures'],opts['zdim']],
name='l_mix_noise_ph')
self.u_points = tf.placeholder(tf.float32,
[None] + shape,
name='u_points_ph')
self.u_sample_mix_noise = tf.placeholder(tf.float32,
[None] + [opts['nmixtures'],opts['zdim']],
name='u_mix_noise_ph')
self.sample_noise = tf.placeholder(tf.float32,
[None] + [opts['nmixtures'],opts['zdim']],
name='noise_ph')
# self.l_points = l_data
# self.l_labels = l_label
# self.l_sample_mix_noise = l_mix_noise
# self.u_points = u_data
# self.u_sample_mix_noise = u_mix_noise
# self.sample_noise = noise
# self.label_noise = tf.placeholder(tf.float32,
# [None,1],
# name='noise_ph')
label_noise = tf.range(opts['nmixtures'],
dtype=tf.float32,
name='label_noise_ph')
self.label_noise = tf.expand_dims(label_noise, axis=0)
# placeholders fo logistic regression
self.preds = tf.placeholder(tf.float32, [None, 10], name='predictions') # discrete probabilities
self.y = tf.placeholder(tf.float32, [None, 10],name='labels') # 0-9 digits recognition => 10 classes
# self.preds = preds
# self.y = y
def add_training_placeholders(self):
opts = self.opts
decay = tf.placeholder(tf.float32, name='rate_decay_ph')
is_training = tf.placeholder(tf.bool, name='is_training_ph')
alpha = tf.placeholder(tf.float32, name='alpha')
alpha_decay = tf.placeholder(tf.float32, name='alpha')
l_lmbda = tf.placeholder(tf.float32, name='lambda')
l_beta = tf.placeholder(tf.float32, name='beta')
u_lmbda = tf.placeholder(tf.float32, name='lambda')
u_beta = tf.placeholder(tf.float32, name='beta')
self.lr_decay = decay
self.is_training = is_training
self.alpha = alpha
self.alpha_decay = alpha_decay
self.l_lmbd = l_lmbda
self.l_beta = l_beta
self.u_lmbd = u_lmbda
self.u_beta = u_beta
def add_savers(self):
opts = self.opts
saver = tf.train.Saver(max_to_keep=10)
# tf.add_to_collection('real_points_ph', self.sample_points)
# tf.add_to_collection('noise_ph', self.sample_noise)
# tf.add_to_collection('is_training_ph', self.is_training)
# if self.enc_mean is not None:
# tf.add_to_collection('encoder_mean', self.enc_mean)
# tf.add_to_collection('encoder_var', self.enc_logsigma)
# tf.add_to_collection('encoder', self.encoded_point)
# tf.add_to_collection('decoder', self.decoded)
#tf.add_to_collection('lambda', self.lmbd)
self.saver = saver
def optimizer(self, lr, decay=1.):
opts = self.opts
lr *= decay
if opts['optimizer'] == 'sgd':
return tf.train.GradientDescentOptimizer(lr)
elif opts['optimizer'] == 'adam':
return tf.train.AdamOptimizer(lr, beta1=opts['adam_beta1'])
else:
assert False, 'Unknown optimizer.'
def add_optimizers(self):
opts = self.opts
# SWAE optimizer
lr = opts['lr']
opt = self.optimizer(lr, self.lr_decay)
encoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
decoder_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
prior_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='prior')
ae_vars = encoder_vars + decoder_vars
#ae_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if opts['clip_grad']:
grad, var = zip(*opt.compute_gradients(loss=self.objective, var_list=ae_vars))
clip_grad, _ = tf.clip_by_global_norm(grad, opts['clip_norm'])
self.swae_opt = opt.apply_gradients(zip(clip_grad, var))
else:
self.swae_opt = opt.minimize(loss=self.objective, var_list=ae_vars)
# Pretraining optimizer
pre_opt = self.optimizer(lr)
self.pre_opt = pre_opt.minimize(loss=self.pre_loss, var_list=encoder_vars+prior_vars)
def encoder(self, input_points, reuse=False):
## Categorical encoding
logit = cat_encoder(self.opts, inputs=input_points, reuse=reuse,
is_training=self.is_training)
## Gaussian encoding
if self.opts['e_means']=='fixed':
eps = tf.zeros([tf.cast(sample_size, dtype=tf.int32), self.opts['nmixtures'],
self.opts['zdim']],dtype=tf.float32)
enc_mean = self.pz_mean + eps
enc_logSigma = self.opts['init_e_std']*tf.ones([
tf.cast(sample_size,dtype=tf.int32),
self.opts['nmixtures'],
self.opts['zdim']],dtype=tf.float32)
elif self.opts['e_means']=='mean':
enc_mean, _ = gaussian_encoder(opts, inputs=input_points, reuse=reuse,
is_training=self.is_training)
enc_logSigma = tf.exp(self.opts['init_e_std'])*tf.ones([
tf.cast(sample_size,dtype=tf.int32),
self.opts['nmixtures'],
self.opts['zdim']],dtype=tf.float32)
elif self.opts['e_means']=='learnable':
enc_mean, enc_logSigma = gaussian_encoder(self.opts,
inputs=input_points,
reuse=reuse,
is_training=self.is_training)
return logit, enc_mean, enc_logSigma
def decoder(self, encoded, reuse=False):
noise = tf.reshape(encoded,[-1,self.opts['zdim']])
recon, log = continuous_decoder(self.opts, noise=noise,
reuse=reuse,
is_training=self.is_training)
reconstructed = tf.reshape(recon,
[-1,self.opts['nmixtures']]+self.data_shape)
logits = tf.reshape(log,
[-1,self.opts['nmixtures']]+self.data_shape)
return reconstructed, logits
def pretrain_loss(self):
# Adding ops to pretrain the encoder so that mean and covariance
# of Qz will try to match those of Pz
l_pre_loss = moments_loss(self.l_sample_mix_noise, self.l_mixtures_encoded)
u_pre_loss = moments_loss(self.u_sample_mix_noise, self.u_mixtures_encoded)
# Loss
self.pre_loss = l_pre_loss + u_pre_loss
def pretrain_encoder(self, data):
opts=self.opts
steps_max = 500
batch_size = opts['e_pretrain_sample_size']
full_train_size = data.num_points
l_train_size = max(int(full_train_size*opts['lu_split']),5)
u_train_size = full_train_size-l_train_size
for step in range(steps_max):
data_ids = np.random.choice(l_train_size,
batch_size,
replace=True)
l_batch_images = data.data[data_ids].astype(np.float32)
l_batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_sigma,
batch_size,
sampling_mode='all_mixtures')
data_ids = l_train_size + np.random.choice(u_train_size,
batch_size,
replace=False)
u_batch_images = data.data[data_ids].astype(np.float32)
u_batch_mix_noise = sample_pz(opts, self.pz_mean,
self.pz_sigma,
batch_size,
sampling_mode='all_mixtures')
[_, pre_loss] = self.sess.run(
[self.pre_opt, self.pre_loss],
feed_dict={self.l_points: l_batch_images,
self.l_sample_mix_noise: l_batch_mix_noise,
self.u_points: u_batch_images,
self.u_sample_mix_noise: u_batch_mix_noise,
self.is_training: True})
logging.error('Pretraining the encoder done.')
logging.error ('Loss after %d iterations: %.3f' % (steps_max,pre_loss))
    def train(self, data, MODEL_DIR, WEIGHTS_FILE):
        """
        Train MoG model with chosen method.

        Alternates labelled/unlabelled mini-batches to optimize the
        SWAE objective; periodically estimates the cluster-to-digit
        mapping on part of the test set, reports accuracy and losses,
        saves diagnostic plots and periodic checkpoints.

        Args:
            data: dataset with num_points, data, labels, test_data,
                test_labels.
            MODEL_DIR: not used by this method (kept for interface
                symmetry with test()/reg()/vizu()).
            WEIGHTS_FILE: checkpoint restored when opts['use_trained'].
        """
        opts = self.opts
        if opts['method']=='swae':
            logging.error('Training WAE')
        elif opts['method']=='vae':
            logging.error('Training VAE')
        print('')
        # Create work_dir
        utils.create_dir(opts['method'])
        work_dir = os.path.join(opts['method'],opts['work_dir'])
        # Split data set: labelled share is lu_split of the data,
        # floored at opts['min_u_size'] points.
        full_train_size = data.num_points
        l_train_size = max(int(full_train_size*opts['lu_split']),opts['min_u_size'])
        u_train_size = full_train_size-l_train_size
        debug_str = 'Total:%d, Unlabelled:%d, Labelled:%d' % (
                        full_train_size, u_train_size, l_train_size)
        logging.error(debug_str)
        print('')
        # Init sess and load trained weights if needed
        if opts['use_trained']:
            if not tf.gfile.Exists(WEIGHTS_FILE+".meta"):
                raise Exception("weights file doesn't exist")
            self.saver.restore(self.sess, WEIGHTS_FILE)
        else:
            self.sess.run(self.init)
            if opts['e_pretrain']:
                logging.error('Pretraining the encoder')
                self.pretrain_encoder(data)
                print('')
        batches_num = int(max(l_train_size,u_train_size)/opts['batch_size'])
        npics = opts['plot_num_pics']
        # Fixed prior noise, reused for every generated-sample plot so
        # samples are comparable across epochs.
        fixed_noise = sample_pz(opts, self.pz_mean, self.pz_sigma,
                                        opts['plot_num_pics'],
                                        sampling_mode = 'per_mixture')
        self.start_time = time.time()
        losses, losses_rec, losses_match, losses_xent = [], [], [], []
        kl_gau, kl_dis = [], []
        decay, alpha_decay = 1., 1.
        counter = 0
        if opts['method']=='swae':
            alpha = opts['alpha']
            l_lmbda = opts['l_lambda']
            l_beta = opts['l_beta']
            u_lmbda = opts['u_lambda']
            u_beta = opts['u_beta']
        else:
            assert False, 'to implement VAE'
            wae_lmbda = 1  # unreachable (kept from the stubbed VAE path)
        wait = 0
        for epoch in range(opts['epoch_num']):
            # Update learning rate if necessary: fixed schedule, /2 at
            # epoch 30, /5 at 50, /10 at 100 (cumulative, on top of the
            # plateau-based decay applied at the bottom of the loop).
            if epoch == 30:
                decay = decay / 2.
            if epoch == 50:
                decay = decay / 5.
            if epoch == 100:
                decay = decay / 10.
            # Update alpha: halve the decay factor every 5 epochs
            if (epoch+1)%5 == 0:
                alpha_decay = alpha_decay / 2.
            # Save the model
            if epoch > 0 and epoch % opts['save_every_epoch'] == 0:
                self.saver.save(self.sess, os.path.join(
                                                work_dir,'checkpoints',
                                                'trained-wae'),
                                                global_step=counter)
            ##### TRAINING LOOP #####
            for it in range(batches_num):
                # Sample batches of data points and Pz noise
                # (labelled ids drawn with replacement, unlabelled
                # without; unlabelled ids are offset by l_train_size)
                data_ids = np.random.choice(l_train_size,
                                    opts['batch_size'],
                                    replace=True)
                l_batch_images = data.data[data_ids].astype(np.float32)
                l_batch_labels = data.labels[data_ids].astype(np.float32)
                l_batch_mix_noise = sample_pz(opts, self.pz_mean,
                                    self.pz_sigma,
                                    opts['batch_size'],
                                    sampling_mode='all_mixtures')
                data_ids = l_train_size + np.random.choice(u_train_size,
                                    opts['batch_size'],
                                    replace=False)
                u_batch_images = data.data[data_ids].astype(np.float32)
                u_batch_labels = data.labels[data_ids].astype(np.float32)
                u_batch_mix_noise = sample_pz(opts, self.pz_mean,
                                    self.pz_sigma,
                                    opts['batch_size'],
                                    sampling_mode='all_mixtures')
                # Feeding dictionary
                feed_dict={self.l_points: l_batch_images,
                           self.l_labels: l_batch_labels,
                           self.l_sample_mix_noise: l_batch_mix_noise,
                           self.u_points: u_batch_images,
                           self.u_sample_mix_noise: u_batch_mix_noise,
                           self.lr_decay: decay,
                           self.alpha: alpha,
                           self.alpha_decay: alpha_decay,
                           self.l_lmbd: l_lmbda,
                           self.l_beta: l_beta,
                           self.u_lmbd: u_lmbda,
                           self.u_beta: u_beta,
                           self.is_training: True}
                # Update encoder and decoder
                if opts['method']=='swae':
                    outputs = self.sess.run([self.swae_opt, self.objective,
                                             self.l_loss_reconstruct,
                                             self.l_cont_penalty,
                                             self.l_disc_penalty,
                                             self.u_loss_reconstruct,
                                             self.u_cont_penalty,
                                             self.u_disc_penalty,
                                             self.probs],
                                            feed_dict=feed_dict)
                    loss = outputs[1]
                    l_loss_rec, l_loss_match, l_loss_xent = outputs[2:5]
                    u_loss_rec, u_loss_match, u_loss_xent = outputs[5:8]
                    probs_labels = outputs[-1]
                elif opts['method']=='vae':
                    # Stub: the sess.run below is unreachable (assert).
                    assert False, 'to implement VAE'
                    [_, loss, loss_rec, loss_match, enc_mw, kl_g, kl_d] = self.sess.run(
                        [self.swae_opt,
                         self.objective,
                         self.loss_reconstruct,
                         self.penalty,
                         self.enc_mixweight,
                         self.kl_g,
                         self.kl_d],
                        feed_dict=feed_dict)
                    kl_gau.append(kl_g)
                    kl_dis.append(kl_d)
                losses.append(loss)
                losses_rec.append([l_loss_rec,u_loss_rec])
                losses_match.append([l_loss_match,u_loss_match])
                losses_xent.append([l_loss_xent,u_loss_xent])
                #mean_probs += get_mean_probs(u_batch_labels,probs_labels) / batches_num
                ##### TESTING LOOP #####
                if counter % opts['print_every'] == 0:
                    now = time.time()
                    # 10% of the test set (at least one batch) is held
                    # for evaluation; the remainder is used to estimate
                    # the cluster-to-label mapping.
                    test_size = np.shape(data.test_data)[0]
                    te_size = max(int(test_size*0.1),opts['batch_size'])
                    te_batches_num = int(te_size/opts['batch_size'])
                    tr_size = test_size - te_size
                    tr_batches_num = int(tr_size/opts['batch_size'])
                    # Determine clusters ID
                    mean_probs = np.zeros((10,10))
                    for it_ in range(tr_batches_num):
                        # Sample batches of data points
                        data_ids = te_size + np.random.choice(tr_size,
                                            opts['batch_size'],
                                            replace=False)
                        batch_images = data.test_data[data_ids].astype(np.float32)
                        batch_labels = data.test_labels[data_ids].astype(np.float32)
                        probs_train = self.sess.run(self.probs,
                                            feed_dict={self.u_points:batch_images,
                                                       self.is_training:False})
                        mean_prob = get_mean_probs(batch_labels,probs_train)
                        mean_probs += mean_prob / tr_batches_num
                    # Determine clusters given mean probs
                    labelled_clusters = relabelling_mask_from_probs(mean_probs)
                    # Test accuracy & loss
                    u_loss_rec_test, l_loss_rec_test = 0., 0.
                    u_acc_test = 0.
                    for it_ in range(te_batches_num):
                        # Sample batches of data points
                        data_ids = np.random.choice(te_size,
                                            opts['batch_size'],
                                            replace=False)
                        batch_images = data.test_data[data_ids].astype(np.float32)
                        batch_labels = data.test_labels[data_ids].astype(np.float32)
                        [ulr, llr, probs_test] = self.sess.run(
                                            [self.u_loss_reconstruct,
                                             self.l_loss_reconstruct,
                                             self.probs],
                                            feed_dict={self.l_points:batch_images,
                                                       self.l_labels:batch_labels,
                                                       self.u_points:batch_images,
                                                       self.is_training:False})
                        # Computing accuracy
                        u_acc = accuracy(batch_labels, probs_test, labelled_clusters)
                        u_acc_test += u_acc / te_batches_num
                        u_loss_rec_test += ulr / te_batches_num
                        l_loss_rec_test += llr / te_batches_num
                    # Auto-encoding unlabeled test images
                    [rec_pics_test, encoded, labeling, probs_pics_test] = self.sess.run(
                                            [self.reconstructed_point,
                                             self.encoded_point,
                                             self.labels_reconstructed,
                                             self.probs],
                                            feed_dict={self.l_points:data.test_data[:npics],
                                                       self.u_points:data.test_data[:npics],
                                                       self.is_training:False})
                    pi0 = self.sess.run(self.pi0,feed_dict={})
                    # Auto-encoding training images
                    [rec_pics_train, probs_pics_train] = self.sess.run(
                                            [self.reconstructed_point,
                                             self.probs],
                                            feed_dict={self.u_points:data.data[l_train_size:l_train_size+npics],
                                                       self.is_training:False})
                    # Random samples generated by the model
                    sample_gen = self.sess.run(self.decoded,
                                            feed_dict={self.u_points:data.data[l_train_size:l_train_size+npics],
                                                       self.sample_noise: fixed_noise,
                                                       self.is_training: False})
                    # Printing various loss values
                    debug_str = 'EPOCH: %d/%d, BATCH:%d/%d' % (
                                            epoch + 1, opts['epoch_num'],
                                            it + 1, batches_num)
                    logging.error(debug_str)
                    debug_str = 'TRAIN LOSS=%.3f, TEST ACC=%.2f' % (
                                            losses[-1],
                                            100*u_acc_test)
                    logging.error(debug_str)
                    debug_str = 'TEST REC(L/U)=%.3f/%.3f, TRAIN REC(L/U)=%.3f/%.3f' % (
                                            l_loss_rec_test,
                                            #opts['alpha']*alpha_decay*l_loss_rec_test,
                                            u_loss_rec_test,
                                            losses_rec[-1][0],
                                            #opts['alpha']*losses_rec[-1][0],
                                            losses_rec[-1][1])
                    logging.error(debug_str)
                    debug_str = 'MATCH(L/U)=%.3f/%.3f, XENT(L/U)=%.3f/%.3f' % (
                                            opts['l_lambda']*losses_match[-1][0],
                                            #opts['l_lambda']*opts['alpha']*losses_match[-1][0],
                                            opts['u_lambda']*losses_match[-1][1],
                                            opts['l_beta']*losses_xent[-1][0],
                                            #opts['l_beta']*opts['alpha']*alpha_decay*losses_xent[-1][0],
                                            opts['u_beta']*losses_xent[-1][1])
                    logging.error(debug_str)
                    debug_str = 'Clusters ID: %s' % (str(labelled_clusters))
                    logging.error(debug_str)
                    labs = np.argmax(labeling,axis=-1)
                    debug_str = 'Labelling: %s' % (str(labs))
                    logging.error(debug_str)
                    debug_str = 'Priors: %s' % (np.array2string(pi0,precision=3))
                    logging.error(debug_str)
                    print('')
                    # Making plots
                    #logging.error('Saving images..')
                    save_train(opts, data.data[:npics], data.test_data[:npics],     # images
                               data.test_labels[:npics],                            # labels
                               rec_pics_test[:npics], rec_pics_test[:npics],        # reconstructions
                               probs_pics_train, probs_pics_test,                   # mixweights
                               encoded,                                             # encoded points
                               fixed_noise,                                         # prior samples
                               sample_gen,                                          # samples
                               losses, losses_rec, losses_match, losses_xent,       # loses
                               kl_gau, kl_dis,                                      # KL terms
                               work_dir,                                            # working directory
                               'res_e%04d_mb%05d.png' % (epoch, it))                # filename
                # Update learning rate if necessary and counter
                # First 30 epochs do nothing
                if epoch >= 30:
                    # If no significant progress was made in last 10 epochs
                    # then decrease the learning rate.
                    if loss < min(losses[-20 * batches_num:]):
                        wait = 0
                    else:
                        wait += 1
                    if wait > 10 * batches_num:
                        decay = max(decay / 1.4, 1e-6)
                        logging.error('Reduction in lr: %f' % decay)
                        wait = 0
                counter += 1
        # # Save the final model
        # if epoch > 0:
        #     self.saver.save(self.sess,
        #                      os.path.join(work_dir,
        #                                   'checkpoints',
        #                                   'trained-wae-final'),
        #                      global_step=counter)
    def test(self, data, MODEL_DIR, WEIGHTS_FILE):
        """
        Test trained MoG model with chosen method.

        Restores a checkpoint, estimates the cluster-to-label mapping
        from training data, then accumulates reconstruction/matching
        losses and accuracy over train and test batches, logging them
        and saving the six aggregates to 'res_test.npy' in MODEL_PATH.

        NOTE(review): this method feeds self.sample_points /
        self.sample_mix_noise and fetches self.enc_mixweight /
        self.loss_reconstruct / self.penalty and uses self.pz_cov —
        none of these are created by add_model_placeholders above,
        which defines l_/u_ placeholders instead. Looks like it
        predates the labelled/unlabelled split; verify before use.
        """
        opts = self.opts
        # Load trained weights
        MODEL_PATH = os.path.join(opts['method'],MODEL_DIR)
        if not tf.gfile.IsDirectory(MODEL_PATH):
            raise Exception("model doesn't exist")
        WEIGHTS_PATH = os.path.join(MODEL_PATH,'checkpoints',WEIGHTS_FILE)
        if not tf.gfile.Exists(WEIGHTS_PATH+".meta"):
            raise Exception("weights file doesn't exist")
        self.saver.restore(self.sess, WEIGHTS_PATH)
        # Set up
        batch_size = 100
        tr_batches_num = int(data.num_points / batch_size)
        train_size = data.num_points
        te_batches_num = int(np.shape(data.test_data)[0] / batch_size)
        test_size = np.shape(data.test_data)[0]
        debug_str = 'test data size: %d' % (np.shape(data.test_data)[0])
        logging.error(debug_str)
        ### Compute probs
        # Iterate over batches
        logging.error('Determining clusters ID using training..')
        mean_probs = np.zeros((10,10))
        for it in range(tr_batches_num):
            # Sample batches of data points and Pz noise
            # NOTE(review): draws opts['batch_size'] ids although the
            # batch count above was computed with the local batch_size.
            data_ids = np.random.choice(train_size,
                                        opts['batch_size'],
                                        replace=False)
            batch_images = data.data[data_ids].astype(np.float32)
            batch_labels = data.labels[data_ids].astype(np.float32)
            prob = self.sess.run(self.enc_mixweight,
                                 feed_dict={self.sample_points: batch_images,
                                            self.is_training: False})
            mean_prob = get_mean_probs(batch_labels,prob)
            mean_probs += mean_prob / tr_batches_num
        # Determine clusters given mean probs
        labelled_clusters = relabelling_mask_from_probs(mean_probs)
        logging.error('Clusters ID:')
        print(labelled_clusters)
        ### Accuracy
        logging.error('Computing losses & accuracy..')
        # Training accuracy & loss
        acc_tr = 0.
        loss_rec_tr, loss_match_tr = 0., 0.
        for it in range(tr_batches_num):
            # Sample batches of data points and Pz noise
            data_ids = np.random.choice(train_size,
                                        batch_size,
                                        replace=False)
            batch_images = data.data[data_ids].astype(np.float32)
            batch_labels = data.labels[data_ids].astype(np.float32)
            # Accuracy
            probs = self.sess.run(self.enc_mixweight,
                                  feed_dict={self.sample_points: batch_images,
                                             self.is_training: False})
            acc = accuracy(batch_labels,probs,labelled_clusters)
            acc_tr += acc / tr_batches_num
            # loss
            batch_mix_noise = sample_pz(opts, self.pz_mean,
                                        self.pz_cov,
                                        opts['batch_size'],
                                        sampling_mode='all_mixtures')
            [loss_rec, loss_match] = self.sess.run(
                                        [self.loss_reconstruct,
                                         self.penalty],
                                        feed_dict={self.sample_points: batch_images,
                                                   self.sample_mix_noise: batch_mix_noise,
                                                   self.is_training: False})
            loss_rec_tr += loss_rec / tr_batches_num
            loss_match_tr += loss_match / tr_batches_num
        # Testing acc
        acc_te = 0.
        loss_rec_te, loss_match_te = 0., 0.
        for it in range(te_batches_num):
            # Sample batches of data points and Pz noise
            data_ids = np.random.choice(test_size,
                                        batch_size,
                                        replace=False)
            batch_images = data.test_data[data_ids].astype(np.float32)
            batch_labels = data.test_labels[data_ids].astype(np.float32)
            # Accuracy
            probs = self.sess.run(self.enc_mixweight,
                                  feed_dict={self.sample_points: batch_images,
                                             self.is_training: False})
            acc = accuracy(batch_labels,probs,labelled_clusters)
            acc_te += acc / te_batches_num
            # Testing loss
            batch_mix_noise = sample_pz(opts, self.pz_mean,
                                        self.pz_cov,
                                        batch_size,
                                        sampling_mode='all_mixtures')
            [loss_rec, loss_match] = self.sess.run(
                                        [self.loss_reconstruct,
                                         self.penalty],
                                        feed_dict={self.sample_points: batch_images,
                                                   self.sample_mix_noise: batch_mix_noise,
                                                   self.is_training: False})
            loss_rec_te += loss_rec / te_batches_num
            loss_match_te += loss_match / te_batches_num
        ### Logs
        debug_str = 'rec train: %.4f, rec test: %.4f' % (loss_rec_tr,
                                                         loss_rec_te)
        logging.error(debug_str)
        debug_str = 'match train: %.4f, match test: %.4f' % (loss_match_tr,
                                                             loss_match_te)
        logging.error(debug_str)
        debug_str = 'acc train: %.2f, acc test: %.2f' % (100.*acc_tr,
                                                         100.*acc_te)
        logging.error(debug_str)
        ### Saving
        filename = 'res_test'
        res_test = np.array((loss_rec_tr, loss_rec_te,
                             loss_match_tr, loss_match_te,
                             acc_tr, acc_te))
        np.save(os.path.join(MODEL_PATH,filename),res_test)
def reg(self, data, MODEL_DIR, WEIGHTS_FILE):
"""
Trained a logistic regression on the trained MoG model
"""
opts = self.opts
# Load trained weights
MODEL_PATH = os.path.join(opts['method'],MODEL_DIR)
if not tf.gfile.IsDirectory(MODEL_PATH):
raise Exception("model doesn't exist")
WEIGHTS_PATH = os.path.join(MODEL_PATH,'checkpoints',WEIGHTS_FILE)
if not tf.gfile.Exists(WEIGHTS_PATH+".meta"):
raise Exception("weights file doesn't exist")
self.saver.restore(self.sess, WEIGHTS_PATH)
# set up
epoch_num = 20
print_every = 2
batch_size = 100
tr_batches_num = int(data.num_points / batch_size)
train_size = data.num_points
te_batches_num = int(np.shape(data.test_data)[0] / batch_size)
test_size = np.shape(data.test_data)[0]
lr = 0.001
### Logistic regression model
# Construct model
linear_layer = ops.linear(opts, self.preds, 10, scope='log_reg')
logreg_preds = tf.nn.softmax(linear_layer) # Softmax
# Minimize error using cross entropy
cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y*tf.log(logreg_preds), reduction_indices=1))
# Accuracy
correct_prediction = tf.equal(tf.argmax(logreg_preds, 1),tf.argmax(self.y, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
### Optimizer
opt = tf.train.GradientDescentOptimizer(lr)
logreg_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='log_reg')
logreg_opt = opt.minimize(loss=cross_entropy, var_list=logreg_vars)
for var in logreg_vars:
self.sess.run(var.initializer)
### Training loop
costs, acc_train, acc_test = [], [], []
counter = 0
logging.error('Start training..')
self.start_time = time.time()
for epoch in range(epoch_num):
cost = 0.
# Iterate over batches
for it_ in range(tr_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(train_size,
batch_size,
replace=False)
batch_images = data.data[data_ids].astype(np.float32)
# Get preds
preds = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
# linear reg
batch_labels = one_hot(data.labels[data_ids])
[_ , c] = self.sess.run([logreg_opt,cross_entropy],
feed_dict={self.preds: preds,
self.y: batch_labels})
cost += c / tr_batches_num
costs.append(cost)
counter += 1
if counter==1 or counter % print_every == 0:
# Testing and logging info
acc_tr, acc_te = 0., 0.
# Training Acc
for it in range(tr_batches_num):
# Sample batches of data points and Pz noise
data_ids = np.random.choice(train_size,
batch_size,
replace=False)
batch_images = data.data[data_ids].astype(np.float32)
preds = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
batch_labels = one_hot(data.labels[data_ids])
a = self.sess.run(acc,
feed_dict={self.preds: preds,
self.y: batch_labels})
acc_tr += a/ tr_batches_num
# Testing Acc
for it in range(te_batches_num):
data_ids = np.random.choice(test_size,
batch_size,
replace=False)
batch_images = data.test_data[data_ids].astype(np.float32)
preds = self.sess.run(self.enc_mixweight,
feed_dict={self.sample_points: batch_images,
self.is_training: False})
batch_labels = one_hot(data.test_labels[data_ids])
a = self.sess.run(acc,
feed_dict={self.preds: preds,
self.y: batch_labels})
acc_te += a/ te_batches_num
acc_train.append(acc_tr)
acc_test.append(acc_te)
# logs
debug_str = 'EPOCH: %d/%d, BATCH:%d/%d' % (
epoch + 1, epoch_num,
it_ + 1, tr_batches_num)
logging.error(debug_str)
debug_str = 'cost=%.3f, TRAIN ACC=%.2f, TEST ACC=%.2f' % (
costs[-1], 100*acc_tr, 100*acc_te)
logging.error(debug_str)
### Saving
filename = 'logreg'
xstep = int(len(costs)/100)
np.savez(os.path.join(MODEL_PATH,filename),
costs=np.array(costs[::xstep]),
acc_tr=np.array(acc_train),
acc_te=np.array(acc_test))
    def vizu(self, data, MODEL_DIR, WEIGHTS_FILE):
        """
        Plot and save different visualizations.

        Restores a checkpoint and produces: train/test reconstructions,
        latent interpolations between encoded test anchors, random
        samples from the prior, and interpolations between the prior
        means; everything is rendered via save_vizu into MODEL_PATH.

        NOTE(review): feeds self.sample_points and fetches
        self.enc_mixweight and uses self.pz_cov — none of these are
        created by add_model_placeholders above; verify against the
        rest of the class before use.
        """
        opts = self.opts
        # Load trained weights
        MODEL_PATH = os.path.join(opts['method'],MODEL_DIR)
        if not tf.gfile.IsDirectory(MODEL_PATH):
            raise Exception("model doesn't exist")
        WEIGHTS_PATH = os.path.join(MODEL_PATH,'checkpoints',WEIGHTS_FILE)
        if not tf.gfile.Exists(WEIGHTS_PATH+".meta"):
            raise Exception("weights file doesn't exist")
        self.saver.restore(self.sess, WEIGHTS_PATH)
        # Set up
        num_pics = 200
        test_size = np.shape(data.test_data)[0]
        step_inter = 20          # interpolation steps between two anchors
        num_anchors = 10         # number of anchor pairs
        imshape = datashapes[opts['dataset']]
        # Auto-encoding training images
        logging.error('Encoding and decoding train images..')
        rec_train = self.sess.run(self.reconstructed_point,
                                  feed_dict={self.sample_points: data.data[:num_pics],
                                             self.is_training: False})
        # Auto-encoding test images
        logging.error('Encoding and decoding test images..')
        [rec_test, encoded, enc_mw_test] = self.sess.run(
                                [self.reconstructed_point,
                                 self.encoded_point,
                                 self.enc_mixweight],
                                feed_dict={self.sample_points:data.test_data[:num_pics],
                                           self.is_training:False})
        # Encode anchors points and interpolate
        logging.error('Encoding anchors points and interpolating..')
        anchors_ids = np.random.choice(test_size,2*num_anchors,replace=False)
        anchors = data.test_data[anchors_ids]
        enc_anchors = self.sess.run(self.encoded_point,
                                    feed_dict={self.sample_points: anchors,
                                               self.is_training: False})
        enc_interpolation = generate_linespace(opts, step_inter,
                                               'points_interpolation',
                                               anchors=enc_anchors)
        noise = enc_interpolation.reshape(-1,opts['zdim'])
        decoded = self.sess.run(self.decoded,
                                feed_dict={self.sample_noise: noise,
                                           self.is_training: False})
        interpolation = decoded.reshape([-1,step_inter]+imshape)
        # Anchors are paired (even=start, odd=end); prepend/append the
        # original anchor images around the decoded interpolation.
        start_anchors = anchors[::2]
        end_anchors = anchors[1::2]
        interpolation = np.concatenate((start_anchors[:,np.newaxis],
                            np.concatenate((interpolation,end_anchors[:,np.newaxis]), axis=1)),
                            axis=1)
        # Random samples generated by the model
        logging.error('Decoding random samples..')
        prior_noise = sample_pz(opts, self.pz_mean,
                                self.pz_cov,
                                num_pics,
                                sampling_mode = 'per_mixture')
        samples = self.sess.run(self.decoded,
                                feed_dict={self.sample_noise: prior_noise,
                                           self.is_training: False})
        # Encode prior means and interpolate (different linespace mode
        # for 2-D latents, where a grid transformation is plotted)
        logging.error('Generating latent linespace and decoding..')
        if opts['zdim']==2:
            pz_mean_interpolation = generate_linespace(opts, step_inter,
                                                       'transformation',
                                                       anchors=self.pz_mean)
        else:
            pz_mean_interpolation = generate_linespace(opts, step_inter,
                                                       'priors_interpolation',
                                                       anchors=self.pz_mean)
        noise = pz_mean_interpolation.reshape(-1,opts['zdim'])
        decoded = self.sess.run(self.decoded,
                                feed_dict={self.sample_noise: noise,
                                           self.is_training: False})
        prior_interpolation = decoded.reshape([-1,step_inter]+imshape)
        # Making plots
        logging.error('Saving images..')
        save_vizu(opts, data.data[:num_pics], data.test_data[:num_pics],    # images
                  data.test_labels[:num_pics],                              # labels
                  rec_train, rec_test,                                      # reconstructions
                  enc_mw_test,                                              # mixweights
                  encoded,                                                  # encoded points
                  prior_noise,                                              # prior samples
                  samples,                                                  # samples
                  interpolation, prior_interpolation,                       # interpolations
                  MODEL_PATH)                                               # working directory
| [
"utils.create_dir",
"tensorflow.shape",
"loss_functions.moments_loss",
"tensorflow.reduce_sum",
"numpy.array2string",
"model_nn.continuous_decoder",
"tensorflow.gfile.IsDirectory",
"numpy.array",
"tensorflow.nn.softmax",
"tensorflow.cast",
"logging.error",
"tensorflow.log",
"tensorflow.clip_... | [((960, 1006), 'logging.error', 'logging.error', (['"""Building the Tensorflow Graph"""'], {}), "('Building the Tensorflow Graph')\n", (973, 1006), False, 'import logging\n'), ((1057, 1069), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1067, 1069), True, 'import tensorflow as tf\n'), ((1457, 1478), 'tensorflow.range', 'tf.range', (['sample_size'], {}), '(sample_size)\n', (1465, 1478), True, 'import tensorflow as tf\n'), ((1637, 1662), 'priors.init_gaussian_prior', 'init_gaussian_prior', (['opts'], {}), '(opts)\n', (1656, 1662), False, 'from priors import init_gaussian_prior, init_cat_prior\n'), ((1682, 1702), 'priors.init_cat_prior', 'init_cat_prior', (['opts'], {}), '(opts)\n', (1696, 1702), False, 'from priors import init_gaussian_prior, init_cat_prior\n'), ((1755, 1819), 'model_nn.label_encoder', 'label_encoder', (['self.opts', 'self.u_points', '(False)', 'self.is_training'], {}), '(self.opts, self.u_points, False, self.is_training)\n', (1768, 1819), False, 'from model_nn import label_encoder, cat_encoder, gaussian_encoder\n'), ((1897, 1930), 'ops.softmax', 'ops.softmax', (['probs_logit'], {'axis': '(-1)'}), '(probs_logit, axis=-1)\n', (1908, 1930), False, 'import ops\n'), ((2153, 2202), 'ops.log_sum_exp', 'ops.log_sum_exp', (['logit_pi'], {'axis': '(-1)', 'keepdims': '(True)'}), '(logit_pi, axis=-1, keepdims=True)\n', (2168, 2202), False, 'import ops\n'), ((2310, 2356), 'ops.log_sum_exp', 'ops.log_sum_exp', (['logit'], {'axis': '(1)', 'keepdims': '(False)'}), '(logit, axis=1, keepdims=False)\n', (2325, 2356), False, 'import ops\n'), ((2518, 2561), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['u_pi'], {'axis': '(1)', 'keepdims': '(False)'}), '(u_pi, axis=1, keepdims=False)\n', (2531, 2561), True, 'import tensorflow as tf\n'), ((2785, 2826), 'tensorflow.stack', 'tf.stack', (['[range, self.l_labels]'], {'axis': '(-1)'}), '([range, self.l_labels], axis=-1)\n', (2793, 2826), True, 'import tensorflow as tf\n'), ((2842, 2875), 
'tensorflow.gather_nd', 'tf.gather_nd', (['logit_pi', 'idx_label'], {}), '(logit_pi, idx_label)\n', (2854, 2875), True, 'import tensorflow as tf\n'), ((2895, 2922), 'ops.softmax', 'ops.softmax', (['logit'], {'axis': '(-1)'}), '(logit, axis=-1)\n', (2906, 2922), False, 'import ops\n'), ((4010, 4075), 'model_nn.discrete_decoder', 'discrete_decoder', (['opts', 'self.label_noise', '(False)', 'self.is_training'], {}), '(opts, self.label_noise, False, self.is_training)\n', (4026, 4075), False, 'from model_nn import continuous_decoder, discrete_decoder\n'), ((4456, 4487), 'tensorflow.stack', 'tf.stack', (['[range, idx]'], {'axis': '(-1)'}), '([range, idx], axis=-1)\n', (4464, 4487), True, 'import tensorflow as tf\n'), ((4515, 4561), 'tensorflow.gather_nd', 'tf.gather_nd', (['self.u_mixtures_encoded', 'mix_idx'], {}), '(self.u_mixtures_encoded, mix_idx)\n', (4527, 4561), True, 'import tensorflow as tf\n'), ((4596, 4639), 'tensorflow.gather_nd', 'tf.gather_nd', (['self.u_reconstructed', 'mix_idx'], {}), '(self.u_reconstructed, mix_idx)\n', (4608, 4639), True, 'import tensorflow as tf\n'), ((4674, 4724), 'tensorflow.gather_nd', 'tf.gather_nd', (['self.u_reconstructed_logits', 'mix_idx'], {}), '(self.u_reconstructed_logits, mix_idx)\n', (4686, 4724), True, 'import tensorflow as tf\n'), ((5441, 5514), 'loss_functions.reconstruction_loss', 'reconstruction_loss', (['opts', 'self.u_pi', 'self.u_points', 'self.u_reconstructed'], {}), '(opts, self.u_pi, self.u_points, self.u_reconstructed)\n', (5460, 5514), False, 'from loss_functions import matching_penalty, reconstruction_loss, moments_loss\n'), ((5740, 5909), 'loss_functions.matching_penalty', 'matching_penalty', (['opts', 'self.pi0', 'self.l_pi', 'self.l_enc_mean', 'self.l_enc_logSigma', 'self.pz_mean', 'self.pz_sigma', 'self.l_sample_mix_noise', 'self.l_mixtures_encoded'], {}), '(opts, self.pi0, self.l_pi, self.l_enc_mean, self.\n l_enc_logSigma, self.pz_mean, self.pz_sigma, self.l_sample_mix_noise,\n 
self.l_mixtures_encoded)\n', (5756, 5909), False, 'from loss_functions import matching_penalty, reconstruction_loss, moments_loss\n'), ((6198, 6367), 'loss_functions.matching_penalty', 'matching_penalty', (['opts', 'self.pi0', 'self.u_pi', 'self.u_enc_mean', 'self.u_enc_logSigma', 'self.pz_mean', 'self.pz_sigma', 'self.u_sample_mix_noise', 'self.u_mixtures_encoded'], {}), '(opts, self.pi0, self.u_pi, self.u_enc_mean, self.\n u_enc_logSigma, self.pz_mean, self.pz_sigma, self.u_sample_mix_noise,\n self.u_mixtures_encoded)\n', (6214, 6367), False, 'from loss_functions import matching_penalty, reconstruction_loss, moments_loss\n'), ((7260, 7293), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7291, 7293), True, 'import tensorflow as tf\n'), ((7414, 7476), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([None] + shape)'], {'name': '"""l_points_ph"""'}), "(tf.float32, [None] + shape, name='l_points_ph')\n", (7428, 7476), True, 'import tensorflow as tf\n'), ((7573, 7625), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {'name': '"""l_labels_ph"""'}), "(tf.int64, [None], name='l_labels_ph')\n", (7587, 7625), True, 'import tensorflow as tf\n'), ((7733, 7831), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', "([None] + [opts['nmixtures'], opts['zdim']])"], {'name': '"""l_mix_noise_ph"""'}), "(tf.float32, [None] + [opts['nmixtures'], opts['zdim']], name\n ='l_mix_noise_ph')\n", (7747, 7831), True, 'import tensorflow as tf\n'), ((7922, 7984), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([None] + shape)'], {'name': '"""u_points_ph"""'}), "(tf.float32, [None] + shape, name='u_points_ph')\n", (7936, 7984), True, 'import tensorflow as tf\n'), ((8091, 8189), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', "([None] + [opts['nmixtures'], opts['zdim']])"], {'name': '"""u_mix_noise_ph"""'}), "(tf.float32, [None] + [opts['nmixtures'], opts['zdim']], name\n 
='u_mix_noise_ph')\n", (8105, 8189), True, 'import tensorflow as tf\n'), ((8284, 8376), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', "([None] + [opts['nmixtures'], opts['zdim']])"], {'name': '"""noise_ph"""'}), "(tf.float32, [None] + [opts['nmixtures'], opts['zdim']], name\n ='noise_ph')\n", (8298, 8376), True, 'import tensorflow as tf\n'), ((8858, 8926), 'tensorflow.range', 'tf.range', (["opts['nmixtures']"], {'dtype': 'tf.float32', 'name': '"""label_noise_ph"""'}), "(opts['nmixtures'], dtype=tf.float32, name='label_noise_ph')\n", (8866, 8926), True, 'import tensorflow as tf\n'), ((9026, 9061), 'tensorflow.expand_dims', 'tf.expand_dims', (['label_noise'], {'axis': '(0)'}), '(label_noise, axis=0)\n', (9040, 9061), True, 'import tensorflow as tf\n'), ((9130, 9188), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {'name': '"""predictions"""'}), "(tf.float32, [None, 10], name='predictions')\n", (9144, 9188), True, 'import tensorflow as tf\n'), ((9231, 9284), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {'name': '"""labels"""'}), "(tf.float32, [None, 10], name='labels')\n", (9245, 9284), True, 'import tensorflow as tf\n'), ((9456, 9504), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""rate_decay_ph"""'}), "(tf.float32, name='rate_decay_ph')\n", (9470, 9504), True, 'import tensorflow as tf\n'), ((9527, 9573), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training_ph"""'}), "(tf.bool, name='is_training_ph')\n", (9541, 9573), True, 'import tensorflow as tf\n'), ((9590, 9630), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""alpha"""'}), "(tf.float32, name='alpha')\n", (9604, 9630), True, 'import tensorflow as tf\n'), ((9653, 9693), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""alpha"""'}), "(tf.float32, name='alpha')\n", (9667, 9693), True, 'import tensorflow as tf\n'), ((9712, 9753), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""lambda"""'}), "(tf.float32, name='lambda')\n", (9726, 9753), True, 'import tensorflow as tf\n'), ((9771, 9810), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta"""'}), "(tf.float32, name='beta')\n", (9785, 9810), True, 'import tensorflow as tf\n'), ((9829, 9870), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""lambda"""'}), "(tf.float32, name='lambda')\n", (9843, 9870), True, 'import tensorflow as tf\n'), ((9888, 9927), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""beta"""'}), "(tf.float32, name='beta')\n", (9902, 9927), True, 'import tensorflow as tf\n'), ((10250, 10280), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (10264, 10280), True, 'import tensorflow as tf\n'), ((11382, 11450), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""encoder"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')\n", (11399, 11450), True, 'import tensorflow as tf\n'), ((11474, 11544), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""generator"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n", (11491, 11544), True, 'import tensorflow as tf\n'), ((11566, 11632), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""prior"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='prior')\n", (11583, 11632), True, 'import tensorflow as tf\n'), ((12371, 12462), 'model_nn.cat_encoder', 'cat_encoder', (['self.opts'], {'inputs': 'input_points', 'reuse': 'reuse', 'is_training': 'self.is_training'}), '(self.opts, inputs=input_points, reuse=reuse, is_training=self.\n is_training)\n', (12382, 12462), False, 'from model_nn import label_encoder, cat_encoder, gaussian_encoder\n'), ((14088, 14132), 'tensorflow.reshape', 
'tf.reshape', (['encoded', "[-1, self.opts['zdim']]"], {}), "(encoded, [-1, self.opts['zdim']])\n", (14098, 14132), True, 'import tensorflow as tf\n'), ((14152, 14242), 'model_nn.continuous_decoder', 'continuous_decoder', (['self.opts'], {'noise': 'noise', 'reuse': 'reuse', 'is_training': 'self.is_training'}), '(self.opts, noise=noise, reuse=reuse, is_training=self.\n is_training)\n', (14170, 14242), False, 'from model_nn import continuous_decoder, discrete_decoder\n'), ((14374, 14439), 'tensorflow.reshape', 'tf.reshape', (['recon', "([-1, self.opts['nmixtures']] + self.data_shape)"], {}), "(recon, [-1, self.opts['nmixtures']] + self.data_shape)\n", (14384, 14439), True, 'import tensorflow as tf\n'), ((14478, 14541), 'tensorflow.reshape', 'tf.reshape', (['log', "([-1, self.opts['nmixtures']] + self.data_shape)"], {}), "(log, [-1, self.opts['nmixtures']] + self.data_shape)\n", (14488, 14541), True, 'import tensorflow as tf\n'), ((14770, 14832), 'loss_functions.moments_loss', 'moments_loss', (['self.l_sample_mix_noise', 'self.l_mixtures_encoded'], {}), '(self.l_sample_mix_noise, self.l_mixtures_encoded)\n', (14782, 14832), False, 'from loss_functions import matching_penalty, reconstruction_loss, moments_loss\n'), ((14854, 14916), 'loss_functions.moments_loss', 'moments_loss', (['self.u_sample_mix_noise', 'self.u_mixtures_encoded'], {}), '(self.u_sample_mix_noise, self.u_mixtures_encoded)\n', (14866, 14916), False, 'from loss_functions import matching_penalty, reconstruction_loss, moments_loss\n'), ((16822, 16868), 'logging.error', 'logging.error', (['"""Pretraining the encoder done."""'], {}), "('Pretraining the encoder done.')\n", (16835, 16868), False, 'import logging\n'), ((16877, 16948), 'logging.error', 'logging.error', (["('Loss after %d iterations: %.3f' % (steps_max, pre_loss))"], {}), "('Loss after %d iterations: %.3f' % (steps_max, pre_loss))\n", (16890, 16948), False, 'import logging\n'), ((17303, 17335), 'utils.create_dir', 'utils.create_dir', 
(["opts['method']"], {}), "(opts['method'])\n", (17319, 17335), False, 'import utils\n'), ((17355, 17401), 'os.path.join', 'os.path.join', (["opts['method']", "opts['work_dir']"], {}), "(opts['method'], opts['work_dir'])\n", (17367, 17401), False, 'import os\n'), ((17742, 17766), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (17755, 17766), False, 'import logging\n'), ((18400, 18500), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_sigma', "opts['plot_num_pics']"], {'sampling_mode': '"""per_mixture"""'}), "(opts, self.pz_mean, self.pz_sigma, opts['plot_num_pics'],\n sampling_mode='per_mixture')\n", (18409, 18500), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((18637, 18648), 'time.time', 'time.time', ([], {}), '()\n', (18646, 18648), False, 'import time\n'), ((34021, 34060), 'os.path.join', 'os.path.join', (["opts['method']", 'MODEL_DIR'], {}), "(opts['method'], MODEL_DIR)\n", (34033, 34060), False, 'import os\n'), ((34183, 34236), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""checkpoints"""', 'WEIGHTS_FILE'], {}), "(MODEL_PATH, 'checkpoints', WEIGHTS_FILE)\n", (34195, 34236), False, 'import os\n'), ((34737, 34761), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (34750, 34761), False, 'import logging\n'), ((34828, 34885), 'logging.error', 'logging.error', (['"""Determining clusters ID using training.."""'], {}), "('Determining clusters ID using training..')\n", (34841, 34885), False, 'import logging\n'), ((34907, 34925), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (34915, 34925), True, 'import numpy as np\n'), ((35697, 35736), 'supervised_functions.relabelling_mask_from_probs', 'relabelling_mask_from_probs', (['mean_probs'], {}), '(mean_probs)\n', (35724, 35736), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((35745, 35774), 'logging.error', 
'logging.error', (['"""Clusters ID:"""'], {}), "('Clusters ID:')\n", (35758, 35774), False, 'import logging\n'), ((35838, 35884), 'logging.error', 'logging.error', (['"""Computing losses & accuracy.."""'], {}), "('Computing losses & accuracy..')\n", (35851, 35884), False, 'import logging\n'), ((39406, 39430), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (39419, 39430), False, 'import logging\n'), ((39589, 39613), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (39602, 39613), False, 'import logging\n'), ((39766, 39790), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (39779, 39790), False, 'import logging\n'), ((39860, 39946), 'numpy.array', 'np.array', (['(loss_rec_tr, loss_rec_te, loss_match_tr, loss_match_te, acc_tr, acc_te)'], {}), '((loss_rec_tr, loss_rec_te, loss_match_tr, loss_match_te, acc_tr,\n acc_te))\n', (39868, 39946), True, 'import numpy as np\n'), ((40275, 40314), 'os.path.join', 'os.path.join', (["opts['method']", 'MODEL_DIR'], {}), "(opts['method'], MODEL_DIR)\n", (40287, 40314), False, 'import os\n'), ((40437, 40490), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""checkpoints"""', 'WEIGHTS_FILE'], {}), "(MODEL_PATH, 'checkpoints', WEIGHTS_FILE)\n", (40449, 40490), False, 'import os\n'), ((41064, 41113), 'ops.linear', 'ops.linear', (['opts', 'self.preds', '(10)'], {'scope': '"""log_reg"""'}), "(opts, self.preds, 10, scope='log_reg')\n", (41074, 41113), False, 'import ops\n'), ((41137, 41164), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['linear_layer'], {}), '(linear_layer)\n', (41150, 41164), True, 'import tensorflow as tf\n'), ((41536, 41573), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr'], {}), '(lr)\n', (41569, 41573), True, 'import tensorflow as tf\n'), ((41596, 41664), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""log_reg"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, 
scope='log_reg')\n", (41613, 41664), True, 'import tensorflow as tf\n'), ((41920, 41953), 'logging.error', 'logging.error', (['"""Start training.."""'], {}), "('Start training..')\n", (41933, 41953), False, 'import logging\n'), ((41980, 41991), 'time.time', 'time.time', ([], {}), '()\n', (41989, 41991), False, 'import time\n'), ((45965, 46004), 'os.path.join', 'os.path.join', (["opts['method']", 'MODEL_DIR'], {}), "(opts['method'], MODEL_DIR)\n", (45977, 46004), False, 'import os\n'), ((46127, 46180), 'os.path.join', 'os.path.join', (['MODEL_PATH', '"""checkpoints"""', 'WEIGHTS_FILE'], {}), "(MODEL_PATH, 'checkpoints', WEIGHTS_FILE)\n", (46139, 46180), False, 'import os\n'), ((46574, 46627), 'logging.error', 'logging.error', (['"""Encoding and decoding train images.."""'], {}), "('Encoding and decoding train images..')\n", (46587, 46627), False, 'import logging\n'), ((46890, 46942), 'logging.error', 'logging.error', (['"""Encoding and decoding test images.."""'], {}), "('Encoding and decoding test images..')\n", (46903, 46942), False, 'import logging\n'), ((47380, 47440), 'logging.error', 'logging.error', (['"""Encoding anchors points and interpolating.."""'], {}), "('Encoding anchors points and interpolating..')\n", (47393, 47440), False, 'import logging\n'), ((47463, 47522), 'numpy.random.choice', 'np.random.choice', (['test_size', '(2 * num_anchors)'], {'replace': '(False)'}), '(test_size, 2 * num_anchors, replace=False)\n', (47479, 47522), True, 'import numpy as np\n'), ((47798, 47884), 'sampling_functions.generate_linespace', 'generate_linespace', (['opts', 'step_inter', '"""points_interpolation"""'], {'anchors': 'enc_anchors'}), "(opts, step_inter, 'points_interpolation', anchors=\n enc_anchors)\n", (47816, 47884), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((48630, 48672), 'logging.error', 'logging.error', (['"""Decoding random samples.."""'], {}), "('Decoding random samples..')\n", (48643, 48672), False, 
'import logging\n'), ((48695, 48781), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_cov', 'num_pics'], {'sampling_mode': '"""per_mixture"""'}), "(opts, self.pz_mean, self.pz_cov, num_pics, sampling_mode=\n 'per_mixture')\n", (48704, 48781), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((49142, 49201), 'logging.error', 'logging.error', (['"""Generating latent linespace and decoding.."""'], {}), "('Generating latent linespace and decoding..')\n", (49155, 49201), False, 'import logging\n'), ((50033, 50065), 'logging.error', 'logging.error', (['"""Saving images.."""'], {}), "('Saving images..')\n", (50046, 50065), False, 'import logging\n'), ((50074, 50289), 'plot_functions.save_vizu', 'save_vizu', (['opts', 'data.data[:num_pics]', 'data.test_data[:num_pics]', 'data.test_labels[:num_pics]', 'rec_train', 'rec_test', 'enc_mw_test', 'encoded', 'prior_noise', 'samples', 'interpolation', 'prior_interpolation', 'MODEL_PATH'], {}), '(opts, data.data[:num_pics], data.test_data[:num_pics], data.\n test_labels[:num_pics], rec_train, rec_test, enc_mw_test, encoded,\n prior_noise, samples, interpolation, prior_interpolation, MODEL_PATH)\n', (50083, 50289), False, 'from plot_functions import save_train, save_vizu\n'), ((1396, 1438), 'tensorflow.shape', 'tf.shape', (['self.u_points'], {'out_type': 'tf.int64'}), '(self.u_points, out_type=tf.int64)\n', (1404, 1438), True, 'import tensorflow as tf\n'), ((2256, 2292), 'tensorflow.expand_dims', 'tf.expand_dims', (['probs_logit'], {'axis': '(-1)'}), '(probs_logit, axis=-1)\n', (2270, 2292), True, 'import tensorflow as tf\n'), ((2432, 2462), 'ops.softmax', 'ops.softmax', (['logit_pi'], {'axis': '(-1)'}), '(logit_pi, axis=-1)\n', (2443, 2462), False, 'import ops\n'), ((2462, 2497), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.probs'], {'axis': '(-1)'}), '(self.probs, axis=-1)\n', (2476, 2497), True, 'import tensorflow as tf\n'), ((3097, 3124), 
'tensorflow.exp', 'tf.exp', (['self.u_enc_logSigma'], {}), '(self.u_enc_logSigma)\n', (3103, 3124), True, 'import tensorflow as tf\n'), ((3337, 3364), 'tensorflow.exp', 'tf.exp', (['self.l_enc_logSigma'], {}), '(self.l_enc_logSigma)\n', (3343, 3364), True, 'import tensorflow as tf\n'), ((5361, 5406), 'tensorflow.argmax', 'tf.argmax', (['self.labels_reconstructed'], {'axis': '(-1)'}), '(self.labels_reconstructed, axis=-1)\n', (5370, 5406), True, 'import tensorflow as tf\n'), ((10993, 11030), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr'], {}), '(lr)\n', (11026, 11030), True, 'import tensorflow as tf\n'), ((11898, 11945), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grad', "opts['clip_norm']"], {}), "(grad, opts['clip_norm'])\n", (11920, 11945), True, 'import tensorflow as tf\n'), ((15341, 15397), 'numpy.random.choice', 'np.random.choice', (['l_train_size', 'batch_size'], {'replace': '(True)'}), '(l_train_size, batch_size, replace=True)\n', (15357, 15397), True, 'import numpy as np\n'), ((15562, 15653), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_sigma', 'batch_size'], {'sampling_mode': '"""all_mixtures"""'}), "(opts, self.pz_mean, self.pz_sigma, batch_size, sampling_mode=\n 'all_mixtures')\n", (15571, 15653), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((16093, 16184), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_sigma', 'batch_size'], {'sampling_mode': '"""all_mixtures"""'}), "(opts, self.pz_mean, self.pz_sigma, batch_size, sampling_mode=\n 'all_mixtures')\n", (16102, 16184), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((17142, 17171), 'logging.error', 'logging.error', (['"""Training WAE"""'], {}), "('Training WAE')\n", (17155, 17171), False, 'import logging\n'), ((34075, 34107), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', 
(['MODEL_PATH'], {}), '(MODEL_PATH)\n', (34095, 34107), True, 'import tensorflow as tf\n'), ((34250, 34289), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(WEIGHTS_PATH + '.meta')"], {}), "(WEIGHTS_PATH + '.meta')\n", (34265, 34289), True, 'import tensorflow as tf\n'), ((34628, 34652), 'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (34636, 34652), True, 'import numpy as np\n'), ((35046, 35109), 'numpy.random.choice', 'np.random.choice', (['train_size', "opts['batch_size']"], {'replace': '(False)'}), "(train_size, opts['batch_size'], replace=False)\n", (35062, 35109), True, 'import numpy as np\n'), ((35536, 35570), 'supervised_functions.get_mean_probs', 'get_mean_probs', (['batch_labels', 'prob'], {}), '(batch_labels, prob)\n', (35550, 35570), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((36105, 36160), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {'replace': '(False)'}), '(train_size, batch_size, replace=False)\n', (36121, 36160), True, 'import numpy as np\n'), ((36620, 36668), 'supervised_functions.accuracy', 'accuracy', (['batch_labels', 'probs', 'labelled_clusters'], {}), '(batch_labels, probs, labelled_clusters)\n', (36628, 36668), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((36759, 36855), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_cov', "opts['batch_size']"], {'sampling_mode': '"""all_mixtures"""'}), "(opts, self.pz_mean, self.pz_cov, opts['batch_size'],\n sampling_mode='all_mixtures')\n", (36768, 36855), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((37773, 37827), 'numpy.random.choice', 'np.random.choice', (['test_size', 'batch_size'], {'replace': '(False)'}), '(test_size, batch_size, replace=False)\n', (37789, 37827), True, 'import numpy as np\n'), ((38297, 38345), 
'supervised_functions.accuracy', 'accuracy', (['batch_labels', 'probs', 'labelled_clusters'], {}), '(batch_labels, probs, labelled_clusters)\n', (38305, 38345), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((38444, 38533), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_cov', 'batch_size'], {'sampling_mode': '"""all_mixtures"""'}), "(opts, self.pz_mean, self.pz_cov, batch_size, sampling_mode=\n 'all_mixtures')\n", (38453, 38533), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((40015, 40049), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'filename'], {}), '(MODEL_PATH, filename)\n', (40027, 40049), False, 'import os\n'), ((40329, 40361), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (40349, 40361), True, 'import tensorflow as tf\n'), ((40504, 40543), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(WEIGHTS_PATH + '.meta')"], {}), "(WEIGHTS_PATH + '.meta')\n", (40519, 40543), True, 'import tensorflow as tf\n'), ((40929, 40953), 'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (40937, 40953), True, 'import numpy as np\n'), ((41382, 41408), 'tensorflow.argmax', 'tf.argmax', (['logreg_preds', '(1)'], {}), '(logreg_preds, 1)\n', (41391, 41408), True, 'import tensorflow as tf\n'), ((41409, 41429), 'tensorflow.argmax', 'tf.argmax', (['self.y', '(1)'], {}), '(self.y, 1)\n', (41418, 41429), True, 'import tensorflow as tf\n'), ((41460, 41499), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (41467, 41499), True, 'import tensorflow as tf\n'), ((45582, 45616), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'filename'], {}), '(MODEL_PATH, filename)\n', (45594, 45616), False, 'import os\n'), ((46019, 46051), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['MODEL_PATH'], {}), 
'(MODEL_PATH)\n', (46039, 46051), True, 'import tensorflow as tf\n'), ((46194, 46233), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(WEIGHTS_PATH + '.meta')"], {}), "(WEIGHTS_PATH + '.meta')\n", (46209, 46233), True, 'import tensorflow as tf\n'), ((46403, 46427), 'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (46411, 46427), True, 'import numpy as np\n'), ((49266, 49342), 'sampling_functions.generate_linespace', 'generate_linespace', (['opts', 'step_inter', '"""transformation"""'], {'anchors': 'self.pz_mean'}), "(opts, step_inter, 'transformation', anchors=self.pz_mean)\n", (49284, 49342), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((49499, 49586), 'sampling_functions.generate_linespace', 'generate_linespace', (['opts', 'step_inter', '"""priors_interpolation"""'], {'anchors': 'self.pz_mean'}), "(opts, step_inter, 'priors_interpolation', anchors=self.\n pz_mean)\n", (49517, 49586), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((1504, 1540), 'tensorflow.cast', 'tf.cast', (['sample_size'], {'dtype': 'tf.int32'}), '(sample_size, dtype=tf.int32)\n', (1511, 1540), True, 'import tensorflow as tf\n'), ((4402, 4428), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['u_logit'], {}), '(u_logit)\n', (4419, 4428), True, 'import tensorflow as tf\n'), ((11092, 11144), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {'beta1': "opts['adam_beta1']"}), "(lr, beta1=opts['adam_beta1'])\n", (11114, 11144), True, 'import tensorflow as tf\n'), ((13186, 13277), 'model_nn.gaussian_encoder', 'gaussian_encoder', (['opts'], {'inputs': 'input_points', 'reuse': 'reuse', 'is_training': 'self.is_training'}), '(opts, inputs=input_points, reuse=reuse, is_training=self.\n is_training)\n', (13202, 13277), False, 'from model_nn import label_encoder, cat_encoder, gaussian_encoder\n'), ((15825, 15882), 'numpy.random.choice', 'np.random.choice', 
(['u_train_size', 'batch_size'], {'replace': '(False)'}), '(u_train_size, batch_size, replace=False)\n', (15841, 15882), True, 'import numpy as np\n'), ((17220, 17249), 'logging.error', 'logging.error', (['"""Training VAE"""'], {}), "('Training VAE')\n", (17233, 17249), False, 'import logging\n'), ((17892, 17931), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (["(WEIGHTS_FILE + '.meta')"], {}), "(WEIGHTS_FILE + '.meta')\n", (17907, 17931), True, 'import tensorflow as tf\n'), ((18151, 18191), 'logging.error', 'logging.error', (['"""Pretraining the encoder"""'], {}), "('Pretraining the encoder')\n", (18164, 18191), False, 'import logging\n'), ((20075, 20139), 'numpy.random.choice', 'np.random.choice', (['l_train_size', "opts['batch_size']"], {'replace': '(True)'}), "(l_train_size, opts['batch_size'], replace=True)\n", (20091, 20139), True, 'import numpy as np\n'), ((20434, 20532), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_sigma', "opts['batch_size']"], {'sampling_mode': '"""all_mixtures"""'}), "(opts, self.pz_mean, self.pz_sigma, opts['batch_size'],\n sampling_mode='all_mixtures')\n", (20443, 20532), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((21097, 21195), 'sampling_functions.sample_pz', 'sample_pz', (['opts', 'self.pz_mean', 'self.pz_sigma', "opts['batch_size']"], {'sampling_mode': '"""all_mixtures"""'}), "(opts, self.pz_mean, self.pz_sigma, opts['batch_size'],\n sampling_mode='all_mixtures')\n", (21106, 21195), False, 'from sampling_functions import sample_mixtures, sample_pz, generate_linespace\n'), ((34700, 34724), 'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (34708, 34724), True, 'import numpy as np\n'), ((42222, 42277), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {'replace': '(False)'}), '(train_size, batch_size, replace=False)\n', (42238, 42277), True, 'import numpy as np\n'), ((42729, 42759), 
'supervised_functions.one_hot', 'one_hot', (['data.labels[data_ids]'], {}), '(data.labels[data_ids])\n', (42736, 42759), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((45277, 45301), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (45290, 45301), False, 'import logging\n'), ((45456, 45480), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (45469, 45480), False, 'import logging\n'), ((45643, 45667), 'numpy.array', 'np.array', (['costs[::xstep]'], {}), '(costs[::xstep])\n', (45651, 45667), True, 'import numpy as np\n'), ((45696, 45715), 'numpy.array', 'np.array', (['acc_train'], {}), '(acc_train)\n', (45704, 45715), True, 'import numpy as np\n'), ((45744, 45762), 'numpy.array', 'np.array', (['acc_test'], {}), '(acc_test)\n', (45752, 45762), True, 'import numpy as np\n'), ((48458, 48525), 'numpy.concatenate', 'np.concatenate', (['(interpolation, end_anchors[:, np.newaxis])'], {'axis': '(1)'}), '((interpolation, end_anchors[:, np.newaxis]), axis=1)\n', (48472, 48525), True, 'import numpy as np\n'), ((12609, 12645), 'tensorflow.cast', 'tf.cast', (['sample_size'], {'dtype': 'tf.int32'}), '(sample_size, dtype=tf.int32)\n', (12616, 12645), True, 'import tensorflow as tf\n'), ((13352, 13383), 'tensorflow.exp', 'tf.exp', (["self.opts['init_e_std']"], {}), "(self.opts['init_e_std'])\n", (13358, 13383), True, 'import tensorflow as tf\n'), ((13733, 13829), 'model_nn.gaussian_encoder', 'gaussian_encoder', (['self.opts'], {'inputs': 'input_points', 'reuse': 'reuse', 'is_training': 'self.is_training'}), '(self.opts, inputs=input_points, reuse=reuse, is_training=\n self.is_training)\n', (13749, 13829), False, 'from model_nn import label_encoder, cat_encoder, gaussian_encoder\n'), ((19664, 19716), 'os.path.join', 'os.path.join', (['work_dir', '"""checkpoints"""', '"""trained-wae"""'], {}), "(work_dir, 'checkpoints', 'trained-wae')\n", (19676, 19716), False, 'import os\n'), 
((20739, 20804), 'numpy.random.choice', 'np.random.choice', (['u_train_size', "opts['batch_size']"], {'replace': '(False)'}), "(u_train_size, opts['batch_size'], replace=False)\n", (20755, 20804), True, 'import numpy as np\n'), ((24398, 24409), 'time.time', 'time.time', ([], {}), '()\n', (24407, 24409), False, 'import time\n'), ((24808, 24826), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (24816, 24826), True, 'import numpy as np\n'), ((25811, 25850), 'supervised_functions.relabelling_mask_from_probs', 'relabelling_mask_from_probs', (['mean_probs'], {}), '(mean_probs)\n', (25838, 25850), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((29484, 29508), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (29497, 29508), False, 'import logging\n'), ((29730, 29754), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (29743, 29754), False, 'import logging\n'), ((30349, 30373), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (30362, 30373), False, 'import logging\n'), ((31071, 31095), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (31084, 31095), False, 'import logging\n'), ((31193, 31217), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (31206, 31217), False, 'import logging\n'), ((31245, 31273), 'numpy.argmax', 'np.argmax', (['labeling'], {'axis': '(-1)'}), '(labeling, axis=-1)\n', (31254, 31273), True, 'import numpy as np\n'), ((31355, 31379), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (31368, 31379), False, 'import logging\n'), ((31482, 31506), 'logging.error', 'logging.error', (['debug_str'], {}), '(debug_str)\n', (31495, 31506), False, 'import logging\n'), ((31646, 31974), 'plot_functions.save_train', 'save_train', (['opts', 'data.data[:npics]', 'data.test_data[:npics]', 'data.test_labels[:npics]', 'rec_pics_test[:npics]', 'rec_pics_test[:npics]', 
'probs_pics_train', 'probs_pics_test', 'encoded', 'fixed_noise', 'sample_gen', 'losses', 'losses_rec', 'losses_match', 'losses_xent', 'kl_gau', 'kl_dis', 'work_dir', "('res_e%04d_mb%05d.png' % (epoch, it))"], {}), "(opts, data.data[:npics], data.test_data[:npics], data.\n test_labels[:npics], rec_pics_test[:npics], rec_pics_test[:npics],\n probs_pics_train, probs_pics_test, encoded, fixed_noise, sample_gen,\n losses, losses_rec, losses_match, losses_xent, kl_gau, kl_dis, work_dir,\n 'res_e%04d_mb%05d.png' % (epoch, it))\n", (31656, 31974), False, 'from plot_functions import save_train, save_vizu\n'), ((34566, 34590), 'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (34574, 34590), True, 'import numpy as np\n'), ((40867, 40891), 'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (40875, 40891), True, 'import numpy as np\n'), ((43397, 43452), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {'replace': '(False)'}), '(train_size, batch_size, replace=False)\n', (43413, 43452), True, 'import numpy as np\n'), ((43886, 43916), 'supervised_functions.one_hot', 'one_hot', (['data.labels[data_ids]'], {}), '(data.labels[data_ids])\n', (43893, 43916), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((44247, 44301), 'numpy.random.choice', 'np.random.choice', (['test_size', 'batch_size'], {'replace': '(False)'}), '(test_size, batch_size, replace=False)\n', (44263, 44301), True, 'import numpy as np\n'), ((44740, 44775), 'supervised_functions.one_hot', 'one_hot', (['data.test_labels[data_ids]'], {}), '(data.test_labels[data_ids])\n', (44747, 44775), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((12915, 12951), 'tensorflow.cast', 'tf.cast', (['sample_size'], {'dtype': 'tf.int32'}), '(sample_size, dtype=tf.int32)\n', (12922, 12951), True, 'import tensorflow as tf\n'), ((24442, 24466), 
'numpy.shape', 'np.shape', (['data.test_data'], {}), '(data.test_data)\n', (24450, 24466), True, 'import numpy as np\n'), ((25607, 25648), 'supervised_functions.get_mean_probs', 'get_mean_probs', (['batch_labels', 'probs_train'], {}), '(batch_labels, probs_train)\n', (25621, 25648), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((26138, 26198), 'numpy.random.choice', 'np.random.choice', (['te_size', "opts['batch_size']"], {'replace': '(False)'}), "(te_size, opts['batch_size'], replace=False)\n", (26154, 26198), True, 'import numpy as np\n'), ((27231, 27284), 'supervised_functions.accuracy', 'accuracy', (['batch_labels', 'probs_test', 'labelled_clusters'], {}), '(batch_labels, probs_test, labelled_clusters)\n', (27239, 27284), False, 'from supervised_functions import accuracy, get_mean_probs, relabelling_mask_from_probs, one_hot\n'), ((31428, 31461), 'numpy.array2string', 'np.array2string', (['pi0'], {'precision': '(3)'}), '(pi0, precision=3)\n', (31443, 31461), True, 'import numpy as np\n'), ((33381, 33425), 'logging.error', 'logging.error', (["('Reduction in lr: %f' % decay)"], {}), "('Reduction in lr: %f' % decay)\n", (33394, 33425), False, 'import logging\n'), ((41281, 41301), 'tensorflow.log', 'tf.log', (['logreg_preds'], {}), '(logreg_preds)\n', (41287, 41301), True, 'import tensorflow as tf\n'), ((13446, 13482), 'tensorflow.cast', 'tf.cast', (['sample_size'], {'dtype': 'tf.int32'}), '(sample_size, dtype=tf.int32)\n', (13453, 13482), True, 'import tensorflow as tf\n'), ((24981, 25041), 'numpy.random.choice', 'np.random.choice', (['tr_size', "opts['batch_size']"], {'replace': '(False)'}), "(tr_size, opts['batch_size'], replace=False)\n", (24997, 25041), True, 'import numpy as np\n')] |
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from estimators import npeet_entropy, gcmi_entropy
from utils.algebra import entropy_normal_theoretic
from utils.common import set_seed, timer_profile, Timer
from utils.constants import RESULTS_DIR
IMAGES_ENTROPY_DIR = RESULTS_DIR / "entropy" / "images"
class EntropyTest:
    """Benchmark entropy estimators against a known ground-truth entropy."""

    def __init__(self, x, entropy_true, verbose=False):
        # Estimators expect a 2-d array of shape (n_samples, n_features).
        self.x = np.expand_dims(x, axis=1) if x.ndim == 1 else x
        self.entropy_true = entropy_true
        self.verbose = verbose

    @timer_profile
    def npeet(self, k=3):
        """
        Non-parametric Entropy Estimation Toolbox.

        Parameters
        ----------
        k : int
            No. of nearest neighbors.
            See https://github.com/gregversteeg/NPEET/blob/master/npeet_doc.pdf.

        Returns
        -------
        float
            Estimated Entropy H(X).
        """
        return npeet_entropy(self.x, k=k)

    @timer_profile
    def gcmi(self):
        """
        Gaussian-Copula Mutual Information.

        Returns
        -------
        float
            Estimated Entropy H(X).
        """
        # gcmi expects features on the first axis, hence the transpose.
        return gcmi_entropy(self.x.T)

    def run_all(self):
        """Run every estimator; a failing estimator is recorded as NaN."""
        estimated = {}
        for estimator in (self.npeet, self.gcmi):
            try:
                value = estimator()
            except Exception:
                value = np.nan
            estimated[estimator.__name__] = value
        if self.verbose:
            for estimator_name, estimator_value in estimated.items():
                print(f"{estimator_name} H(X)={estimator_value:.3f} (true value: {self.entropy_true:.3f})")
        return estimated
def generate_normal_correlated(n_samples, n_features, sigma, loc=None):
    """Draw multivariate-normal samples with a random correlated covariance.

    A random factor matrix with entries ~ Uniform(0, sigma) is multiplied by
    its own transpose to obtain a positive-definite covariance.

    Returns
    -------
    tuple
        (samples, theoretic entropy, covariance matrix).
    """
    factor = np.random.uniform(low=0, high=sigma, size=(n_features, n_features))
    cov = factor @ factor.T  # positive definite by construction
    if loc is None:
        loc = np.zeros(n_features)
    x = np.random.multivariate_normal(mean=loc, cov=cov, size=n_samples)
    entropy_true = entropy_normal_theoretic(cov)
    return x, entropy_true, cov
def _entropy_normal_correlated(n_samples, n_features, param):
    """Correlated-normal test case; `param` bounds the covariance factors."""
    samples, true_entropy, _cov = generate_normal_correlated(n_samples, n_features, param)
    return samples, true_entropy
def _entropy_uniform(n_samples, n_features, param):
x = np.random.uniform(low=0, high=param, size=(n_samples, n_features))
entropy_true = n_features * np.log2(param)
return x, entropy_true
def _entropy_exponential(n_samples, n_features, param):
# here param is scale (inverse of rate)
x = np.random.exponential(scale=param, size=(n_samples, n_features))
entropy_true = n_features * (1. + np.log(param)) * np.log2(np.e)
return x, entropy_true
def _entropy_randint(n_samples, n_features, param):
x = np.random.randint(low=0, high=param + 1, size=(n_samples, n_features))
entropy_true = n_features * np.log2(param)
return x, entropy_true
def entropy_test(generator, n_samples=1000, n_features=10, parameters=np.linspace(1, 50, num=10), xlabel=''):
    """Sweep a distribution parameter, estimate H(X) at each value, and
    plot every estimator's curve against the analytic ground truth.

    The figure is written to IMAGES_ENTROPY_DIR, named after the generator.
    """
    curves = defaultdict(list)
    for param in tqdm(parameters, desc=f"{generator.__name__} test"):
        x, entropy_true = generator(n_samples, n_features, param)
        per_estimator = EntropyTest(x=x, entropy_true=entropy_true, verbose=False).run_all()
        curves['true'].append(entropy_true)
        for name, value in per_estimator.items():
            curves[name].append(value)
    truth = curves.pop('true')
    plt.figure()
    plt.plot(parameters, truth, label='true', ls='--', marker='x')
    for name, value in curves.items():
        plt.plot(parameters, value, label=name)
    plt.xlabel(xlabel)
    plt.ylabel('Estimated entropy, bits')
    plt.title(f"{generator.__name__.lstrip('_')}: len(X)={n_samples}, dim(X)={n_features}")
    plt.legend()
    plt.savefig(IMAGES_ENTROPY_DIR / f"{generator.__name__}.png")
    # plt.show()
def entropy_all_tests(n_samples=10_000, n_features=10):
    """Run the entropy benchmark for all four test distributions."""
    set_seed(26)
    cases = (
        (_entropy_randint, r'$X \sim $Randint$(0, x)$'),
        (_entropy_normal_correlated,
         r'$X \sim \mathcal{N}(0, \Sigma^\top \Sigma), \Sigma_{ij} \sim $Uniform$(0, x)$'),
        (_entropy_uniform, r'$X \sim $Uniform$(0, x)$'),
        (_entropy_exponential, r'$X \sim $Exp(scale=x)'),
    )
    for generator, axis_label in cases:
        entropy_test(generator, n_samples=n_samples, n_features=n_features, xlabel=axis_label)
    Timer.checkpoint()
# Script entry point: run the full benchmark suite when executed directly.
if __name__ == '__main__':
    entropy_all_tests()
| [
"estimators.npeet_entropy",
"utils.algebra.entropy_normal_theoretic",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.random.exponential",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"utils.common.set_seed",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"numpy.random... | [((1823, 1890), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': 'sigma', 'size': '(n_features, n_features)'}), '(low=0, high=sigma, size=(n_features, n_features))\n', (1840, 1890), True, 'import numpy as np\n'), ((2013, 2077), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'loc', 'cov': 'cov', 'size': 'n_samples'}), '(mean=loc, cov=cov, size=n_samples)\n', (2042, 2077), True, 'import numpy as np\n'), ((2097, 2126), 'utils.algebra.entropy_normal_theoretic', 'entropy_normal_theoretic', (['cov'], {}), '(cov)\n', (2121, 2126), False, 'from utils.algebra import entropy_normal_theoretic\n'), ((2396, 2462), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': 'param', 'size': '(n_samples, n_features)'}), '(low=0, high=param, size=(n_samples, n_features))\n', (2413, 2462), True, 'import numpy as np\n'), ((2647, 2711), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'param', 'size': '(n_samples, n_features)'}), '(scale=param, size=(n_samples, n_features))\n', (2668, 2711), True, 'import numpy as np\n'), ((2870, 2940), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(param + 1)', 'size': '(n_samples, n_features)'}), '(low=0, high=param + 1, size=(n_samples, n_features))\n', (2887, 2940), True, 'import numpy as np\n'), ((3087, 3113), 'numpy.linspace', 'np.linspace', (['(1)', '(50)'], {'num': '(10)'}), '(1, 50, num=10)\n', (3098, 3113), True, 'import numpy as np\n'), ((3143, 3160), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3154, 3160), False, 'from collections import defaultdict\n'), ((3178, 3229), 'tqdm.tqdm', 'tqdm', (['parameters'], {'desc': 'f"""{generator.__name__} test"""'}), "(parameters, desc=f'{generator.__name__} test')\n", (3182, 3229), False, 'from tqdm import tqdm\n'), ((3616, 3628), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3626, 3628), True, 'import matplotlib.pyplot as 
plt\n'), ((3633, 3702), 'matplotlib.pyplot.plot', 'plt.plot', (['parameters', 'entropy_true'], {'label': '"""true"""', 'ls': '"""--"""', 'marker': '"""x"""'}), "(parameters, entropy_true, label='true', ls='--', marker='x')\n", (3641, 3702), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3855), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3847, 3855), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3897), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimated entropy, bits"""'], {}), "('Estimated entropy, bits')\n", (3870, 3897), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4006), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4004, 4006), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4072), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(IMAGES_ENTROPY_DIR / f'{generator.__name__}.png')"], {}), "(IMAGES_ENTROPY_DIR / f'{generator.__name__}.png')\n", (4022, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4164), 'utils.common.set_seed', 'set_seed', (['(26)'], {}), '(26)\n', (4160, 4164), False, 'from utils.common import set_seed, timer_profile, Timer\n'), ((4710, 4728), 'utils.common.Timer.checkpoint', 'Timer.checkpoint', ([], {}), '()\n', (4726, 4728), False, 'from utils.common import set_seed, timer_profile, Timer\n'), ((995, 1021), 'estimators.npeet_entropy', 'npeet_entropy', (['self.x'], {'k': 'k'}), '(self.x, k=k)\n', (1008, 1021), False, 'from estimators import npeet_entropy, gcmi_entropy\n'), ((1229, 1251), 'estimators.gcmi_entropy', 'gcmi_entropy', (['self.x.T'], {}), '(self.x.T)\n', (1241, 1251), False, 'from estimators import npeet_entropy, gcmi_entropy\n'), ((1980, 2004), 'numpy.repeat', 'np.repeat', (['(0)', 'n_features'], {}), '(0, n_features)\n', (1989, 2004), True, 'import numpy as np\n'), ((2495, 2509), 'numpy.log2', 'np.log2', (['param'], {}), '(param)\n', (2502, 2509), True, 'import numpy as np\n'), ((2767, 2780), 'numpy.log2', 'np.log2', (['np.e'], {}), '(np.e)\n', 
(2774, 2780), True, 'import numpy as np\n'), ((2973, 2987), 'numpy.log2', 'np.log2', (['param'], {}), '(param)\n', (2980, 2987), True, 'import numpy as np\n'), ((3773, 3832), 'matplotlib.pyplot.plot', 'plt.plot', (['parameters', 'estimator_value'], {'label': 'estimator_name'}), '(parameters, estimator_value, label=estimator_name)\n', (3781, 3832), True, 'import matplotlib.pyplot as plt\n'), ((484, 509), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (498, 509), True, 'import numpy as np\n'), ((2750, 2763), 'numpy.log', 'np.log', (['param'], {}), '(param)\n', (2756, 2763), True, 'import numpy as np\n')] |
"""
Script for showing results of GFDL-CM3 experiments
Author : <NAME>
Date : 21 July 2021
Version : 4 - subsamples random weight class (#8) for mmmean
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import numpy as np
import cmocean as cmocean
import warnings
# Silence noisy third-party warnings so console output stays readable.
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
### LRP param
# Defaults for backwards optimization (BWO) used by the LRP analysis.
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .001
### Plotting defaults
# LaTeX text rendering with the Avant Garde sans-serif family for all figures.
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
# Local paths to the model/observation data and the label output directory.
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataENS = '/Users/zlabe/Data/SMILE/'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
directoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
###############################################################################
###############################################################################
# Climate model large ensembles (SMILEs) used as classes, plus the analysis
# variable (T2M), region, and time period for this experiment.
modelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',
             'GFDL_CM3','GFDL_ESM2M','lens']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'
seasons = ['annual']
variq = 'T2M'
reg_name = 'LowerArctic'
timeper = 'historical'
###############################################################################
###############################################################################
# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',
# 'GFDL_ESM2M','lens']
# pickSMILE = ['CCCma_canesm2','MPI','lens']
# Optional subset of SMILEs to analyze; an empty list means use all models.
pickSMILE = []
if len(pickSMILE) >= 1:
    lenOfPicks = len(pickSMILE)
else:
    lenOfPicks = len(modelGCMs)
###############################################################################
###############################################################################
# Restrict the analysis to land-only or ocean-only grid cells (mutually
# exclusive); 'none' applies no mask.
land_only = False
ocean_only = False
if land_only == True:
    maskNoiseClass = 'land'
elif ocean_only == True:
    maskNoiseClass = 'ocean'
else:
    maskNoiseClass = 'none'
###############################################################################
###############################################################################
# Optional preprocessing switches: remove meridional/annual means.
rm_merid_mean = False
rm_annual_mean = False
###############################################################################
###############################################################################
# Optional preprocessing switches: remove ensemble/observational means.
rm_ensemble_mean = False
rm_observational_mean = False
###############################################################################
###############################################################################
# When enabled, anomalies are computed against a 30-year baseline matched
# to the selected time period.
calculate_anomalies = False
if calculate_anomalies == True:
    if timeper == 'historical':
        baseline = np.arange(1951,1980+1,1)
    elif timeper == 'future':
        baseline = np.arange(2021,2050+1,1)
    else:
        print(ValueError('WRONG TIMEPER!'))
###############################################################################
###############################################################################
# window = 0 disables the running-standard-deviation preprocessing below.
window = 0
ensTypeExperi = 'ENS'
# shuffletype = 'TIMEENS'
# shuffletype = 'ALLENSRAND'
# shuffletype = 'ALLENSRANDrmmean'
shuffletype = 'RANDGAUSS'
sizeOfTwin = 4 # name of experiment for adding noise class #8
if sizeOfTwin > 0:
    sizeOfTwinq = 1
else:
    sizeOfTwinq = sizeOfTwin
###############################################################################
###############################################################################
# Derive the analysis years and array-reshaping flags from the experiment
# type and smoothing window; a nonzero window trims the first `window` years.
if ensTypeExperi == 'ENS':
    if window == 0:
        rm_standard_dev = False
        if timeper == 'historical':
            yearsall = np.arange(1950,2019+1,1)
        elif timeper == 'future':
            yearsall = np.arange(2020,2099+1,1)
        else:
            print(ValueError('WRONG TIMEPER!'))
            sys.exit()
        ravel_modelens = False
        ravelmodeltime = False
    else:
        rm_standard_dev = True
        if timeper == 'historical':
            yearsall = np.arange(1950+window,2019+1,1)
        elif timeper == 'future':
            yearsall = np.arange(2020+window,2099+1,1)
        else:
            print(ValueError('WRONG TIMEPER!'))
            sys.exit()
        ravelmodeltime = False
        ravel_modelens = True
elif ensTypeExperi == 'GCM':
    if window == 0:
        rm_standard_dev = False
        yearsall = np.arange(1950,2019+1,1)
        ravel_modelens = False
        ravelmodeltime = False
    else:
        rm_standard_dev = True
        if timeper == 'historical':
            yearsall = np.arange(1950,2019+1,1)
        elif timeper == 'future':
            yearsall = np.arange(2020,2099+1,1)
        else:
            print(ValueError('WRONG TIMEPER!'))
            sys.exit()
        ravelmodeltime = False
        ravel_modelens = True
###############################################################################
###############################################################################
# Ensemble size and whether the LENS / RANDOM classes participate.
numOfEns = 16
lensalso = True
if len(pickSMILE) == 0:
    if modelGCMs[-1] == 'RANDOM':
        randomalso = True
    else:
        randomalso = False
elif len(pickSMILE) != 0:
    if pickSMILE[-1] == 'RANDOM':
        randomalso = True
    else:
        randomalso = False
lentime = len(yearsall)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
# Total number of classes = picked models + optional noise-twin class.
num_of_class = lenOfPicks + sizeOfTwinq
###############################################################################
###############################################################################
lrpRule = 'z'
normLRP = True
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Picking experiment to save
# Map the preprocessing-flag combination onto a named experiment; if no
# combination matches, the sentinel value below aborts the script.
typeOfAnalysis = 'issueWithExperiment'
# Experiment #1
if rm_ensemble_mean == True:
    if window > 1:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-1'
# Experiment #2
if rm_ensemble_mean == True:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-2'
# Experiment #3 (raw data)
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-3'
                        if variq == 'T2M':
                            integer = 20 # random noise value to add/subtract from each grid point
                        elif variq == 'P':
                            integer = 20 # random noise value to add/subtract from each grid point
                        elif variq == 'SLP':
                            integer = 20 # random noise value to add/subtract from each grid point
# Experiment #4
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == True:
                        typeOfAnalysis = 'Experiment-4'
                        if variq == 'T2M':
                            integer = 25 # random noise value to add/subtract from each grid point
                        elif variq == 'P':
                            integer = 15 # random noise value to add/subtract from each grid point
                        elif variq == 'SLP':
                            integer = 5 # random noise value to add/subtract from each grid point
# Experiment #5
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == True:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-5'
# Experiment #6
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == False:
            if rm_merid_mean == False:
                if rm_observational_mean == True:
                    if rm_annual_mean == True:
                        typeOfAnalysis = 'Experiment-6'
# Experiment #7
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == True:
            if rm_merid_mean == False:
                if rm_observational_mean == True:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-7'
# Experiment #8
if rm_ensemble_mean == False:
    if window == 0:
        if calculate_anomalies == True:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-8'
                        if variq == 'T2M':
                            integer = 1 # random noise value to add/subtract from each grid point
                        elif variq == 'P':
                            integer = 1 # random noise value to add/subtract from each grid point
                        elif variq == 'SLP':
                            integer = 5 # random noise value to add/subtract from each grid point
# Experiment #9
if rm_ensemble_mean == False:
    if window > 1:
        if calculate_anomalies == True:
            if rm_merid_mean == False:
                if rm_observational_mean == False:
                    if rm_annual_mean == False:
                        typeOfAnalysis = 'Experiment-9'
print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
# Abort when the flag combination does not match any known experiment.
if typeOfAnalysis == 'issueWithExperiment':
    sys.exit('Wrong parameters selected to analyze')
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Read in labels for each experiment
# Load the predicted observation labels for all 11 GFDL perturbation
# experiments (factorObs 0..10) and print them for visual comparison.
experinumber = 11
labels = []
for i in range(experinumber):
    factorObs = i # factor to add to obs
    ### Select how to save files
    # File-name stem encodes every experiment setting so runs are unique.
    if land_only == True:
        saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
    elif ocean_only == True:
        saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
    else:
        saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
    print('*Filename == < %s >' % saveData)
    labelexperi = np.genfromtxt(directoryoutput + 'obsLabels_' + saveData + '.txt',unpack=True)
    labels.append(labelexperi)
labels = np.asarray(labels).astype(int)
# Print each experiment's label sequence; linewidth=np.inf keeps every
# yearly label on a single console line.
np.set_printoptions(linewidth=np.inf)
print('\n\n\n')
print('%100s' % labels[0])
print('-----------Regular data-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[1])
print('-----------Warm mean state by 3C----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[2])
print('-----------Cool mean state by 3c-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[3])
print('-----------Warm recent 10 years by 3c-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[4])
print('-----------Cool recent 10 years by 3c-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[5])
print('-----------Warm North Pole by 5C-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[6])
print('-----------Cool North Pole by 5C-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[7])
print('-----------Warm Lower Arctic by 5C-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[8])
print('-----------Cool Lower Arctic by 5C-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[9])
print('-----------Warm first 50 years by 3C-----------\n')
np.set_printoptions(linewidth=np.inf)
print('%100s' % labels[10])
print('-----------Cool first 50 years by 3C-----------\n')
# if factorObs == 0:
# data = data
# elif factorObs == 1: # warm its mean state
# GFDL = data[4,:,:,:,:]
# GFDLwarmer = GFDL + 3
# data[4,:,:,:,:] = GFDLwarmer
# elif factorObs == 2: # cool its mean state
# GFDL = data[4,:,:,:,:]
# GFDLcooler = GFDL - 3
# data[4,:,:,:,:] = GFDLcooler
# elif factorObs == 3: # warm recent 10 years
# GFDL = data[4,:,:,:,:]
# GFDLbefore = GFDL[:,:-10,:,:]
# GFDLafter = GFDL[:,-10:,:,:] + 3
# GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
# data[4,:,:,:,:] = GFDLq
# elif factorObs == 4: # cool recent 10 years
# GFDL = data[4,:,:,:,:]
# GFDLbefore = GFDL[:,:-10,:,:]
# GFDLafter = GFDL[:,-10:,:,:] - 3
# GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
# data[4,:,:,:,:] = GFDLq
# elif factorObs == 5: # warm the North Pole
# sizeofNP = 10
# GFDL = data[4,:,:,:,:]
# warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5
# addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP
# GFDL[:,:,sizeofNP:,:] = addtoclimoNP
# data[4,:,:,:,:] = GFDL
# elif factorObs == 6: # cool the North Pole
# sizeofNP = 10
# GFDL = data[4,:,:,:,:]
# coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5
# addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP
# GFDL[:,:,sizeofNP:,:] = addtoclimoNP
# data[4,:,:,:,:] = GFDL
# elif factorObs == 7: # warm the Lower Arctic
# sizeofLA = 5
# GFDL = data[4,:,:,:,:]
# warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5
# addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA
# GFDL[:,:,:sizeofLA,:] = addtoclimoLA
# data[4,:,:,:,:] = GFDL
# elif factorObs == 8: # cool the Lower Arctic
# sizeofLA = 5
# GFDL = data[4,:,:,:,:]
# coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5
# addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA
# GFDL[:,:,:sizeofLA,:] = addtoclimoLA
# data[4,:,:,:,:] = GFDL
# elif factorObs == 9: # warm early 50 years
# GFDL = data[4,:,:,:,:]
# GFDLafter = GFDL[:,50:,:,:]
# GFDLbefore = GFDL[:,:50,:,:] + 3
# GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
# data[4,:,:,:,:] = GFDLq
# elif factorObs == 10: # cool early 50 years
# GFDL = data[4,:,:,:,:]
# GFDLafter = GFDL[:,50:,:,:]
# GFDLbefore = GFDL[:,:50,:,:] - 3
# GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
# data[4,:,:,:,:] = GFDLq | [
"numpy.arange",
"numpy.asarray",
"sys.exit",
"warnings.simplefilter",
"numpy.genfromtxt",
"warnings.filterwarnings",
"matplotlib.pyplot.rc",
"numpy.set_printoptions"
] | [((296, 358), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (317, 358), False, 'import warnings\n'), ((359, 421), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (382, 421), False, 'import warnings\n'), ((527, 554), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (533, 554), True, 'import matplotlib.pyplot as plt\n'), ((554, 627), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (560, 627), True, 'import matplotlib.pyplot as plt\n'), ((12410, 12447), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (12429, 12447), True, 'import numpy as np\n'), ((12538, 12575), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (12557, 12575), True, 'import numpy as np\n'), ((12658, 12695), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (12677, 12695), True, 'import numpy as np\n'), ((12779, 12816), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (12798, 12816), True, 'import numpy as np\n'), ((12905, 12942), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (12924, 12942), True, 'import numpy as np\n'), ((13031, 13068), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (13050, 13068), True, 'import numpy as np\n'), ((13152, 13189), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (13171, 13189), True, 'import numpy as np\n'), ((13273, 13310), 
'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (13292, 13310), True, 'import numpy as np\n'), ((13396, 13433), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (13415, 13433), True, 'import numpy as np\n'), ((13519, 13556), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (13538, 13556), True, 'import numpy as np\n'), ((13644, 13681), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'np.inf'}), '(linewidth=np.inf)\n', (13663, 13681), True, 'import numpy as np\n'), ((10488, 10536), 'sys.exit', 'sys.exit', (['"""Wrong parameters selected to analyze"""'], {}), "('Wrong parameters selected to analyze')\n", (10496, 10536), False, 'import sys\n'), ((12255, 12333), 'numpy.genfromtxt', 'np.genfromtxt', (["(directoryoutput + 'obsLabels_' + saveData + '.txt')"], {'unpack': '(True)'}), "(directoryoutput + 'obsLabels_' + saveData + '.txt', unpack=True)\n", (12268, 12333), True, 'import numpy as np\n'), ((2993, 3021), 'numpy.arange', 'np.arange', (['(1951)', '(1980 + 1)', '(1)'], {}), '(1951, 1980 + 1, 1)\n', (3002, 3021), True, 'import numpy as np\n'), ((12378, 12396), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (12388, 12396), True, 'import numpy as np\n'), ((3067, 3095), 'numpy.arange', 'np.arange', (['(2021)', '(2050 + 1)', '(1)'], {}), '(2021, 2050 + 1, 1)\n', (3076, 3095), True, 'import numpy as np\n'), ((3890, 3918), 'numpy.arange', 'np.arange', (['(1950)', '(2019 + 1)', '(1)'], {}), '(1950, 2019 + 1, 1)\n', (3899, 3918), True, 'import numpy as np\n'), ((4245, 4282), 'numpy.arange', 'np.arange', (['(1950 + window)', '(2019 + 1)', '(1)'], {}), '(1950 + window, 2019 + 1, 1)\n', (4254, 4282), True, 'import numpy as np\n'), ((4612, 4640), 'numpy.arange', 'np.arange', (['(1950)', '(2019 + 1)', '(1)'], {}), '(1950, 2019 + 1, 1)\n', (4621, 4640), True, 'import numpy as np\n'), 
((3972, 4000), 'numpy.arange', 'np.arange', (['(2020)', '(2099 + 1)', '(1)'], {}), '(2020, 2099 + 1, 1)\n', (3981, 4000), True, 'import numpy as np\n'), ((4071, 4081), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4079, 4081), False, 'import sys\n'), ((4334, 4371), 'numpy.arange', 'np.arange', (['(2020 + window)', '(2099 + 1)', '(1)'], {}), '(2020 + window, 2099 + 1, 1)\n', (4343, 4371), True, 'import numpy as np\n'), ((4440, 4450), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4448, 4450), False, 'import sys\n'), ((4800, 4828), 'numpy.arange', 'np.arange', (['(1950)', '(2019 + 1)', '(1)'], {}), '(1950, 2019 + 1, 1)\n', (4809, 4828), True, 'import numpy as np\n'), ((4882, 4910), 'numpy.arange', 'np.arange', (['(2020)', '(2099 + 1)', '(1)'], {}), '(2020, 2099 + 1, 1)\n', (4891, 4910), True, 'import numpy as np\n'), ((4981, 4991), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4989, 4991), False, 'import sys\n')] |
import _tkinter
import PIL
import numpy as np
from tkinter import *
from tkinter import filedialog
from PIL import Image, ImageTk
# global variables
# Shared application state mutated by browse()/get_message()/clear().
path = ''                 # file-system path of the currently selected image
message = ''              # message most recently decoded from the image
img = None                # PIL.Image instance of the selected image
img_as_np_array = None    # image pixels as an unsigned-byte numpy array
width = None              # image width in pixels
height = None             # image height in pixels
popup_window = None       # Toplevel showing the image (None until opened)
popup_window2 = None      # Toplevel showing the decoded text (None until opened)
# create window and set title
window = Tk()
window.title('Simple Steganography App')
def close(*args):
    """Destroy every non-None popup window passed in.

    Accepts any number of Toplevel windows (or None placeholders), so
    callers can pass possibly-unopened popups without checking first.
    """
    for popup in args:
        if popup is not None:  # identity check is the idiomatic None test
            popup.destroy()
# clearWidgets empties the given frames; browse (below) looks for an image on your computer
def clearWidgets(*args):
    """Remove every child widget from each frame/container given."""
    for container in args:
        for child in container.winfo_children():
            child.destroy()
def browse():
    """Ask the user for an image file and load it into the module globals.

    On success, stores the PIL image, its unsigned-byte numpy array, and
    its dimensions globally and shows the chosen path in frame1.  If the
    dialog is cancelled (PIL raises AttributeError on the empty result),
    the user is prompted to select an image instead.
    """
    global path, img, img_as_np_array, width, height
    clear()
    close(popup_window, popup_window2)
    clearWidgets(frame1, frame2, frame3)
    try:
        # NOTE: the option is 'filetypes' (plural, a sequence of
        # (label, pattern) pairs); the previous singular 'filetype' is
        # rejected by Tk as a bad option.
        path = filedialog.askopenfilename(initialdir='/', title='Select image:',
                                          filetypes=(('png', '*.png'), ('jpg', '*.jpg'), ('jpeg', '*.jpeg')))
        img = PIL.Image.open(path)
        width, height = img.size
        # 'B' == unsigned byte: pixel values 0-255
        img_as_np_array = np.asarray(img, dtype=np.dtype('B'))
        Label(frame1, text=f'Image Selected: {path}').grid(row=0, column=0)
    except AttributeError:
        # dialog cancelled / no valid image selected
        Label(frame1, text='Select Image').grid(row=0, column=0)
def get_message():
    """Decode a message hidden in the two least-significant bits of each
    color channel, delimited by '<<<<<' and '>>>>>' markers, then show it.
    """
    global message
    if img:
        clearWidgets(frame3)
        # Extract the last and second-to-last bit of every channel value.
        last_lsb = img_as_np_array % 2
        second_last_lsb = (img_as_np_array // 2) % 2
        # Interleave the two bit planes per channel, flatten to a bit
        # stream, group into bytes, and pack each byte into an integer.
        # NOTE(review): the reshape assumes a 3-channel image whose total
        # bit count is divisible by 8 -- confirm for the supported inputs.
        stacked_array = np.stack((second_last_lsb, last_lsb), axis=3)
        flat_array = np.ndarray.flatten(stacked_array)
        bytes_array = flat_array.reshape((width * height * 2 * 3 // 8, 8))
        integer_array = np.packbits(bytes_array, bitorder='big')
        entire_img = ''
        message = ''
        reading_message = False
        indicator_start = '<<<<<'
        indicator_end = '>>>>>'
        # Scan the decoded characters: start collecting after the start
        # marker, stop at the end marker.
        for i in integer_array:
            if len(entire_img) >= 5:
                if entire_img[-5:] == indicator_start:
                    reading_message = True
            if reading_message:
                message += chr(i)
            if entire_img[-5:] == indicator_end:
                reading_message = False
                # Drop the trailing marker characters captured while
                # reading (presumably the end marker plus one byte --
                # TODO confirm against the encoder's format).
                message = message[:-6]
                break
            entire_img += chr(i)
        text_window()
def clear():
    """Forget the currently loaded image and any decoded message."""
    global path, message, img, img_as_np_array, width, height
    path = message = ''
    img = img_as_np_array = width = height = None
def image_window():
    """Open a popup window showing the selected image at half size.

    Does nothing when no image has been loaded yet.
    """
    global popup_window
    if img is not None:  # identity comparison is the idiomatic None check
        popup_window = Toplevel(window)
        popup_window.title('Images')
        # Shrink to 50% so large images fit on screen.
        resized_im = img.resize((round(img.size[0] * 0.5), round(img.size[1] * 0.5)))
        tk_img = ImageTk.PhotoImage(resized_im)
        image = Label(popup_window, image=tk_img)
        # Keep a reference on the widget so the PhotoImage is not
        # garbage-collected while the popup is open.
        image.image = tk_img
        Label(popup_window, text='Image').grid(row=0, column=0)
        image.grid(row=1, column=0)
        popup_window.mainloop()
def text_window():
    """Show the decoded message in a read-only text popup, or report in
    frame2 why there is nothing to show yet.
    """
    global popup_window2
    try:
        if message != '':
            # Replace any previously opened text popup.
            close(popup_window2)
            popup_window2 = Toplevel(window)
            popup_window2.title('Text')
            text_box = Text(popup_window2)
            text_box.insert(INSERT, message)
            # Disable editing so the decoded message is read-only.
            text_box.config(state=DISABLED)
            text_box.grid(row=2, column=0)
            Label(popup_window2, text='Your Message').grid(row=2, column=1)
            Label(frame2, text='Message displayed').grid(row=1, column=0)
            popup_window2.mainloop()
        elif img and message == '':
            Label(frame2, text='No Message Found in this image').grid(row=1, column=0)
        else:
            Label(frame2, text='Submit your image first').grid(row=1, column=0)
    # NOTE(review): presumably guards against Tcl errors from destroyed
    # widgets when popups are closed by the user -- confirm.
    except _tkinter.TclError:
        pass
def save():
    """Write the decoded message to '<image name> message.txt' beside the image.

    Requires that a message has already been extracted via get_message();
    otherwise the user is told to show the message first.
    """
    if message != '':
        clearWidgets(frame3)
        # Strip the image extension robustly: the old path[:-4] assumed a
        # 3-character extension and mangled names ending in '.jpeg'.
        file_name = path.rsplit('.', 1)[0] + ' message.txt'
        with open(file_name, 'w') as f:
            f.write(message)
        Label(frame3, text=f'Message saved at {file_name}').grid(row=2, column=0)
    else:
        # message is always a string here, so 'elif message == ""' was
        # just a redundant spelling of else.
        Label(frame3, text='Show Message First').grid(row=2, column=0)
# widgets used in GUI
# -------------------------
# Row 0: selected-image status label + Browse button (also opens preview).
frame1 = Frame(window)
frame1.grid(row=0, column=0)
Label(frame1, text=f'Select Image').grid(row=0, column=0)
browse_button = Button(window, text='Browse',command=lambda: [browse(), image_window()])
browse_button.grid(row=0, column=1)
# ------------------------
# Row 1: decode-status label + Show Message button (decodes, then shows).
frame2 = Frame(window)
frame2.grid(row=1, column=0)
submit_button = Button(window, text='Show Message', command=lambda: [get_message(), text_window()])
submit_button.grid(row=1, column=1)
# ------------------------
# Row 2: save-status label + Save button (writes the message to a .txt file).
frame3 = Frame(window)
frame3.grid(row=2, column=0)
save_button = Button(window, text='Save', command=save)
save_button.grid(row=2, column=1)
# Start the Tk event loop (blocks until the window is closed).
window.mainloop()
| [
"numpy.packbits",
"PIL.Image.open",
"PIL.ImageTk.PhotoImage",
"numpy.stack",
"numpy.ndarray.flatten",
"numpy.dtype",
"tkinter.filedialog.askopenfilename"
] | [((821, 958), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select image:"""', 'filetype': "(('png', '*.png'), ('jpg', '*.jpg'), ('jpeg', '*.jpeg'))"}), "(initialdir='/', title='Select image:', filetype=\n (('png', '*.png'), ('jpg', '*.jpg'), ('jpeg', '*.jpeg')))\n", (847, 958), False, 'from tkinter import filedialog\n'), ((1010, 1030), 'PIL.Image.open', 'PIL.Image.open', (['path'], {}), '(path)\n', (1024, 1030), False, 'import PIL\n'), ((1493, 1538), 'numpy.stack', 'np.stack', (['(second_last_lsb, last_lsb)'], {'axis': '(3)'}), '((second_last_lsb, last_lsb), axis=3)\n', (1501, 1538), True, 'import numpy as np\n'), ((1560, 1593), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['stacked_array'], {}), '(stacked_array)\n', (1578, 1593), True, 'import numpy as np\n'), ((1693, 1733), 'numpy.packbits', 'np.packbits', (['bytes_array'], {'bitorder': '"""big"""'}), "(bytes_array, bitorder='big')\n", (1704, 1733), True, 'import numpy as np\n'), ((2787, 2817), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['resized_im'], {}), '(resized_im)\n', (2805, 2817), False, 'from PIL import Image, ImageTk\n'), ((1112, 1125), 'numpy.dtype', 'np.dtype', (['"""B"""'], {}), "('B')\n", (1120, 1125), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from ..wind_profile_clustering.read_requested_data import get_wind_data
from .single_loc_plots import plot_figure_5a
from .plot_maps import plot_all
# TODO import all functions needed
# TODO add processing functionality
# TODO add plot maps single functions
# TODO careful old code: height range from top down
# TODO add stuff to config: and remove standalone config
# # Plots of figure 5 use data from 2016.
# start_year = 2016
# end_year = 2016
class ResourceAnalysis:
    """Wind-resource analysis driven by a configuration object.

    Provides single-grid-point evaluation of the optimal wind-harvesting
    height inside a floor/ceiling band, the corresponding time-series plot,
    and delegation to the map-plotting helpers.
    """
    #TODO inherit from config... or as is set config object as config item?
    def __init__(self, config):
        # Set configuration from Config class object
        # TODO yes/no? super().__init__(config)
        setattr(self, 'config', config)
    def single_loc_plot(self,
                        loc=(51.0, 1.0),
                        time_ids=None,
                        ceiling=500,
                        floor=50):
        """Plot the optimal harvesting height over time for one grid point.

        Args:
            loc (tuple): (latitude, longitude) of the evaluated grid point.
            time_ids: Optional sample ids restricting the time series;
                ``None`` evaluates all available samples.
            ceiling (int): Upper height bound [m] of the harvesting band.
            floor (int): Lower height bound [m] of the harvesting band.
        """
        hours, v_req_alt, v_ceilings, optimal_heights = \
            self.eval_single_location(loc=loc,
                                      time_ids=time_ids,
                                      ceilings=[ceiling],
                                      floor=floor)
        ref_height = self.config.General.ref_height
        # Wind-speed time series at the configured reference height.
        v_at_ref_height = v_req_alt[
            :, self.config.Data.height_range.index(ref_height)]
        # TODO SINGLE ceiling selected hard coded -> config
        dates = plot_figure_5a(hours, v_ceilings[:, 0], optimal_heights[:, 0],
                               height_range=None,
                               ref_velocity=v_at_ref_height,
                               height_bounds=[floor, ceiling],
                               v_bounds=[3, 25], # TODO v_bounds from average over all clusters - not really showing max v_bounds - maybe just use max in the end?
                               show_n_hours=24*7)
        plt.savefig(
            self.config.IO.result_dir
            + 'optimal_harvesting_height_over_time'
            + '_{:.2f}_lat_{:.2f}_lon_{}_time.pdf'
            .format(loc[0], loc[1], dates[0]))
    # plot_figure_5b(hours, v_req_alt, v_ceilings[:, 0], optimal_heights[:, 1], heights_of_interest,
    #                analyzed_heights_ids['ceilings'][1], analyzed_heights_ids['floor'])
    # Plots of figure 6 use data from 2011 until 2017.
    # start_year = 2011
    # end_year = 2017
    # hours, v_req_alt, v_ceilings, optimal_heights = eval_single_location(eval_lat, eval_lon, start_year, end_year)
    # plot_figure_6a(optimal_heights[:, 1])
    # plot_figure_6b(optimal_heights[:, 1])
    # plot_weibull_fixed_and_ceiling(v_req_alt, heights_of_interest, [100., 500., 1500.], v_ceilings[:, 1])  # figure 6c
    # plot_figure_6d(v_ceilings, analyzed_heights['ceilings'])
    # plot_weibull_fixed_and_ceiling(v_req_alt, heights_of_interest, [1500.], v_ceilings[:, 0], 300.)  # figure 6e
    def eval_single_location(self,
                             loc=None,
                             time_ids=None,
                             ceilings=[500],
                             floor=50):
        """Execute analyses on the data of a single grid point.

        Args:
            loc (tuple, optional): (latitude, longitude) of the grid point;
                defaults to the first configured location.
            time_ids (list, optional): Sample ids selecting a time subset;
                ``None`` (empty selection) evaluates all samples.
            ceilings (list): Upper height bounds [m], one per analyzed band.
                NOTE(review): mutable default argument — harmless here since
                it is never mutated, but consider ``ceilings=None`` + default.
            floor (int): Lower height bound [m] shared by all bands.

        Returns:
            tuple of ndarray: Hour timestamps, wind speeds over the configured
                height range, optimal wind speeds per ceiling, and the time
                series of corresponding optimal heights.
        """
        # TODO update docstring, include params to config
        if time_ids is not None:
            # TODO change to input real time?
            # TODO use config input sample ids?
            sel_sample_ids = time_ids
        else:
            sel_sample_ids = []
        if loc is None:
            loc = self.config.Data.locations[0]
        # TODO more height range below ceiling!?
        data = get_wind_data(self.config,
                             locs=[loc],
                             sel_sample_ids=sel_sample_ids)
        hours = data['datetime']
        # Absolute horizontal wind speed from the two velocity components.
        v_req_alt = np.sqrt(data['wind_speed_east']**2
                            + data['wind_speed_north']**2)
        v_ceilings = np.zeros((len(hours), len(ceilings)))
        optimal_heights = np.zeros((len(hours), len(ceilings)))
        ceilings_ids = [self.config.Data.height_range.index(c)
                        for c in ceilings]
        floor_id = self.config.Data.height_range.index(floor)
        for i, ceiling_id in enumerate(ceilings_ids):
            # Find the height maximizing the wind speed for each hour.
            v_ceilings[:, i] = np.amax(v_req_alt[:, floor_id:ceiling_id + 1],
                                       axis=1)
            print(v_ceilings)
            v_ceiling_ids = np.argmax(v_req_alt[:, floor_id:ceiling_id + 1],
                                      axis=1) + floor_id
            print(v_ceiling_ids)
            # Map the winning height-range indices back to heights in metres.
            optimal_heights[:, i] = [self.config.Data.height_range[max_id]
                                     for max_id in v_ceiling_ids]
        return hours, v_req_alt, v_ceilings, optimal_heights
    # TODO include processing
    def plot_all_maps(self):
        """Produce the map plots (delegates to ``plot_maps.plot_all``)."""
        plot_all()
    # TODO include plot_maps
    def height_range_sanity_check(self):
        """Load one sample with the default and then the DOWA height range.

        Comparison is manual, via whatever output ``get_wind_data`` produces
        for each grid — TODO confirm that is sufficient for the check.
        """
        get_wind_data(self.config, sel_sample_ids=[0])
        self.config.update({
            'Data': {
                'height_range': self.config.Data.height_range_DOWA}
            })
        print('DOWA:')
get_wind_data(self.config, sel_sample_ids=[0]) | [
"numpy.amax",
"numpy.sqrt",
"numpy.argmax"
] | [((4325, 4394), 'numpy.sqrt', 'np.sqrt', (["(data['wind_speed_east'] ** 2 + data['wind_speed_north'] ** 2)"], {}), "(data['wind_speed_east'] ** 2 + data['wind_speed_north'] ** 2)\n", (4332, 4394), True, 'import numpy as np\n'), ((4866, 4920), 'numpy.amax', 'np.amax', (['v_req_alt[:, floor_id:ceiling_id + 1]'], {'axis': '(1)'}), '(v_req_alt[:, floor_id:ceiling_id + 1], axis=1)\n', (4873, 4920), True, 'import numpy as np\n'), ((5018, 5074), 'numpy.argmax', 'np.argmax', (['v_req_alt[:, floor_id:ceiling_id + 1]'], {'axis': '(1)'}), '(v_req_alt[:, floor_id:ceiling_id + 1], axis=1)\n', (5027, 5074), True, 'import numpy as np\n')] |
from torch.utils.data.sampler import Sampler
from torch.utils.data.sampler import BatchSampler
import torch
import numpy as np
import itertools
from collections import OrderedDict
class _RepeatSampler(object):
"""
Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class HensmanDataLoader(torch.utils.data.dataloader.DataLoader):
    """
    Dataloader when using minibatching with Stochastic Variational Inference.

    The batch sampler is wrapped in ``_RepeatSampler`` so the iterator
    created once in ``__init__`` never exhausts; ``__iter__`` then draws a
    fixed number of batches from that single persistent iterator instead of
    constructing a fresh one (and, with ``num_workers > 0``, fresh worker
    processes) every epoch.
    """
    def __init__(self, dataset, batch_sampler, num_workers):
        super().__init__(dataset, batch_sampler=_RepeatSampler(batch_sampler), num_workers=num_workers)
        # Build the (infinite) iterator once; reused by every __iter__ call.
        self.iterator = super().__iter__()
    def __len__(self):
        # Number of batches in one pass of the wrapped (un-repeated) sampler.
        return len(self.batch_sampler.sampler)
    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the shared iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class SubjectSampler(Sampler):
    """Yield sample indices grouped per individual, in random subject order.

    Assumes the data source stores P subjects with exactly T consecutive
    samples each, so subject ``p`` owns indices ``[p*T, (p+1)*T)``.
    """
    def __init__(self, data_source, P, T):
        super(SubjectSampler, self).__init__(data_source)
        self.data_source = data_source
        self.P = P
        self.T = T
    def __iter__(self):
        # Draw a random ordering of subjects, then emit each subject's
        # T consecutive sample indices as one uninterrupted run.
        subject_order = np.arange(self.P)
        np.random.shuffle(subject_order)
        flat_indices = [sample_idx
                        for subject in subject_order
                        for sample_idx in range(self.T * subject,
                                                self.T * (subject + 1))]
        return iter(flat_indices)
    def __len__(self):
        return len(self.data_source)
class VaryingLengthSubjectSampler(Sampler):
    """Subject-wise sampler for data sources in which individuals may have
    a varying number of temporal samples.

    Each sample's subject id is read from its label vector at position
    ``id_covariate``; samples of one subject are assumed to be stored
    contiguously.  Iteration yields ``(sample_index, subject_position)``
    pairs, with subjects visited in random order.
    """
    def __init__(self, data_source, id_covariate):
        super(VaryingLengthSubjectSampler, self).__init__(data_source)
        self.data_source = data_source
        self.id_covariate = id_covariate
        # Subject id of every sample, in storage order.
        subject_ids = [int(sample['label'][id_covariate].item())
                       for sample in data_source]
        self.P = len(set(subject_ids))
        # First index of each subject's contiguous run, in order of
        # first appearance (OrderedDict.fromkeys keeps that order).
        first_seen = list(OrderedDict.fromkeys(subject_ids))
        self.start_indices = [subject_ids.index(sid) for sid in first_seen]
        # One-past-the-end index of each run.
        self.end_indices = self.start_indices[1:] + [len(data_source)]
    def __iter__(self):
        # Shuffle subject positions, then emit (index, position) pairs
        # run by run so a subject's samples stay together.
        order = np.arange(self.P)
        np.random.shuffle(order)
        pairs = [(i, pos)
                 for pos in order
                 for i in range(self.start_indices[pos],
                                self.end_indices[pos])]
        return iter(pairs)
    def __len__(self):
        return self.P
class VaryingLengthBatchSampler(BatchSampler):
    """
    Perform batch sampling when individuals have varying number of temporal samples.

    Consumes ``(sample_index, subject_position)`` pairs from a
    ``VaryingLengthSubjectSampler`` and groups ALL sample indices of
    ``batch_size`` distinct subjects into one batch.  The trailing partial
    batch is always emitted (drop_last is fixed to False).
    """
    def __init__(self, sampler, batch_size):
        super(VaryingLengthBatchSampler, self).__init__(sampler, batch_size, False)
        assert isinstance(sampler, VaryingLengthSubjectSampler)
        self.sampler = sampler
        self.batch_size = batch_size
    #__len__ defined by the superclass
    def __iter__(self):
        batch = []
        batch_subjects = set()
        for idx, subj in self.sampler:
            if subj not in batch_subjects:
                if len(batch_subjects) == self.batch_size:
                    # batch_size distinct subjects collected -> emit and reset.
                    yield batch
                    batch = []
                    batch_subjects.clear()
                batch_subjects.add(subj)
            # BUGFIX: append every sample index of the current subject, not
            # only the first one.  Previously this line sat inside the
            # `if subj not in batch_subjects:` branch, so all but the first
            # sample of each subject were silently dropped — defeating the
            # purpose of varying-length subject batching.
            batch.append(idx)
        # Emit the final (possibly partial) batch.
        yield batch
def batch_predict_varying_T(latent_dim, covar_module0, covar_module1, likelihoods, prediction_x,
                            test_x, mu, zt_list, id_covariate, eps):
    """
    Perform batch predictions when individuals have varying number of temporal samples.

    Appears to compute, per latent dimension, the predictive mean of an
    additive GP: a shared kernel ``covar_module0`` approximated with M
    inducing inputs (``zt_list``) plus a subject-specific kernel
    ``covar_module1`` whose covariance is block-diagonal over subjects
    (subject id read from input column ``id_covariate``) — TODO confirm
    against the model definition.

    Args:
        latent_dim: number of independent latent GPs.
        covar_module0: shared kernel — a list with one gpytorch kernel per
            latent dimension, or a single batched kernel.
        covar_module1: subject-specific kernel, same convention.
        likelihoods: Gaussian likelihood(s) supplying observation noise.
        prediction_x: conditioning inputs, shape (N, Q).
        test_x: inputs at which predictions are produced.
        mu: conditioning targets, indexed like ``prediction_x`` with one
            column per latent dimension.
        zt_list: inducing inputs, one (M, Q) tensor per latent dimension.
        id_covariate: input column holding the subject identifier.
        eps: jitter added to the inducing-point covariance diagonal.

    Returns:
        Tensor of shape (test_x.shape[0], latent_dim): predictive means.

    NOTE(review): torch.cholesky / torch.solve are deprecated and removed in
    recent PyTorch (torch.linalg.cholesky / torch.linalg.solve are the
    replacements); kept unchanged here to preserve behavior with the torch
    version this code targets.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Q = prediction_x.shape[1]
    M = zt_list[0].shape[0]
    I_M = torch.eye(M, dtype=torch.double).to(device)
    # Shared-kernel covariances w.r.t. the inducing inputs, computed either
    # per latent dimension (list case) or in one batched evaluation.
    if isinstance(covar_module0, list):
        K0xz = torch.zeros(latent_dim, prediction_x.shape[0], M).double().to(device)
        K0zz = torch.zeros(latent_dim, M, M).double().to(device)
        K0Xz = torch.zeros(latent_dim, test_x.shape[0], M).double().to(device)
        for i in range(latent_dim):
            covar_module0[i].eval()
            covar_module1[i].eval()
            likelihoods[i].eval()
            z = zt_list[i].to(device)
            K0xz[i] = covar_module0[i](prediction_x, z).evaluate()
            K0zz[i] = covar_module0[i](z, z).evaluate()
            K0Xz[i] = covar_module0[i](test_x, z).evaluate()
    else:
        covar_module0.eval()
        covar_module1.eval()
        likelihoods.eval()
        K0xz = covar_module0(prediction_x, zt_list).evaluate()
        K0zz = covar_module0(zt_list, zt_list).evaluate()
        K0Xz = covar_module0(test_x, zt_list).evaluate()
    # Jitter for numerical stability of the Cholesky/solve below.
    K0zz = K0zz + eps * I_M
    K0zx = K0xz.transpose(-1, -2)
    iB_st_list = []
    H = K0zz
    subjects = torch.unique(prediction_x[:, id_covariate]).tolist()
    iB_mu = torch.zeros(latent_dim, prediction_x.shape[0], 1, dtype=torch.double).to(device)
    # Per-subject blocks: B_s = K1(x_s, x_s) + noise.  Accumulate
    # H = Kzz + sum_s Kzx_s B_s^{-1} Kxz_s and cache each B_s^{-1}.
    for s in subjects:
        indices = prediction_x[:, id_covariate] == s
        x_st = prediction_x[indices]
        T = x_st.shape[0]
        I_T = torch.eye(T, dtype=torch.double).to(device)
        if isinstance(covar_module0, list):
            B_st = torch.zeros(latent_dim, T, T, dtype=torch.double).to(device)
            for i in range(latent_dim):
                B_st[i] = covar_module1[i](x_st, x_st).evaluate() + I_T * likelihoods[i].noise_covar.noise
        else:
            stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=0)
            B_st = covar_module1(stacked_x_st, stacked_x_st).evaluate() + I_T * likelihoods.noise_covar.noise.unsqueeze(dim=2)
        # Invert each T x T block via its Cholesky factor.
        LB_st = torch.cholesky(B_st)
        iB_st = torch.cholesky_solve(I_T, LB_st)
        K0xz_st = K0xz[:, indices]
        K0zx_st = K0xz_st.transpose(-1, -2)
        iB_K0xz = torch.matmul(iB_st, K0xz_st)
        K0zx_iB_K0xz = torch.matmul(K0zx_st, iB_K0xz)
        H = H + K0zx_iB_K0xz
        iB_mu[:, indices] = torch.matmul(iB_st, mu[indices].T.unsqueeze(dim=2))
        iB_st_list.append(iB_st)
    # mu_tilde = (B^{-1} - B^{-1} Kxz H^{-1} Kzx B^{-1}) mu,
    # assembled subject block by subject block.
    K0xz_iH_K0zx_iB_mu_st = torch.matmul(K0xz, torch.solve(torch.matmul(K0zx, iB_mu), H)[0])
    iB_K0xz_iH_K0zx_iB_mu = torch.zeros(latent_dim, prediction_x.shape[0], 1, dtype=torch.double).to(device)
    for i, s in enumerate(subjects):
        indices = prediction_x[:, id_covariate] == s
        iB_K0xz_iH_K0zx_iB_mu[:, indices] = torch.matmul(iB_st_list[i], K0xz_iH_K0zx_iB_mu_st[:, indices])
    mu_tilde = iB_mu - iB_K0xz_iH_K0zx_iB_mu
    # Shared-kernel contribution to the test predictions.
    K0Xz_iK0zz_K0zx_mu_tilde = torch.matmul(K0Xz, torch.solve(torch.matmul(K0zx, mu_tilde), K0zz)[0])
    test_subjects = torch.unique(test_x[:, id_covariate]).cpu().numpy()
    mask = np.isin(prediction_x[:, id_covariate].cpu().numpy(), test_subjects)
    K1Xx_mu_tilde = torch.zeros(latent_dim, test_x.shape[0], 1, dtype=torch.double).to(device)
    # Subject-kernel contribution: only test points whose subject also
    # appears in the conditioning set receive a K1 cross-covariance term.
    for s in test_subjects:
        indices = test_x[:, id_covariate] == s
        if isinstance(covar_module0, list):
            K1Xx = torch.zeros(latent_dim, test_x[indices].shape[0], np.sum(mask)).double().to(device)
            for i in range(latent_dim):
                K1Xx[i] = covar_module1[i](test_x[indices], prediction_x[mask]).evaluate()
        else:
            stacked_test_x_indices = torch.stack([test_x[indices] for i in range(latent_dim)], dim=0)
            stacked_prediction_x_mask = torch.stack([prediction_x[mask] for i in range(latent_dim)], dim=0)
            K1Xx = covar_module1(stacked_test_x_indices, stacked_prediction_x_mask).evaluate()
        K1Xx_mu_tilde[:, indices] = torch.matmul(K1Xx, mu_tilde[:, mask])
    # (latent_dim, N_test, 1) -> (N_test, latent_dim)
    Z_pred = (K0Xz_iK0zz_K0zx_mu_tilde + K1Xx_mu_tilde).squeeze(dim=2).T
    return Z_pred
def batch_predict(latent_dim, covar_module0, covar_module1, likelihoods, prediction_x, test_x, mu,
                  zt_list, P, T, id_covariate, eps):
    """
    Perform batch-wise predictions

    Fixed-length counterpart of ``batch_predict_varying_T``: all P subjects
    are assumed to have exactly T samples, so the per-subject blocks can be
    reshaped to (P, T, ...) tensors and processed in one batched pass
    instead of looping over subjects.

    Args:
        latent_dim: number of independent latent GPs.
        covar_module0: shared kernel — a list (one per latent dim) or a
            single batched gpytorch kernel.
        covar_module1: subject-specific kernel, same convention.
        likelihoods: Gaussian likelihood(s) supplying observation noise.
        prediction_x: conditioning inputs, shape (P*T, Q).
        test_x: inputs at which predictions are produced.
        mu: conditioning targets, shape compatible with (P*T, latent_dim)
            (transposed and reshaped below).
        zt_list: inducing inputs, one (M, Q) tensor per latent dimension.
        P: number of subjects; T: samples per subject.
        id_covariate: input column holding the subject identifier.
        eps: jitter added to the inducing-point covariance diagonal.

    Returns:
        Tensor of shape (test_x.shape[0], latent_dim): predictive means.

    NOTE(review): torch.cholesky / torch.solve are deprecated and removed in
    recent PyTorch; kept unchanged here to preserve behavior.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Q = prediction_x.shape[1]
    M = zt_list[0].shape[0]
    I_M = torch.eye(M, dtype=torch.double).to(device)
    I_T = torch.eye(T, dtype=torch.double).to(device)
    # Per-subject view of the conditioning inputs: (P, T, Q).
    x_st = torch.reshape(prediction_x, [P, T, Q])
    # Rebinds the argument: targets as (latent_dim, P, T, 1) blocks.
    mu = mu.T
    mu_st = torch.reshape(mu, [latent_dim, P, T, 1])
    if isinstance(covar_module0, list):
        K0xz = torch.zeros(latent_dim, P*T, M).double().to(device)
        K0zz = torch.zeros(latent_dim, M, M).double().to(device)
        B_st = torch.zeros(latent_dim, P, T, T).double().to(device)
        K0Xz = torch.zeros(latent_dim, test_x.shape[0], M).double().to(device)
        for i in range(latent_dim):
            covar_module0[i].eval()
            covar_module1[i].eval()
            likelihoods[i].eval()
            z = zt_list[i].to(device)
            K0xz[i] = covar_module0[i](prediction_x, z).evaluate()
            K0zz[i] = covar_module0[i](z, z).evaluate()
            # B = subject-kernel covariance plus observation noise.
            B_st[i] = covar_module1[i](x_st, x_st).evaluate() + I_T * likelihoods[i].noise_covar.noise
            K0Xz[i] = covar_module0[i](test_x, z).evaluate()
    else:
        covar_module0.eval()
        covar_module1.eval()
        likelihoods.eval()
        stacked_x_st = torch.stack([x_st for i in range(latent_dim)], dim=1)
        K0xz = covar_module0(prediction_x, zt_list).evaluate()
        K0zz = covar_module0(zt_list, zt_list).evaluate()
        B_st = (covar_module1(stacked_x_st, stacked_x_st).evaluate() + I_T * likelihoods.noise_covar.noise.unsqueeze(dim=2)).transpose(0, 1)
        K0Xz = covar_module0(test_x, zt_list).evaluate()
    # Jitter for numerical stability of the Cholesky/solve below.
    K0zz = K0zz + eps * I_M
    # Invert all T x T subject blocks at once via batched Cholesky.
    LB_st = torch.cholesky(B_st)
    iB_st = torch.cholesky_solve(I_T, LB_st)
    K0xz_st = torch.reshape(K0xz, [latent_dim, P, T, M])
    K0zx_st = K0xz_st.transpose(-1, -2)
    K0zx = K0xz.transpose(-1, -2)
    iB_K0xz = torch.matmul(iB_st, K0xz_st)
    # H = Kzz + sum_s Kzx_s B_s^{-1} Kxz_s (the sum realized by the
    # flattened (latent_dim, P*T, M) matmul).
    K0zx_iB_K0xz = torch.matmul(K0zx, torch.reshape(iB_K0xz, [latent_dim, P*T, M]))
    H = K0zz + K0zx_iB_K0xz
    iB_mu = torch.matmul(iB_st, mu_st).view(latent_dim, -1, 1)
    # mu_tilde = (B^{-1} - B^{-1} Kxz H^{-1} Kzx B^{-1}) mu.
    K0xz_iH_K0zx_iB_mu_st = torch.matmul(K0xz, torch.solve(torch.matmul(K0zx, iB_mu), H)[0]).reshape(latent_dim, P, T, -1)
    iB_K0xz_iH_K0zx_iB_mu = torch.matmul(iB_st, K0xz_iH_K0zx_iB_mu_st).view(latent_dim, -1, 1)
    mu_tilde = iB_mu - iB_K0xz_iH_K0zx_iB_mu
    # Shared-kernel contribution to the test predictions.
    K0Xz_iK0zz_K0zx_mu_tilde = torch.matmul(K0Xz, torch.solve(torch.matmul(K0zx, mu_tilde), K0zz)[0])
    test_subjects = torch.unique(test_x[:, id_covariate]).cpu().numpy()
    mask = np.isin(prediction_x[:, id_covariate].cpu().numpy(), test_subjects)
    K1Xx_mu_tilde = torch.zeros(latent_dim, test_x.shape[0], 1, dtype=torch.double).to(device)
    # Subject-kernel contribution for test points whose subject also
    # appears in the conditioning set.
    for s in test_subjects:
        indices = test_x[:, id_covariate] == s
        if isinstance(covar_module0, list):
            K1Xx = torch.zeros(latent_dim, test_x[indices].shape[0], np.sum(mask)).double().to(device)
            for i in range(latent_dim):
                K1Xx[i] = covar_module1[i](test_x[indices], prediction_x[mask]).evaluate()
        else:
            stacked_test_x_indices = torch.stack([test_x[indices] for i in range(latent_dim)], dim=0)
            stacked_prediction_x_mask = torch.stack([prediction_x[mask] for i in range(latent_dim)], dim=0)
            K1Xx = covar_module1(stacked_test_x_indices, stacked_prediction_x_mask).evaluate()
        K1Xx_mu_tilde[:, indices] = torch.matmul(K1Xx, mu_tilde[:, mask])
    # (latent_dim, N_test, 1) -> (N_test, latent_dim)
    Z_pred = (K0Xz_iK0zz_K0zx_mu_tilde + K1Xx_mu_tilde).squeeze(dim=2).T
    return Z_pred
def predict(covar_module0, covar_module1, likelihood, train_xt, test_x, mu, z, P, T, id_covariate, eps):
    """
    Helper function to perform predictions.

    Single-latent-dimension variant of ``batch_predict``: P subjects with a
    fixed number T of samples each, one shared kernel ``covar_module0`` with
    inducing inputs ``z`` and one subject-specific kernel ``covar_module1``.

    Args:
        covar_module0 / covar_module1: shared and subject-specific gpytorch
            kernels (single modules, not lists).
        likelihood: Gaussian likelihood supplying the observation noise.
        train_xt: conditioning inputs, shape (P*T, Q).
        test_x: inputs at which predictions are produced.
        mu: conditioning targets for this latent dimension.
        z: inducing inputs, shape (M, Q).
        P: number of subjects; T: samples per subject.
        id_covariate: input column holding the subject identifier.
        eps: jitter added to the inducing-point covariance diagonal.

    Returns:
        1-D tensor with one predictive mean per row of ``test_x``.

    NOTE(review): torch.cholesky / torch.solve are deprecated and removed in
    recent PyTorch; kept unchanged here to preserve behavior.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Q = train_xt.shape[1]
    M = z.shape[0]
    I_M = torch.eye(M, dtype=torch.double).to(device)
    I_T = torch.eye(T, dtype=torch.double).to(device)
    # Per-subject views: inputs (P, T, Q), targets (P, T, 1).
    x_st = torch.reshape(train_xt, [P, T, Q])
    mu_st = torch.reshape(mu, [P, T, 1])
    K0xz = covar_module0(train_xt, z).evaluate()
    K0zz = covar_module0(z, z).evaluate() + eps * I_M
    K1_st = covar_module1(x_st, x_st).evaluate()
    K0Xz = covar_module0(test_x, z).evaluate()
    # B = subject-kernel covariance plus observation noise, per subject.
    B_st = K1_st + I_T * likelihood.noise_covar.noise
    # Invert all T x T blocks via batched Cholesky.
    LB_st = torch.cholesky(B_st)
    iB_st = torch.cholesky_solve(I_T, LB_st)
    K0xz_st = torch.reshape(K0xz, [P, T, M])
    K0zx_st = K0xz_st.transpose(-1, -2)
    iB_K0xz = torch.matmul(iB_st, K0xz_st)
    # H = Kzz + sum_s Kzx_s B_s^{-1} Kxz_s.
    K0zx_iB_K0xz = torch.matmul(K0xz.T, torch.reshape(iB_K0xz, [P*T, M]))
    H = K0zz + K0zx_iB_K0xz
    iB_mu = torch.matmul(iB_st, mu_st).view(-1)
    # mu_tilde = (B^{-1} - B^{-1} Kxz H^{-1} Kzx B^{-1}) mu.
    K0xz_iH_K0zx_iB_mu_st = torch.matmul(K0xz, torch.solve(torch.matmul(K0xz.T, iB_mu).unsqueeze(dim=1), H)[0]).reshape(P, T, -1)
    iB_K0xz_iH_K0zx_iB_mu = torch.matmul(iB_st, K0xz_iH_K0zx_iB_mu_st).view(-1)
    mu_tilde = iB_mu - iB_K0xz_iH_K0zx_iB_mu
    # Shared-kernel contribution to the test predictions.
    K0Xz_iK0zz_K0zx_mu_tilde = torch.matmul(K0Xz, torch.solve(torch.matmul(K0xz.T, mu_tilde).unsqueeze(dim=1), K0zz)[0]).squeeze()
    test_subjects = torch.unique(test_x[:, id_covariate]).cpu().numpy()
    mask = np.isin(train_xt[:, id_covariate].cpu().numpy(), test_subjects)
    K1Xx_mu_tilde = torch.zeros(test_x.shape[0], dtype=torch.double).to(device)
    # Subject-kernel contribution for test points whose subject also
    # appears in the conditioning set.
    for s in test_subjects:
        indices = test_x[:, id_covariate] == s
        K1Xx = covar_module1(test_x[indices], train_xt[mask]).evaluate()
        K1Xx_mu_tilde[indices] = torch.matmul(K1Xx, mu_tilde[mask])
    Z_pred = K0Xz_iK0zz_K0zx_mu_tilde + K1Xx_mu_tilde
    return Z_pred
| [
"torch.cholesky_solve",
"torch.unique",
"collections.OrderedDict.fromkeys",
"torch.eye",
"torch.cholesky",
"itertools.chain.from_iterable",
"numpy.sum",
"torch.cuda.is_available",
"torch.matmul",
"torch.reshape",
"torch.zeros",
"numpy.arange",
"numpy.random.shuffle"
] | [((8288, 8326), 'torch.reshape', 'torch.reshape', (['prediction_x', '[P, T, Q]'], {}), '(prediction_x, [P, T, Q])\n', (8301, 8326), False, 'import torch\n'), ((8354, 8394), 'torch.reshape', 'torch.reshape', (['mu', '[latent_dim, P, T, 1]'], {}), '(mu, [latent_dim, P, T, 1])\n', (8367, 8394), False, 'import torch\n'), ((9719, 9739), 'torch.cholesky', 'torch.cholesky', (['B_st'], {}), '(B_st)\n', (9733, 9739), False, 'import torch\n'), ((9752, 9784), 'torch.cholesky_solve', 'torch.cholesky_solve', (['I_T', 'LB_st'], {}), '(I_T, LB_st)\n', (9772, 9784), False, 'import torch\n'), ((9799, 9841), 'torch.reshape', 'torch.reshape', (['K0xz', '[latent_dim, P, T, M]'], {}), '(K0xz, [latent_dim, P, T, M])\n', (9812, 9841), False, 'import torch\n'), ((9931, 9959), 'torch.matmul', 'torch.matmul', (['iB_st', 'K0xz_st'], {}), '(iB_st, K0xz_st)\n', (9943, 9959), False, 'import torch\n'), ((12004, 12038), 'torch.reshape', 'torch.reshape', (['train_xt', '[P, T, Q]'], {}), '(train_xt, [P, T, Q])\n', (12017, 12038), False, 'import torch\n'), ((12051, 12079), 'torch.reshape', 'torch.reshape', (['mu', '[P, T, 1]'], {}), '(mu, [P, T, 1])\n', (12064, 12079), False, 'import torch\n'), ((12347, 12367), 'torch.cholesky', 'torch.cholesky', (['B_st'], {}), '(B_st)\n', (12361, 12367), False, 'import torch\n'), ((12380, 12412), 'torch.cholesky_solve', 'torch.cholesky_solve', (['I_T', 'LB_st'], {}), '(I_T, LB_st)\n', (12400, 12412), False, 'import torch\n'), ((12427, 12457), 'torch.reshape', 'torch.reshape', (['K0xz', '[P, T, M]'], {}), '(K0xz, [P, T, M])\n', (12440, 12457), False, 'import torch\n'), ((12513, 12541), 'torch.matmul', 'torch.matmul', (['iB_st', 'K0xz_st'], {}), '(iB_st, K0xz_st)\n', (12525, 12541), False, 'import torch\n'), ((1294, 1311), 'numpy.arange', 'np.arange', (['self.P'], {}), '(self.P)\n', (1303, 1311), True, 'import numpy as np\n'), ((1320, 1340), 'numpy.random.shuffle', 'np.random.shuffle', (['r'], {}), '(r)\n', (1337, 1340), True, 'import numpy as np\n'), ((2278, 
2295), 'numpy.arange', 'np.arange', (['self.P'], {}), '(self.P)\n', (2287, 2295), True, 'import numpy as np\n'), ((2305, 2325), 'numpy.random.shuffle', 'np.random.shuffle', (['r'], {}), '(r)\n', (2322, 2325), True, 'import numpy as np\n'), ((5793, 5813), 'torch.cholesky', 'torch.cholesky', (['B_st'], {}), '(B_st)\n', (5807, 5813), False, 'import torch\n'), ((5830, 5862), 'torch.cholesky_solve', 'torch.cholesky_solve', (['I_T', 'LB_st'], {}), '(I_T, LB_st)\n', (5850, 5862), False, 'import torch\n'), ((5960, 5988), 'torch.matmul', 'torch.matmul', (['iB_st', 'K0xz_st'], {}), '(iB_st, K0xz_st)\n', (5972, 5988), False, 'import torch\n'), ((6012, 6042), 'torch.matmul', 'torch.matmul', (['K0zx_st', 'iB_K0xz'], {}), '(K0zx_st, iB_K0xz)\n', (6024, 6042), False, 'import torch\n'), ((6522, 6584), 'torch.matmul', 'torch.matmul', (['iB_st_list[i]', 'K0xz_iH_K0zx_iB_mu_st[:, indices]'], {}), '(iB_st_list[i], K0xz_iH_K0zx_iB_mu_st[:, indices])\n', (6534, 6584), False, 'import torch\n'), ((7690, 7727), 'torch.matmul', 'torch.matmul', (['K1Xx', 'mu_tilde[:, mask]'], {}), '(K1Xx, mu_tilde[:, mask])\n', (7702, 7727), False, 'import torch\n'), ((9998, 10044), 'torch.reshape', 'torch.reshape', (['iB_K0xz', '[latent_dim, P * T, M]'], {}), '(iB_K0xz, [latent_dim, P * T, M])\n', (10011, 10044), False, 'import torch\n'), ((11458, 11495), 'torch.matmul', 'torch.matmul', (['K1Xx', 'mu_tilde[:, mask]'], {}), '(K1Xx, mu_tilde[:, mask])\n', (11470, 11495), False, 'import torch\n'), ((12582, 12616), 'torch.reshape', 'torch.reshape', (['iB_K0xz', '[P * T, M]'], {}), '(iB_K0xz, [P * T, M])\n', (12595, 12616), False, 'import torch\n'), ((13489, 13523), 'torch.matmul', 'torch.matmul', (['K1Xx', 'mu_tilde[mask]'], {}), '(K1Xx, mu_tilde[mask])\n', (13501, 13523), False, 'import torch\n'), ((1451, 1495), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['list_of_lists'], {}), '(list_of_lists)\n', (1480, 1495), False, 'import itertools\n'), ((2461, 2505), 
'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['list_of_lists'], {}), '(list_of_lists)\n', (2490, 2505), False, 'import itertools\n'), ((3764, 3789), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3787, 3789), False, 'import torch\n'), ((3871, 3903), 'torch.eye', 'torch.eye', (['M'], {'dtype': 'torch.double'}), '(M, dtype=torch.double)\n', (3880, 3903), False, 'import torch\n'), ((4939, 4982), 'torch.unique', 'torch.unique', (['prediction_x[:, id_covariate]'], {}), '(prediction_x[:, id_covariate])\n', (4951, 4982), False, 'import torch\n'), ((5004, 5073), 'torch.zeros', 'torch.zeros', (['latent_dim', 'prediction_x.shape[0]', '(1)'], {'dtype': 'torch.double'}), '(latent_dim, prediction_x.shape[0], 1, dtype=torch.double)\n', (5015, 5073), False, 'import torch\n'), ((6307, 6376), 'torch.zeros', 'torch.zeros', (['latent_dim', 'prediction_x.shape[0]', '(1)'], {'dtype': 'torch.double'}), '(latent_dim, prediction_x.shape[0], 1, dtype=torch.double)\n', (6318, 6376), False, 'import torch\n'), ((6906, 6969), 'torch.zeros', 'torch.zeros', (['latent_dim', 'test_x.shape[0]', '(1)'], {'dtype': 'torch.double'}), '(latent_dim, test_x.shape[0], 1, dtype=torch.double)\n', (6917, 6969), False, 'import torch\n'), ((8071, 8096), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8094, 8096), False, 'import torch\n'), ((8178, 8210), 'torch.eye', 'torch.eye', (['M'], {'dtype': 'torch.double'}), '(M, dtype=torch.double)\n', (8187, 8210), False, 'import torch\n'), ((8232, 8264), 'torch.eye', 'torch.eye', (['T'], {'dtype': 'torch.double'}), '(T, dtype=torch.double)\n', (8241, 8264), False, 'import torch\n'), ((10084, 10110), 'torch.matmul', 'torch.matmul', (['iB_st', 'mu_st'], {}), '(iB_st, mu_st)\n', (10096, 10110), False, 'import torch\n'), ((10286, 10328), 'torch.matmul', 'torch.matmul', (['iB_st', 'K0xz_iH_K0zx_iB_mu_st'], {}), '(iB_st, K0xz_iH_K0zx_iB_mu_st)\n', (10298, 10328), False, 'import torch\n'), ((10673, 
10736), 'torch.zeros', 'torch.zeros', (['latent_dim', 'test_x.shape[0]', '(1)'], {'dtype': 'torch.double'}), '(latent_dim, test_x.shape[0], 1, dtype=torch.double)\n', (10684, 10736), False, 'import torch\n'), ((11800, 11825), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11823, 11825), False, 'import torch\n'), ((11894, 11926), 'torch.eye', 'torch.eye', (['M'], {'dtype': 'torch.double'}), '(M, dtype=torch.double)\n', (11903, 11926), False, 'import torch\n'), ((11948, 11980), 'torch.eye', 'torch.eye', (['T'], {'dtype': 'torch.double'}), '(T, dtype=torch.double)\n', (11957, 11980), False, 'import torch\n'), ((12657, 12683), 'torch.matmul', 'torch.matmul', (['iB_st', 'mu_st'], {}), '(iB_st, mu_st)\n', (12669, 12683), False, 'import torch\n'), ((12851, 12893), 'torch.matmul', 'torch.matmul', (['iB_st', 'K0xz_iH_K0zx_iB_mu_st'], {}), '(iB_st, K0xz_iH_K0zx_iB_mu_st)\n', (12863, 12893), False, 'import torch\n'), ((13248, 13296), 'torch.zeros', 'torch.zeros', (['test_x.shape[0]'], {'dtype': 'torch.double'}), '(test_x.shape[0], dtype=torch.double)\n', (13259, 13296), False, 'import torch\n'), ((5238, 5270), 'torch.eye', 'torch.eye', (['T'], {'dtype': 'torch.double'}), '(T, dtype=torch.double)\n', (5247, 5270), False, 'import torch\n'), ((6245, 6270), 'torch.matmul', 'torch.matmul', (['K0zx', 'iB_mu'], {}), '(K0zx, iB_mu)\n', (6257, 6270), False, 'import torch\n'), ((6693, 6721), 'torch.matmul', 'torch.matmul', (['K0zx', 'mu_tilde'], {}), '(K0zx, mu_tilde)\n', (6705, 6721), False, 'import torch\n'), ((10460, 10488), 'torch.matmul', 'torch.matmul', (['K0zx', 'mu_tilde'], {}), '(K0zx, mu_tilde)\n', (10472, 10488), False, 'import torch\n'), ((2144, 2167), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['l'], {}), '(l)\n', (2164, 2167), False, 'from collections import OrderedDict\n'), ((5346, 5395), 'torch.zeros', 'torch.zeros', (['latent_dim', 'T', 'T'], {'dtype': 'torch.double'}), '(latent_dim, T, T, dtype=torch.double)\n', (5357, 
5395), False, 'import torch\n'), ((6754, 6791), 'torch.unique', 'torch.unique', (['test_x[:, id_covariate]'], {}), '(test_x[:, id_covariate])\n', (6766, 6791), False, 'import torch\n'), ((10521, 10558), 'torch.unique', 'torch.unique', (['test_x[:, id_covariate]'], {}), '(test_x[:, id_covariate])\n', (10533, 10558), False, 'import torch\n'), ((13100, 13137), 'torch.unique', 'torch.unique', (['test_x[:, id_covariate]'], {}), '(test_x[:, id_covariate])\n', (13112, 13137), False, 'import torch\n'), ((3972, 4021), 'torch.zeros', 'torch.zeros', (['latent_dim', 'prediction_x.shape[0]', 'M'], {}), '(latent_dim, prediction_x.shape[0], M)\n', (3983, 4021), False, 'import torch\n'), ((4057, 4086), 'torch.zeros', 'torch.zeros', (['latent_dim', 'M', 'M'], {}), '(latent_dim, M, M)\n', (4068, 4086), False, 'import torch\n'), ((4122, 4165), 'torch.zeros', 'torch.zeros', (['latent_dim', 'test_x.shape[0]', 'M'], {}), '(latent_dim, test_x.shape[0], M)\n', (4133, 4165), False, 'import torch\n'), ((8451, 8484), 'torch.zeros', 'torch.zeros', (['latent_dim', '(P * T)', 'M'], {}), '(latent_dim, P * T, M)\n', (8462, 8484), False, 'import torch\n'), ((8518, 8547), 'torch.zeros', 'torch.zeros', (['latent_dim', 'M', 'M'], {}), '(latent_dim, M, M)\n', (8529, 8547), False, 'import torch\n'), ((8583, 8615), 'torch.zeros', 'torch.zeros', (['latent_dim', 'P', 'T', 'T'], {}), '(latent_dim, P, T, T)\n', (8594, 8615), False, 'import torch\n'), ((8651, 8694), 'torch.zeros', 'torch.zeros', (['latent_dim', 'test_x.shape[0]', 'M'], {}), '(latent_dim, test_x.shape[0], M)\n', (8662, 8694), False, 'import torch\n'), ((10194, 10219), 'torch.matmul', 'torch.matmul', (['K0zx', 'iB_mu'], {}), '(K0zx, iB_mu)\n', (10206, 10219), False, 'import torch\n'), ((7170, 7182), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (7176, 7182), True, 'import numpy as np\n'), ((10937, 10949), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (10943, 10949), True, 'import numpy as np\n'), ((12752, 12779), 'torch.matmul', 
'torch.matmul', (['K0xz.T', 'iB_mu'], {}), '(K0xz.T, iB_mu)\n', (12764, 12779), False, 'import torch\n'), ((13010, 13040), 'torch.matmul', 'torch.matmul', (['K0xz.T', 'mu_tilde'], {}), '(K0xz.T, mu_tilde)\n', (13022, 13040), False, 'import torch\n')] |
from dassl.engine import TRAINER_REGISTRY,TrainerXU
from dassl.data import DataManager
from torch.utils.data import Dataset as TorchDataset
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
import torch
import torch.nn as nn
from torch.nn import functional as F
from dassl.engine.trainer_tmp import SimpleNet
from dassl.utils import MetricMeter
import numpy as np
@TRAINER_REGISTRY.register()
class CustomMCD(TrainerXU):
def __init__(self, cfg):
super().__init__(cfg)
self.n_step_F = cfg.TRAINER.CustomMCD.N_STEP_F
self._best_epoch_val_loss = 10000
self.ce = nn.CrossEntropyLoss()
if cfg.DATASET.TOTAL_CLASS_WEIGHT:
total_data_class_weight = self.dm.dataset.whole_class_weight
if total_data_class_weight is not None:
torch_weight = torch.from_numpy(np.array(total_data_class_weight)).float().to(self.device)
self.ce = nn.CrossEntropyLoss(weight=torch_weight)
def build_model(self):
cfg = self.cfg
print('Building F')
self.F = SimpleNet(cfg, cfg.MODEL, 0,**cfg.MODEL.BACKBONE.PARAMS)
self.F.to(self.device)
print('# params: {:,}'.format(count_num_param(self.F)))
self.optim_F = build_optimizer(self.F, cfg.OPTIM)
self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
self.register_model('F', self.F, self.optim_F, self.sched_F)
fdim = self.F.fdim
print('Building C1')
print("fdim : ",fdim)
print("num_classes : ",self.num_classes)
self.C1 = nn.Linear(fdim, self.num_classes)
self.C1.to(self.device)
print('# params: {:,}'.format(count_num_param(self.C1)))
self.optim_C1 = build_optimizer(self.C1, cfg.OPTIM)
self.sched_C1 = build_lr_scheduler(self.optim_C1, cfg.OPTIM)
self.register_model('C1', self.C1, self.optim_C1, self.sched_C1)
print('Building C2')
self.C2 = nn.Linear(fdim, self.num_classes)
self.C2.to(self.device)
print('# params: {:,}'.format(count_num_param(self.C2)))
self.optim_C2 = build_optimizer(self.C2, cfg.OPTIM)
self.sched_C2 = build_lr_scheduler(self.optim_C2, cfg.OPTIM)
self.register_model('C2', self.C2, self.optim_C2, self.sched_C2)
def forward_backward(self, batch_x, batch_u,backprob = True):
parsed = self.parse_batch_train(batch_x, batch_u)
input_x, label_x,_ ,input_u = parsed
# Step A
feat_x = self.F(input_x)
logit_x1 = self.C1(feat_x)
logit_x2 = self.C2(feat_x)
# loss_x1 = F.cross_entropy(logit_x1, label_x)
# loss_x2 = F.cross_entropy(logit_x2, label_x)
loss_x1 = self.ce(logit_x1, label_x)
loss_x2 = self.ce(logit_x2, label_x)
loss_step_A = loss_x1 + loss_x2
if backprob:
self.model_backward_and_update(loss_step_A)
# Step B
with torch.no_grad():
feat_x = self.F(input_x)
logit_x1 = self.C1(feat_x)
logit_x2 = self.C2(feat_x)
# loss_x1 = F.cross_entropy(logit_x1, label_x)
# loss_x2 = F.cross_entropy(logit_x2, label_x)
loss_x1 = self.ce(logit_x1, label_x)
loss_x2 = self.ce(logit_x2, label_x)
loss_x = loss_x1 + loss_x2
with torch.no_grad():
feat_u = self.F(input_u)
pred_u1 = F.softmax(self.C1(feat_u), 1)
pred_u2 = F.softmax(self.C2(feat_u), 1)
loss_dis = self.discrepancy(pred_u1, pred_u2)
loss_step_B = loss_x - loss_dis
if backprob:
self.model_backward_and_update(loss_step_B, ['C1', 'C2'])
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
# Step C
for _ in range(self.n_step_F):
feat_u = self.F(input_u)
pred_u1 = F.softmax(self.C1(feat_u), 1)
pred_u2 = F.softmax(self.C2(feat_u), 1)
loss_step_C = self.discrepancy(pred_u1, pred_u2)
if backprob:
self.model_backward_and_update(loss_step_C, 'F')
loss_summary = {
'loss_step_A': loss_step_A.item(),
'loss_step_B': loss_step_B.item(),
'loss_step_C': loss_step_C.item()
}
return loss_summary
@torch.no_grad()
# def validate(self,full_results = False):
def validate(self):
"""A generic testing pipeline."""
self.set_model_mode('eval')
self.evaluator.reset()
losses = MetricMeter()
print('Do evaluation on {} set'.format('valid set'))
data_loader = self.val_loader
assert data_loader is not None
self.num_batches = len(data_loader)
valid_loader_x_iter = iter(data_loader)
loader_u_iter = iter(self.train_loader_u)
for self.batch_idx in range(self.num_batches):
try:
batch_x = next(valid_loader_x_iter)
except StopIteration:
valid_loader_x_iter = iter(data_loader)
batch_x = next(valid_loader_x_iter)
try:
batch_u = next(loader_u_iter)
except StopIteration:
train_loader_u_iter = iter(self.train_loader_u)
batch_u = next(train_loader_u_iter)
input, label, domain, target = self.parse_batch_train(batch_x, batch_u)
loss = self.forward_backward(batch_x, batch_u, backprob=False)
losses.update(loss)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
total_loss = losses.meters['loss_step_A'].avg
for k, v in results.items():
tag = '{}/{}'.format('validation', k)
self.write_scalar(tag, v, self.epoch)
# if full_results:
return [total_loss,losses.dict_results(),results]
# return total_loss
# def after_epoch(self):
# """
# save the best model for given validation loss
# """
# epoch_total_loss = self.validate()
# if self._best_epoch_val_loss > epoch_total_loss:
# print("save best model at epoch %f , Improve loss from %4f -> %4f" % (
# self.epoch, self._best_epoch_val_loss, epoch_total_loss))
# self._best_epoch_val_loss = epoch_total_loss
# self.save_model(epoch=self.epoch, directory=self.output_dir, is_best=True)
# super().after_epoch()
def discrepancy(self, y1, y2):
return (y1 - y2).abs().mean()
    def model_inference(self, input):
        """Run inference: backbone features -> classifier C1 -> softmax probabilities."""
        # Note: self.F is the feature extractor; the bare F below is the
        # module-level torch.nn.functional alias.
        feat = self.F(input)
        return F.softmax(self.C1(feat),dim=1)
| [
"dassl.engine.TRAINER_REGISTRY.register",
"dassl.engine.trainer_tmp.SimpleNet",
"torch.nn.CrossEntropyLoss",
"dassl.optim.build_optimizer",
"dassl.utils.count_num_param",
"dassl.utils.MetricMeter",
"numpy.array",
"torch.nn.Linear",
"torch.no_grad",
"dassl.optim.build_lr_scheduler"
] | [((416, 443), 'dassl.engine.TRAINER_REGISTRY.register', 'TRAINER_REGISTRY.register', ([], {}), '()\n', (441, 443), False, 'from dassl.engine import TRAINER_REGISTRY, TrainerXU\n'), ((4324, 4339), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4337, 4339), False, 'import torch\n'), ((647, 668), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (666, 668), True, 'import torch.nn as nn\n'), ((1108, 1165), 'dassl.engine.trainer_tmp.SimpleNet', 'SimpleNet', (['cfg', 'cfg.MODEL', '(0)'], {}), '(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n', (1117, 1165), False, 'from dassl.engine.trainer_tmp import SimpleNet\n'), ((1283, 1317), 'dassl.optim.build_optimizer', 'build_optimizer', (['self.F', 'cfg.OPTIM'], {}), '(self.F, cfg.OPTIM)\n', (1298, 1317), False, 'from dassl.optim import build_optimizer, build_lr_scheduler\n'), ((1341, 1384), 'dassl.optim.build_lr_scheduler', 'build_lr_scheduler', (['self.optim_F', 'cfg.OPTIM'], {}), '(self.optim_F, cfg.OPTIM)\n', (1359, 1384), False, 'from dassl.optim import build_optimizer, build_lr_scheduler\n'), ((1608, 1641), 'torch.nn.Linear', 'nn.Linear', (['fdim', 'self.num_classes'], {}), '(fdim, self.num_classes)\n', (1617, 1641), True, 'import torch.nn as nn\n'), ((1763, 1798), 'dassl.optim.build_optimizer', 'build_optimizer', (['self.C1', 'cfg.OPTIM'], {}), '(self.C1, cfg.OPTIM)\n', (1778, 1798), False, 'from dassl.optim import build_optimizer, build_lr_scheduler\n'), ((1823, 1867), 'dassl.optim.build_lr_scheduler', 'build_lr_scheduler', (['self.optim_C1', 'cfg.OPTIM'], {}), '(self.optim_C1, cfg.OPTIM)\n', (1841, 1867), False, 'from dassl.optim import build_optimizer, build_lr_scheduler\n'), ((1989, 2022), 'torch.nn.Linear', 'nn.Linear', (['fdim', 'self.num_classes'], {}), '(fdim, self.num_classes)\n', (1998, 2022), True, 'import torch.nn as nn\n'), ((2144, 2179), 'dassl.optim.build_optimizer', 'build_optimizer', (['self.C2', 'cfg.OPTIM'], {}), '(self.C2, cfg.OPTIM)\n', (2159, 2179), False, 'from 
dassl.optim import build_optimizer, build_lr_scheduler\n'), ((2204, 2248), 'dassl.optim.build_lr_scheduler', 'build_lr_scheduler', (['self.optim_C2', 'cfg.OPTIM'], {}), '(self.optim_C2, cfg.OPTIM)\n', (2222, 2248), False, 'from dassl.optim import build_optimizer, build_lr_scheduler\n'), ((4537, 4550), 'dassl.utils.MetricMeter', 'MetricMeter', ([], {}), '()\n', (4548, 4550), False, 'from dassl.utils import MetricMeter\n'), ((2963, 2978), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2976, 2978), False, 'import torch\n'), ((3336, 3351), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3349, 3351), False, 'import torch\n'), ((970, 1010), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'torch_weight'}), '(weight=torch_weight)\n', (989, 1010), True, 'import torch.nn as nn\n'), ((1234, 1257), 'dassl.utils.count_num_param', 'count_num_param', (['self.F'], {}), '(self.F)\n', (1249, 1257), False, 'from dassl.utils import count_num_param\n'), ((1712, 1736), 'dassl.utils.count_num_param', 'count_num_param', (['self.C1'], {}), '(self.C1)\n', (1727, 1736), False, 'from dassl.utils import count_num_param\n'), ((2093, 2117), 'dassl.utils.count_num_param', 'count_num_param', (['self.C2'], {}), '(self.C2)\n', (2108, 2117), False, 'from dassl.utils import count_num_param\n'), ((885, 918), 'numpy.array', 'np.array', (['total_data_class_weight'], {}), '(total_data_class_weight)\n', (893, 918), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import sys
import argparse
import operator
import math
import numpy as np
# ---- command line interface ----
parser = argparse.ArgumentParser(description='Process output of Kraken run on contigs.')
parser.add_argument('-c','--contig', default="", help='per-contig output from Kraken')
parser.add_argument('-r','--report', default="", help='report output from Kraken (all taxa must be reported)')
parser.add_argument('-e','--entropy', default=0.0,type=float,help='upper cut-off for entropy, defaults to 0.0')
parser.add_argument('-u','--unknown', action='store_true',help='set -u, if contigs should be reported that were not annotated by Kraken')
parser.add_argument('-o','--outname', default="",help='name for the output can be specified or will be constructed from input file name and entropy cut-off')
parser.add_argument('-s','--silent', action='store_false',help='set -s, to suppress printing the number of contigs not annotated by Kraken')
#parser.add_argument('-d','--taxdir', default=".",type=str,help='directory where the tax files sit')
args = parser.parse_args()
krakFile = args.contig       # per-contig Kraken output
taxFile = args.report        # Kraken report (taxonomy table)
divthresh = args.entropy     # entropy cut-off for accepting an annotation
#taxdir = args.taxdir+"/"
# Output file name: explicit -o wins, otherwise derive it from the input
# file name and the entropy cut-off.
if args.outname != "":
    outFile1 = args.outname
elif divthresh>0:
    outFile1 = krakFile + "annoEnt" + str(divthresh) + ".tsv"
else:
    outFile1 = krakFile + "annoUnambig.tsv"
# Definition of the class Node
class Node:
    """A node of the taxonomy tree built from the Kraken report."""
    def __init__(self):
        # Per-instance state.  In the original these were *class*
        # attributes, so the mutable `children` list was a single object
        # shared by every Node instance; instance attributes fix that.
        self.tax_id = 0     # Number of the tax id.
        self.parent = 0     # Number of the parent of this node
        self.children = []  # List of the children of this node
        self.tip = 0        # Tip=1 if it's a terminal node, 0 if not.
        self.name = ""      # Name of the node
    def genealogy(self):
        """Trace the lineage (list of tax ids) from this node up to the root ("1")."""
        ancestors = []
        tax_id = self.tax_id
        while 1:
            if tax_id in name_object:
                ancestors.append(tax_id)
                tax_id = name_object[tax_id].parent
            else:
                break
            if tax_id == "1":
                # Reached the root: record it and stop.
                ancestors.append(tax_id)
                break
        return ancestors
# Function to find common ancestor between two nodes or more
def common_ancestor(node_list):
    """Return the lowest common ancestor (tax id) of all nodes in node_list.

    Relies on the module-level name_object taxonomy table.
    """
    global name_object
    list1 = name_object[node_list[0]].genealogy() # Define the whole genealogy of the first node
    for node in node_list:
        list2 = name_object[node].genealogy() # Define the whole genealogy of the second node
        ancestral_list = []
        for i in list1:
            if i in list2: # Identify common nodes between the two genealogies
                ancestral_list.append(i)
        list1 = ancestral_list # Reassign ancestral_list to list1.
    # Genealogies run leaf -> root, so the first shared element is the
    # deepest (lowest) common ancestor.
    common_ancestor = ancestral_list[0] # Finally, the first node of the ancestral_list is the common ancestor of all nodes.
    return common_ancestor # Return a node
#############################
# #
# Read taxonomy file #
# #
#############################
global name_object  # no-op at module level; kept to document intent
name_object = {}    # TAX_ID -> Node
name_dict = {} # Initialise dictionary with TAX_ID:NAME
name_dict_reverse = {} # Initialise dictionary with NAME:TAX_ID #not all names are unique, though
parentList = [None] * 100  # last tax id seen at each indentation level (report is at most 100 deep)
rank_dict = {} # Initialise dictionary with TAX_ID:RANK
rank_dict_reverse = {} # Initialise dictionary with RANK:[TAX_IDs]
# ---- parse the Kraken report into the global taxonomy tables ----
tax_file = open(taxFile, "r")
for line in tax_file:
    line = line.rstrip()
    # columns: 0 total perc, 1 total counts, 2 counts at this level,
    # 3 rank abbreviation, 4 tax ID, 5 name (indented by 2*level spaces)
    tab = line.split("\t")
    if tab[3] == "U":
        # Skip the "unclassified" summary row.  The original had a bare
        # `next` here, which is a no-op (it names the builtin, it does not
        # skip the row), so "U" lines leaked into the taxonomy tables.
        continue
    rank, tax_id, indentName = tab[3], tab[4], tab[5]  # Assign rank, tax_id and name ...
    name = indentName.lstrip(' ')
    ilevel = int((len(indentName) - len(name))/2)  # indentation depth == tree level
    parentList[ilevel] = tax_id
    if ilevel > 0:
        tax_id_parent = parentList[ilevel-1]
    if name not in name_dict_reverse:
        name_dict_reverse[name] = tax_id
        name_dict[tax_id] = name  # ... and load them into dictionary
    else:
        # Duplicate name: disambiguate by appending the parent's name.
        if ilevel > 0:
            name_dict_reverse[name + "." + name_dict[tax_id_parent]] = tax_id
            name_dict[tax_id] = name + "." + name_dict[tax_id_parent]
    rank_dict[tax_id] = rank
    if rank not in rank_dict_reverse:
        rank_dict_reverse[rank] = [tax_id]
    else:
        rank_dict_reverse[rank].append(tax_id)
    if tax_id not in name_object:  # should always hold (report lists each taxon once)
        name_object[tax_id] = Node()
    name_object[tax_id].tax_id = tax_id
    if ilevel > 0:
        name_object[tax_id].parent = tax_id_parent
    name_object[tax_id].name = name
    if ilevel > 0:
        # The parent was read on an earlier line, so it is already present.
        name_object[tax_id_parent].children.append(tax_id)
tax_file.close()
########################
# #
# reconstruct taxonomy #
# #
########################
convention = ["R","D","K","P","C","O","F","G","S"]  # main rank letters, root -> species
ranklist = [rank_dict["1"]]  # start from the root's rank
indx1 = convention.index(ranklist[-1])
all_ranks = list(rank_dict_reverse.keys())
all_main_ranks = [s[0] for s in all_ranks]  # first letter identifies the main rank
#print(" ".join(all_ranks))
#print(" ".join(all_main_ranks))
# Order every observed rank by main level; sub-ranks (e.g. "S1") sort
# alphabetically within their main level.
for main_level in convention[indx1:]:
    curr_ranks = [all_ranks[i] for i, e in enumerate(all_main_ranks) if e == main_level]
    curr_ranks.sort()
    ranklist = [*ranklist, *curr_ranks]
print("All ranks: " + " ".join(ranklist))
leftover_ranks = np.setdiff1d(all_ranks,ranklist)
print("Ranks in report that were not used: " + " ".join(leftover_ranks))
#####################
# #
# contig annotation #
# #
#####################
#function to calculate diversity of annotations
def shannonDiv(dictionary, sumTax):
    """Shannon entropy of the annotation counts, normalized by log2(sumTax).

    Returns -1.0 as a sentinel when the dict is empty or sumTax <= 1.
    """
    if not dictionary or sumTax <= 1:
        return -1.0
    norm = math.log(sumTax, 2)
    acc = 0.0
    for count in dictionary.values():
        p = float(count) / sumTax
        acc += p * math.log(p, 2) / norm
    return 0.00 - acc
# function to retrieve name and number of annotated bases for a taxon
def orgGenealCount(anc, taxDict, orgCnt, geneaDict, taxSum):
    """Accumulate orgCnt bases under the taxon name geneaDict[anc].

    When anc is missing from geneaDict, nothing changes.
    Returns the (possibly updated) taxDict and taxSum.
    """
    if anc in geneaDict:
        label = geneaDict[anc]
        count = int(orgCnt)
        taxSum += count
        taxDict[label] = taxDict.get(label, 0) + count
    return taxDict, taxSum
# function to retrieve taxon name
def orgGenealName(anc, geneaDict, taxName):
    """Return geneaDict[anc] when present, otherwise the fallback taxName."""
    return geneaDict.get(anc, taxName)
# function to test if lower taxon is in higher taxon and stop the annotation if necessary
def phyloTester(taxName, testList, retVal, annotationList):
    """Check that the newest taxon is nested inside the previously accepted one.

    Mutates testList (keeps at most the deepest consistent tax id) and
    annotationList (drops the last annotation on conflict).

    NOTE(review): the original assigned taxName/retVal locally without
    returning them, so those updates were invisible to the caller; the
    pair is now returned.  Callers that ignored the (previously None)
    return value are unaffected.
    """
    if taxName != "unknown":
        testList.append(name_dict_reverse[taxName])
        if len(testList) == 2:
            if testList[0] in name_object[testList[1]].genealogy():
                del testList[0]
            else:
                # Lower taxon is not an ancestor match: retract the
                # annotation and flag the failure.
                del annotationList[-1]
                retVal = -2.0
                taxName = "unknown"
    return taxName, retVal
ucnt = 0  # number of contigs Kraken left unclassified
krak_file = open(krakFile,"r")
out_file1 = open(outFile1, "w")
out_file1.write("contig" + "\t"+ "length" +"\t"+ "entropy" + "\t"+ "annotationLevel" + "\t"+ "\t".join(ranklist) +"\n")
while 1:
    linek = krak_file.readline()
    if linek == "":
        break
    linek = linek.rstrip()
    # per-contig columns: 0 C/U flag, 1 contig id, 3 length, 4 kmer "taxid:count" hits
    tabk = linek.split("\t")
    if tabk[0] == "U":
        # Unclassified contig: optionally report an all-NA row.
        if args.unknown:
            out_file1.write(tabk[1] + "\t"+ tabk[3] +"\t"+ "NA" + "\t"+ "not annotated" + "\t"+ "\t".join(["NA"] * len(ranklist)) +"\n")
        ucnt += 1
    else:
        # print(tabk[0])
        cotak = tabk[4].split(" ")
        orgs = {}
        # Accumulate k-mer counts for each hit taxon and all of its ancestors;
        # "A" (ambiguous) and "0" (no hit) entries are ignored.
        for i in cotak:
            orgID = i.split(":")[0]
            orgCount = i.split(":")[1]
            if orgID != "A" and orgID != "0":
                # if orgID not in orgs:
                #    orgs[orgID] = int(orgCount)
                # else:
                #    orgs[orgID] += int(orgCount)
                if orgID in name_dict:
                    for anc in name_object[orgID].genealogy(): #should return taxids
                        # print(rank_dict[anc] + " " + anc)
                        if anc not in orgs:
                            orgs[anc] = int(orgCount)
                        else:
                            orgs[anc] += int(orgCount)
                else:
                    print("Unknown tax ID " + orgID + " !")
        # Group the accumulated counts by rank; value = [total count,
        # distance from the deepest rank] so sorting prefers high counts,
        # then higher (less specific) ranks.
        ranksum_dict = {}
        rank_orgs = {}
        for ctax in orgs.keys():
            # print(rank_dict[ctax] + " " + ctax)
            # print(rank_dict[ctax])
            if rank_dict[ctax] not in ranksum_dict:
                ranksum_dict[rank_dict[ctax]] = [orgs[ctax],len(ranklist)-ranklist.index(rank_dict[ctax])]
                rank_orgs[rank_dict[ctax]] = {ctax: orgs[ctax]}
            else:
                ranksum_dict[rank_dict[ctax]][0] += orgs[ctax]
                rank_orgs[rank_dict[ctax]][ctax] = orgs[ctax]
        # print(ranksum_dict)
        cranks = sorted(ranksum_dict.items(), key=lambda x:(x[1][0],x[1][1]), reverse = True)
        rankwin_dict = {}
        winnerList = []
        first = True
        # Walk ranks from most to least supported: accept a rank's top taxon
        # while its annotation entropy stays under the cut-off and the taxon
        # is phylogenetically consistent with earlier winners.
        for rank in cranks:
            # print(rank)
            rank = rank[0]
            cdiv = shannonDiv(rank_orgs[rank],ranksum_dict[rank][0])
            if cdiv <= divthresh and cdiv >= 0:
                cwin = max(rank_orgs[rank].items(), key=operator.itemgetter(1))[0]
                winancs = name_object[cwin].genealogy()
                if first:
                    rankwin_dict[rank] = cwin
                    winnerList.append(cwin)
                    first = False
                elif not set(winnerList).isdisjoint(set(winancs)):
                    rankwin_dict[rank] = cwin
                    winnerList.append(cwin)
            else:
                break
        #out_file1.write(tabk[1] + "\t"+ tabk[3] +"\t"+ "NA" + "\t"+ "not annotated" + "\t"+ "\t".join(["NA"] * len(ranklist)) +"\n")
        # Emit one column per rank; 'went'/'wrank' end up holding the entropy
        # and name of the deepest annotated rank.
        ranked_winner_list = []
        went = "NA"
        wrank = "not annotated"
        for crank in ranklist:
            if crank in rankwin_dict:
                ranked_winner_list.append(name_dict[rankwin_dict[crank]])
                went = shannonDiv(rank_orgs[crank],ranksum_dict[crank][0])
                wrank = crank
            else:
                ranked_winner_list.append("NA")
        if went != "NA" or args.unknown:
            out_file1.write(tabk[1] + "\t"+ tabk[3] +"\t"+ str(went) + "\t"+ wrank + "\t"+ "\t".join(ranked_winner_list) +"\n")
krak_file.close()
out_file1.close()
# args.silent is store_false (defaults True): print the count unless -s given.
if args.silent:
    print(ucnt)
| [
"operator.itemgetter",
"numpy.setdiff1d",
"argparse.ArgumentParser",
"math.log"
] | [((118, 197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process output of Kraken run on contigs."""'}), "(description='Process output of Kraken run on contigs.')\n", (141, 197), False, 'import argparse\n'), ((6404, 6437), 'numpy.setdiff1d', 'np.setdiff1d', (['all_ranks', 'ranklist'], {}), '(all_ranks, ranklist)\n', (6416, 6437), True, 'import numpy as np\n'), ((6903, 6922), 'math.log', 'math.log', (['sumTax', '(2)'], {}), '(sumTax, 2)\n', (6911, 6922), False, 'import math\n'), ((10365, 10387), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (10384, 10387), False, 'import operator\n')] |
from profiles import *
import numpy as np
import matplotlib.pyplot as plt
NUM_GENS = 35     # number of generations to simulate
NUM_ROUNDS = 100  # rounds per generation in the round-robin tournament
INITIAL_PROFILE = defectors_with_some_tft()  # starting strategy counts (from profiles)
def run_simulation(init_profile: dict, num_gens, num_rounds):
    """Run the evolutionary tournament and record each strategy's share per generation.

    Returns a dict with key 'gens' (1..num_gens for the x-axis) plus one
    num_gens-long array per strategy in init_profile.
    """
    dist = {
        'gens': np.linspace(1, num_gens, num_gens)
    }
    init_gen = populationize(init_profile)
    init_dist = init_gen.distribution()
    for strat in init_profile:
        dist[strat] = np.zeros(num_gens)
        dist[strat][0] = init_dist[strat]
    curr_gen = init_gen
    # NOTE(review): on gen == 0 the loop below overwrites dist[strat][0],
    # so the initial distribution recorded above is lost — confirm whether
    # index 0 should show the pre-tournament distribution instead.
    for gen in range(num_gens):
        curr_gen, curr_dist = update_gen_dist(curr_gen, num_rounds)
        for strat in init_profile:
            if strat not in curr_dist:
                dist[strat][gen] = 0
            else:
                dist[strat][gen] = curr_dist[strat]
    return dist
def plot(simulation_results: dict):
    """Plot per-strategy population share over generations and show the figure.

    NOTE: pops 'gens' from the argument, i.e. mutates the caller's dict.
    """
    x_axis = simulation_results.pop('gens')
    for strat in simulation_results:
        plt.plot(x_axis, simulation_results[strat], label=strat)
    plt.title('Changes to population distribution with respect to time')
    plt.xlabel('Generation')
    plt.ylabel('Population Distribution [%]')
    plt.legend()
    plt.show()
def populationize(input_dict):
    """ Build a Population from a {strategy name: count} dict.

    Example --
    >>> sample_dict = {
            'Kantian': 2,
            'Defector': 1
        }
    >>> sample_population = populationize(sample_dict)
    is the same as doing:
    >>> sample_list = [Kantian(), Kantian(), Defector()]
    >>> sample_population = Population(sample_list)
    """
    profile = []
    for strategy in input_dict:
        if strategy not in all_strategies:
            raise Exception('Specified strategy does not exist.')
        else:
            # deepcopy so each individual carries independent state
            for i in range(input_dict[strategy]):
                profile.append(deepcopy(all_strategies[strategy]))
    return Population(profile)
def update_gen_dist(curr_gen, num_rounds):
    """Play num_rounds of round-robin, then return (next generation, its distribution)."""
    round_robin(curr_gen, num_rounds)
    next_gen = curr_gen.create_next_gen()
    return next_gen, next_gen.distribution()
if __name__ == '__main__':
    # Run the simulation with the module-level configuration and show the plot.
    a = run_simulation(INITIAL_PROFILE, NUM_GENS, NUM_ROUNDS)
    plot(a)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((990, 1058), 'matplotlib.pyplot.title', 'plt.title', (['"""Changes to population distribution with respect to time"""'], {}), "('Changes to population distribution with respect to time')\n", (999, 1058), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (1073, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1092, 1133), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population Distribution [%]"""'], {}), "('Population Distribution [%]')\n", (1102, 1133), True, 'import matplotlib.pyplot as plt\n'), ((1138, 1150), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1148, 1150), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1163, 1165), True, 'import matplotlib.pyplot as plt\n'), ((243, 277), 'numpy.linspace', 'np.linspace', (['(1)', 'num_gens', 'num_gens'], {}), '(1, num_gens, num_gens)\n', (254, 277), True, 'import numpy as np\n'), ((420, 438), 'numpy.zeros', 'np.zeros', (['num_gens'], {}), '(num_gens)\n', (428, 438), True, 'import numpy as np\n'), ((929, 985), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'simulation_results[strat]'], {'label': 'strat'}), '(x_axis, simulation_results[strat], label=strat)\n', (937, 985), True, 'import matplotlib.pyplot as plt\n')] |
import os
import re
import nltk
import numpy as np
from sklearn import feature_extraction
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import jaccard_similarity_score
from tqdm import tqdm
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import pickle
_wnl = nltk.WordNetLemmatizer()  # shared lemmatizer instance, reused by all helpers
def normalize_word(w):
    """Lemmatize *w* (WordNet) and lowercase the result."""
    return _wnl.lemmatize(w).lower()
def get_tokenized_lemmas(s):
    """Tokenize *s* with nltk and return the lowercase lemma of each token."""
    return [normalize_word(t) for t in nltk.word_tokenize(s)]
def clean(s):
    """Lowercase *s*, keeping only word characters joined by single spaces."""
    tokens = re.findall(r'\w+', s, flags=re.UNICODE)
    return " ".join(tokens).lower()
def remove_stopwords(l):
    """Drop English stopwords (sklearn's built-in list) from a list of tokens."""
    return [w for w in l if w not in feature_extraction.text.ENGLISH_STOP_WORDS]
def gen_or_load_feats(feat_fn, headlines, bodies, feature_file):
    """Compute features with *feat_fn* and cache them in *feature_file*; load on reuse.

    NOTE(review): np.save appends '.npy' when the name lacks that suffix,
    while os.path.isfile/np.load use the name verbatim — pass a name
    ending in '.npy' to keep the cache consistent.
    """
    if not os.path.isfile(feature_file):
        feats = feat_fn(headlines, bodies)
        np.save(feature_file, feats)
    return np.load(feature_file)
def word_overlap_features(headlines, bodies):
    """One feature per pair: Jaccard overlap of headline and body lemma sets.

    Returns a list of single-element lists (one |H∩B| / |H∪B| value per pair).
    """
    X = []
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_body = clean(body)
        clean_headline = get_tokenized_lemmas(clean_headline)
        clean_body = get_tokenized_lemmas(clean_body)
        union = set(clean_headline).union(clean_body)
        if not union:
            # Both texts tokenized to nothing: define the overlap as 0
            # instead of raising ZeroDivisionError.
            features = [0.0]
        else:
            features = [
                len(set(clean_headline).intersection(clean_body)) / float(len(union))]
        X.append(features)
    return X
def refuting_features(headlines, bodies):
    """Binary feature per refuting word: 1 if it appears among the headline lemmas.

    NOTE: 'invented' is listed twice; the duplicate is kept because removing
    it would change the feature-vector length downstream models expect.
    """
    _refuting_words = [
        'fabricated',
        'invented', 'invented',
        'fake',
        'fraud',
        'hoax',
        'false',
        'deny', 'denies',
        'not',
        'despite',
        'nope',
        'doubt', 'doubts',
        'bogus',
        'debunk',
        'pranks',
        'retract',
        'lie', 'lies',
        'bullshit',
        'stage', 'stagged',
        'rumor', 'rumors'
    ]
    X = []
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_headline = get_tokenized_lemmas(clean_headline)
        features = [1 if word in clean_headline else 0 for word in _refuting_words]
        X.append(features)
    return X
def polarity_features(headlines, bodies):
    """Two features per pair: refuting-lexicon score of the headline and of the body."""
    _refuting_words = [
        'fabricated',
        'invented', 'invented',
        'fake',
        'fraud',
        'hoax',
        'false',
        'deny', 'denies',
        'not',
        'despite',
        'nope',
        'doubt', 'doubts',
        'bogus',
        'debunk',
        'pranks',
        'retract',
        'lie', 'lies',
        'bullshit',
        'stage', 'stagged',
        'rumor', 'rumors'
    ]
    def calculate_polarity(text):
        # Number of refuting tokens in the text, normalized by the lexicon
        # size (not by the text length).
        tokens = get_tokenized_lemmas(text)
        return sum([t in _refuting_words for t in tokens]) / len(_refuting_words)
    X = []
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_body = clean(body)
        features = []
        features.append(calculate_polarity(clean_headline))
        features.append(calculate_polarity(clean_body))
        X.append(features)
    return np.array(X)
def ngrams(input, n):
    """Split *input* on single spaces and return every consecutive n-token window."""
    tokens = input.split(' ')
    return [tokens[i:i + n] for i in range(len(tokens) - n + 1)]
def chargrams(input, n):
    """Return every contiguous length-n slice of *input* (typically a string)."""
    return [input[i:i + n] for i in range(len(input) - n + 1)]
def append_chargrams(features, text_headline, text_body, size):
    """Append 3 counts to *features*: size-chargrams of the stopword-free
    headline found anywhere in the body, in its first 255 chars, and in
    its first 100 chars.  Returns the mutated list."""
    grams = [' '.join(x) for x in chargrams(" ".join(remove_stopwords(text_headline.split())), size)]
    grams_hits = 0
    grams_early_hits = 0
    grams_first_hits = 0
    for gram in grams:
        if gram in text_body:
            grams_hits += 1
        if gram in text_body[:255]:
            grams_early_hits += 1
        if gram in text_body[:100]:
            grams_first_hits += 1
    features.append(grams_hits)
    features.append(grams_early_hits)
    features.append(grams_first_hits)
    return features
def append_ngrams(features, text_headline, text_body, size):
    """Append 2 counts to *features*: headline word size-grams found anywhere
    in the body, and in its first 255 characters.  Returns the mutated list."""
    grams = [' '.join(x) for x in ngrams(text_headline, size)]
    grams_hits = 0
    grams_early_hits = 0
    for gram in grams:
        if gram in text_body:
            grams_hits += 1
        if gram in text_body[:255]:
            grams_early_hits += 1
    features.append(grams_hits)
    features.append(grams_early_hits)
    return features
def hand_features(headlines, bodies):
    """Concatenate all hand-crafted overlap counts for each headline/body pair."""
    def binary_co_occurence(headline, body):
        # Count how many times a token in the title
        # appears in the body text.
        bin_count = 0
        bin_count_early = 0
        for headline_token in clean(headline).split(" "):
            if headline_token in clean(body):
                bin_count += 1
            if headline_token in clean(body)[:255]:
                bin_count_early += 1
        return [bin_count, bin_count_early]
    def binary_co_occurence_stops(headline, body):
        # Count how many times a token in the title
        # appears in the body text. Stopwords in the title
        # are ignored.  (Both counters increment together here, so the
        # "early" count equals the full count by construction.)
        bin_count = 0
        bin_count_early = 0
        for headline_token in remove_stopwords(clean(headline).split(" ")):
            if headline_token in clean(body):
                bin_count += 1
                bin_count_early += 1
        return [bin_count, bin_count_early]
    def count_grams(headline, body):
        # Count how many times an n-gram of the title
        # appears in the entire body, and intro paragraph
        clean_body = clean(body)
        clean_headline = clean(headline)
        features = []
        features = append_chargrams(features, clean_headline, clean_body, 2)
        features = append_chargrams(features, clean_headline, clean_body, 8)
        features = append_chargrams(features, clean_headline, clean_body, 4)
        features = append_chargrams(features, clean_headline, clean_body, 16)
        features = append_ngrams(features, clean_headline, clean_body, 2)
        features = append_ngrams(features, clean_headline, clean_body, 3)
        features = append_ngrams(features, clean_headline, clean_body, 4)
        features = append_ngrams(features, clean_headline, clean_body, 5)
        features = append_ngrams(features, clean_headline, clean_body, 6)
        return features
    X = []
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        X.append(binary_co_occurence(headline, body)
                 + binary_co_occurence_stops(headline, body)
                 + count_grams(headline, body))
    return X
def sentiment_features(headlines, bodies):
    """Two features per pair via VADER: argmax of [neg, pos] for headline and body.

    Each feature is 0 when the negative score dominates, 1 otherwise.
    """
    def calculate_sentiment(text,analyzer):
        vs = analyzer.polarity_scores(text)
        return np.argmax([vs['neg'], vs['pos']])
    X = []
    analyzer = SentimentIntensityAnalyzer()
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_body = clean(body)
        features = []
        features.append(calculate_sentiment(clean_headline,analyzer))
        features.append(calculate_sentiment(clean_body,analyzer))
        X.append(features)
    return np.array(X)
def cosine_tfidf_features(headlines, bodies):
    """Cosine similarity between TF-IDF vectors of each headline/body pair.

    NOTE: the vectorizer is re-fit on every pair, so IDF weights come from
    just those two documents, not the whole corpus.
    """
    X = []
    tfidf = feature_extraction.text.TfidfVectorizer()
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_body = clean(body)
        matrix = tfidf.fit_transform([clean_headline, clean_body])
        X.append(cosine_similarity(matrix[0], matrix[1])[0][0])
    return np.array(X)
def bleu_features(headlines, bodies):
    """One BLEU score per headline/body pair.

    NOTE(review): sentence_bleu expects (references, hypothesis) as token
    lists; raw cleaned strings are passed here, so it effectively operates
    at character level — confirm this is intentional.
    """
    X = []
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_body = clean(body)
        matrix = nltk.translate.bleu_score.sentence_bleu(clean_headline, clean_body)
        X.append(matrix)
    return np.array(X)
def glove_features(headlines, bodies):
    """Per pair, return [headline indices, body indices] using the GloVe vocab.

    The headline is padded/truncated to 20 tokens and the body to 200; the
    model from get_glove() maps word -> vocabulary row index.
    """
    X = []
    model = get_glove()
    for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
        clean_headline = clean(headline)
        clean_body = clean(body)
        clean_headline = get_tokenized_lemmas(clean_headline)
        clean_body = get_tokenized_lemmas(clean_body)
        vector_headline = transform_text(model,clean_headline,20)
        vector_body = transform_text(model,clean_body,200)
        X.append([vector_headline, vector_body])
    return np.array(X)
def get_glove():
    """Return a word -> row-index dict for the GloVe vocabulary, cached as a pickle.

    NOTE(review): returns None implicitly when neither the pickle nor the
    .txt vocabulary file exists — callers should handle that case.
    """
    if os.path.isfile('rnn/data/glove.pickle'):
        with open('rnn/data/glove.pickle', 'rb') as handle:
            return pickle.load(handle)
    else:
        if os.path.isfile('rnn/data/glove.6B.100d.txt'):
            # Build the vocab from the embedding file: first token on each
            # line is the word; its value is the line (row) number.
            glove_dict = {}
            with open('rnn/data/glove.6B.100d.txt') as f:
                i=0
                for a in f:
                    glove_dict[a.split()[0]] = i
                    i = i+1
            with open('rnn/data/glove.pickle', 'wb') as handle:
                pickle.dump(glove_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
            return glove_dict
def transform_text(w2vmodel, words, maxlen=20):
    """Map *words* to vocabulary indices, padded/truncated to *maxlen*.

    Out-of-vocabulary words and padding positions both use the "unk" index.
    Returns a numpy array of length maxlen.
    """
    unk = w2vmodel["unk"]
    indices = []
    for pos in range(maxlen):
        if pos < len(words):
            indices.append(w2vmodel.get(words[pos], unk))
        else:
            indices.append(unk)
    return np.asarray(indices)
def get_glove_matrix():
    """Load the GloVe embedding matrix (one float row per vocabulary word).

    Reads rnn/data/glove.6B.100d.txt; returns an empty array when the file
    is missing.
    """
    X = []
    if os.path.isfile('rnn/data/glove.6B.100d.txt'):
        with open('rnn/data/glove.6B.100d.txt') as f:
            for a in f:
                # np.fromstring(..., sep=...) is deprecated; parse the
                # whitespace-split tokens (all but the leading word) directly.
                X.append(np.asarray(a.split()[1:], dtype=float))
    return np.array(X)
"vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"pickle.dump",
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.word_tokenize",
"nltk.WordNetLemmatizer",
"numpy.asarray",
"numpy.argmax",
"pickle.load",
"os.path.isfile",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectoriz... | [((312, 336), 'nltk.WordNetLemmatizer', 'nltk.WordNetLemmatizer', ([], {}), '()\n', (334, 336), False, 'import nltk\n'), ((1004, 1025), 'numpy.load', 'np.load', (['feature_file'], {}), '(feature_file)\n', (1011, 1025), True, 'import numpy as np\n'), ((3261, 3272), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3269, 3272), True, 'import numpy as np\n'), ((6956, 6984), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (6982, 6984), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((7327, 7338), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7335, 7338), True, 'import numpy as np\n'), ((7410, 7451), 'sklearn.feature_extraction.text.TfidfVectorizer', 'feature_extraction.text.TfidfVectorizer', ([], {}), '()\n', (7449, 7451), False, 'from sklearn import feature_extraction\n'), ((7740, 7751), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7748, 7751), True, 'import numpy as np\n'), ((8070, 8081), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (8078, 8081), True, 'import numpy as np\n'), ((8605, 8616), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (8613, 8616), True, 'import numpy as np\n'), ((8642, 8681), 'os.path.isfile', 'os.path.isfile', (['"""rnn/data/glove.pickle"""'], {}), "('rnn/data/glove.pickle')\n", (8656, 8681), False, 'import os\n'), ((9636, 9652), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (9646, 9652), True, 'import numpy as np\n'), ((9696, 9740), 'os.path.isfile', 'os.path.isfile', (['"""rnn/data/glove.6B.100d.txt"""'], {}), "('rnn/data/glove.6B.100d.txt')\n", (9710, 9740), False, 'import os\n'), ((9920, 9931), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (9928, 9931), True, 'import numpy as np\n'), ((882, 910), 'os.path.isfile', 'os.path.isfile', (['feature_file'], {}), '(feature_file)\n', (896, 910), False, 'import os\n'), ((963, 991), 'numpy.save', 'np.save', (['feature_file', 
'feats'], {}), '(feature_file, feats)\n', (970, 991), True, 'import numpy as np\n'), ((6895, 6928), 'numpy.argmax', 'np.argmax', (["[vs['neg'], vs['pos']]"], {}), "([vs['neg'], vs['pos']])\n", (6904, 6928), True, 'import numpy as np\n'), ((7966, 8033), 'nltk.translate.bleu_score.sentence_bleu', 'nltk.translate.bleu_score.sentence_bleu', (['clean_headline', 'clean_body'], {}), '(clean_headline, clean_body)\n', (8005, 8033), False, 'import nltk\n'), ((8803, 8847), 'os.path.isfile', 'os.path.isfile', (['"""rnn/data/glove.6B.100d.txt"""'], {}), "('rnn/data/glove.6B.100d.txt')\n", (8817, 8847), False, 'import os\n'), ((469, 490), 'nltk.word_tokenize', 'nltk.word_tokenize', (['s'], {}), '(s)\n', (487, 490), False, 'import nltk\n'), ((8762, 8781), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (8773, 8781), False, 'import pickle\n'), ((601, 640), 're.findall', 're.findall', (['"""\\\\w+"""', 's'], {'flags': 're.UNICODE'}), "('\\\\w+', s, flags=re.UNICODE)\n", (611, 640), False, 'import re\n'), ((9140, 9205), 'pickle.dump', 'pickle.dump', (['glove_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(glove_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (9151, 9205), False, 'import pickle\n'), ((7682, 7721), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['matrix[0]', 'matrix[1]'], {}), '(matrix[0], matrix[1])\n', (7699, 7721), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')] |
# coding: utf-8
# # Model
# In[551]:
# Notebook-only magic: render matplotlib figures at retina resolution.
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from itertools import groupby
from math import sqrt
# ### Calculate mode of the data
# In[552]:
def getMode(x):
    """Return the list of most frequent value(s) in *x*, including all ties."""
    # Counter.most_common() is sorted by descending count; grouping by the
    # count and taking the first group yields every value that shares the
    # maximal count.
    frequency = groupby(Counter(x).most_common(), lambda pair: pair[1])
    _, top_group = next(frequency)  # was frequency.next(), Python-2-only syntax
    mode = [val for val, count in top_group]
    return mode
# ## Distance measure
# In[553]:
class DistanceMeasure:
    """Namespace for the distance functions used by the nearest-neighbour search."""
    @staticmethod
    def EuclidianDistance(a, b):
        """Euclidean (L2) distance between two numeric vectors."""
        squared_gaps = (a - b) ** 2
        return np.sqrt(squared_gaps.sum())
# ## Feature scaling
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/b0aa2e7d203db1526c577192f2d9102b718eafd5">
# In[554]:
def scaleFeature(x):
    """Standardize a pandas Series to zero mean and unit (population) std.

    Implements z = (x - mean) / std with numpy's population standard deviation.
    """
    center = np.mean(x)
    spread = np.std(x)
    return x.apply(lambda v: (v * 1.0 - center) / spread)
# ## InvSortedLinkedList
# In[555]:
"""Node has been designed for storing targetValue and distance"""
class Node:
    """Singly-linked list node pairing a distance (*data*) with its target (*payload*)."""
    def __init__(self):
        self.data = None      # sort key (a distance)
        self.payload = None   # associated target label
        self.nextNode = None  # next node in the list, or None at the tail
class InvSortedLinkedList:
    """Linked list kept sorted in descending order of node.data.

    head holds the largest value and tail the smallest, so removeHead()
    drops the current worst (largest-distance) candidate in O(1).
    """
    head = None
    tail = None
    def insert(self, node):
        """Insert *node* keeping descending order.  O(n) worst case."""
        if self.head is None:
            # First insertion: node is both head and tail.
            self.head = node
            self.tail = node
        elif node.data > self.head.data:
            # New maximum: prepend.
            node.nextNode = self.head
            self.head = node
        elif node.data < self.tail.data:
            # New minimum: append.
            self.tail.nextNode = node
            self.tail = node
        else:
            # Walk to the insertion point.  The `is not None` guard fixes
            # an AttributeError the original raised when node.data tied the
            # current tail (e.g. duplicate distances in a 1-element list).
            ptr = self.head
            while ptr.nextNode is not None and ptr.nextNode.data > node.data:
                ptr = ptr.nextNode
            node.nextNode = ptr.nextNode
            ptr.nextNode = node
            if node.nextNode is None:
                # Inserted at the end: keep the tail pointer accurate
                # (the original left it stale in this case).
                self.tail = node
    def removeHead(self):
        self.head = self.head.nextNode
        """Garbage collector will remove the node without any references"""
# ## Nearest Neighbour
# In[556]:
class NearestNeighbour:
    """Brute-force K-nearest-neighbour search over a feature matrix."""
    __dataX = None              # training feature matrix (rows = samples)
    __dataY = None              # training targets, aligned with __dataX rows
    __distanceMeasure = None    # callable(row, query) -> distance
    def __init__(self, x, y, distanceMeasure):
        self.__dataX = x
        self.__dataY = y
        self.__distanceMeasure = distanceMeasure
    def getTopKTargets(self, x, k):
        """Return (targets, distances) of the k training points closest to x, best first."""
        distance = np.apply_along_axis(self.__distanceMeasure, 1, self.__dataX, x)
        # Keep the k best candidates as (distance, target) nodes in a
        # linked list sorted descending: the current worst candidate sits
        # at the head, so replacing it is O(1) and insertion O(k), with no
        # re-sorting or back-tracing from distances to targets.
        invList = InvSortedLinkedList()
        for i in range(0, k):
            n = Node()
            n.data = distance[i]
            n.payload = self.__dataY[i]
            invList.insert(n)
        # Scan the remaining points.  This must start at index k: the
        # original used range(k+1, ...), silently skipping candidate k.
        for i in range(k, len(distance)):
            if distance[i] < invList.head.data:
                n = Node()
                n.data = distance[i]
                n.payload = self.__dataY[i]
                invList.insert(n)
                # Drop the now-worst candidate in O(1).
                invList.removeHead()
        ptr = invList.head
        kTargets = []
        kDistance = []
        while ptr != None:
            kDistance.append(ptr.data)
            kTargets.append(ptr.payload)
            ptr = ptr.nextNode
        # The list runs worst-to-best; flip so index 0 is the closest.
        kTargets = kTargets[::-1]
        kDistance = kDistance[::-1]
        return kTargets, kDistance
# ## KNearestClassifier
# In[557]:
class KNearestClassifier(NearestNeighbour):
def predict(self, x, k):
candidates, candidateDistance = self.getTopKTargets(x, k)
mode = getMode(candidates)
if len(mode) == 1:
return mode[0]
else:
"""Ties found in the prediction. To solve this we will
remove least closest element and check for ties"""
while(True):
candidates = candidates[:-1]
mode = getMode(candidates)
"""Check if ties are broken"""
if len(mode) == 1:
return mode[0]
# ## KNearestRegressor
# In[558]:
class KNearestRegressor(NearestNeighbour):
def predict(self, x, k):
candidates, candidateDistance = self.getTopKTargets(x, k)
return np.mean(candidates)
# ## misclassificationRate
# In[559]:
def misclassificationRate(yTrue, yPrediction):
diff = yTrue - yPrediction
diff[diff != 0] = 1
return (1.0/len(diff))*np.sum(diff)
# ### RMSE procedure
# Will calculate root mean squared error for given Ytrue values and YPrediction.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/fc187c3557d633423444d4c80a4a50cd6ecc3dd4">
#
# In[560]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
return sqrt((1.0) * np.sum(np.square((yTrue - yPrediction))))/n
# ## Split data
# In[561]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac = trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
trainSet.reindex()
testSet.reindex()
return trainSet, testSet
# ### TextEncoder
#
# Here the data is mix of numbers and text. Text value cannot be directly used and should be converted to numeric data.<br>
# For this I have created a function text encoder which accepts a pandas series. Text encoder returns a lookUp dictionary for recreating the numeric value for text value and encoded text vector.
# For encoding I have applied a lambda function that will return value from dictionary.
# In[562]:
""" Converts the text features into numeric values so that they can be used by
the downstream algorithms.
Accepts pandas series and returns lookup dictionary and encoded vector"""
def textEncoder(textVector):
if type(textVector) == pd.core.series.Series:
lookUpDictionary = {}
lookupValue = 0
for key in textVector.unique():
lookUpDictionary[key] = lookupValue
lookupValue +=1
textVector = textVector.apply(lambda a: lookUpDictionary[a])
return lookUpDictionary,textVector
else:
raise TypeError("Expected a pandas series as an input")
# ### KFold analysis
# In[563]:
def kFoldAnalysis(xTrain, yTrain, model, modelParameters, nFolds, metric, gridParameters):
indices = np.array(range(0, len(xTrain)))
folds = np.array_split(indices, nFolds)
analysisMetricList = []
for i in range(0, len(folds)):
validationSet = folds[i]
"""Set difference"""
trainSet = np.setdiff1d(indices, validationSet)
modelParameters['x'] = np.take(xTrain, trainSet, axis = 0)
modelParameters['y'] = np.take(yTrain, trainSet, axis = 0)
validationX = np.take(xTrain, validationSet, axis = 0)
validationY = np.take(yTrain, validationSet, axis = 0)
mld = model(**modelParameters)
prediction = []
for xVal in validationX:
gridParameters['x'] = xVal
prediction.append(mld.predict(**gridParameters))
analysisMetricList.append(metric(validationY, prediction))
return analysisMetricList
# ## KNN for classification
# ### Iris dataset
# In[564]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# ### Load data
# In[610]:
""" File path change accordingly"""
directoryPath = "Data"
irisData = pd.read_csv(directoryPath+"/iris.data", names = ["sepalLength",
"sepalWidth", "petalLength", "petalWidth", "target"])
irisData.head()
# In[611]:
"""Generate numeric values for target"""
targetLookupDictionary, irisData['target'] = textEncoder(irisData['target'])
irisData.head()
# In[612]:
irisData.hist(figsize = (20, 20))
plt.suptitle("Data distribution")
plt.show()
# In[613]:
irisData.plot(kind='density', subplots=True, layout=(4,3), sharex=False, figsize = (20, 20))
plt.show()
# In[569]:
correlation = irisData.corr(method = "spearman")
correlation
# In[570]:
labels = list(irisData)
fig, ax = plt.subplots(1, 1, figsize=(25, 25))
ax.matshow(correlation)
ticks = range(0, len(labels))
cax = ax.matshow(correlation, vmin=-1, vmax=1)
fig.colorbar(cax)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
plt.show()
# ### Split data
# In[614]:
"""No data is dropped"""
trainSet, testSet = splitDataSet(irisData, 0.7)
xTrain = trainSet.as_matrix(columns = ["sepalLength","sepalWidth", "petalLength", "petalWidth"])
xTest = testSet.as_matrix(columns = ["sepalLength","sepalWidth", "petalLength", "petalWidth"])
yTrain = trainSet["target"].as_matrix()
yTest = testSet["target"].as_matrix()
print(xTrain.shape)
print(xTest.shape)
print(yTrain.shape)
print(yTest.shape)
# ### Train model
# In[615]:
classifier = KNearestClassifier(xTrain, yTrain, DistanceMeasure.EuclidianDistance)
# ### Predict the values
# In[616]:
yPrediction = []
k = 15
for x in xTest:
yPrediction.append(classifier.predict(x, k))
# ### Calculate misclassification rate
# In[617]:
print ("Misclassification is " + str(misclassificationRate(yTest, yPrediction)))
# ### Using wine data set
# ### Load data
# In[575]:
""" File path change accordingly"""
directoryPath = "Data"
wineData = pd.read_csv(directoryPath+"/winequality-red.csv", sep=";")
wineData.head()
# In[576]:
wineData.describe().T
# In[577]:
for feature in wineData:
if feature != "quality":
wineData[feature] = scaleFeature(wineData[feature])
# ### Visualize data
# In[578]:
wineData.hist(figsize = (20, 20))
plt.show()
# In[579]:
wineData.plot(kind='density', subplots=True, layout=(4,3), sharex=False, figsize = (20, 20))
plt.show()
# In[580]:
correlation = wineData.corr(method = "spearman")
correlation
# In[581]:
labels = list(wineData)
fig, ax = plt.subplots(1, 1, figsize=(25, 25))
ax.matshow(correlation)
ticks = range(0, len(labels))
cax = ax.matshow(correlation, vmin=-1, vmax=1)
fig.colorbar(cax)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
plt.show()
# There is a very high corelation between total sulfur dioxide and free sulfur dioxide. One of these features can be dropped. Correlation of free sulfur dioxide is very less with quality as compared to total sulfur dioxide. So we will drop free sulfur dioxide.
# In[582]:
selectedFeatures = list(wineData)
selectedFeatures.remove("quality")
selectedFeatures.remove('free sulfur dioxide')
# ### Split dataset
# In[583]:
trainSet, testSet = splitDataSet(wineData, 0.7)
xTrain = trainSet.as_matrix(columns = selectedFeatures)
xTest = testSet.as_matrix(columns = selectedFeatures)
yTrain = trainSet["quality"].as_matrix()
yTest = testSet["quality"].as_matrix()
print(xTrain.shape)
print(xTest.shape)
print(yTrain.shape)
print(yTest.shape)
# ### Train model
# In[584]:
classifier = KNearestClassifier(xTrain, yTrain, DistanceMeasure.EuclidianDistance)
# ### Predict the values
# In[585]:
yPrediction = []
k = 20
for x in xTest:
yPrediction.append(classifier.predict(x, k))
# ### Calculate misclassification rate
# In[586]:
print ("Misclassification is " + str(misclassificationRate(yTest, yPrediction)))
# ## KNN for regression
# ### Train model
# In[587]:
regressor = KNearestRegressor(xTrain, yTrain, DistanceMeasure.EuclidianDistance)
# ### Predict the values
# In[588]:
yPrediction = []
k = 20
for x in xTest:
yPrediction.append(regressor.predict(x, k))
# ### RMSE
# In[589]:
print ("RMSE is " + str(RMSE(yTest, yPrediction)))
# # Optimizing the value of K for classification
# Selecting optimal value of K is very important for KNN. If a very small value of K is selected then noise in data set will have very high impact on KNN algorithm. Very big values will be computationally expensive. Hence the value should be large enough that the noise has little effect and small enough to make calculation easy.<br>Many times the value K is selected as <br>sqrt(N), where N is the size of training set.<br>Here we will perform grid search to find the optimal value of K.
# In[590]:
trainSet, testSet = splitDataSet(wineData, 0.7)
xTrain = trainSet.as_matrix(columns = selectedFeatures)
xTest = testSet.as_matrix(columns = selectedFeatures)
yTrain = trainSet["quality"].as_matrix()
yTest = testSet["quality"].as_matrix()
print(xTrain.shape)
print(xTest.shape)
print(yTrain.shape)
print(yTest.shape)
# In[598]:
kGrid = range(2, 15)
avgLoss = None
optimalK = None
lossList = []
for k in kGrid:
loss = kFoldAnalysis(xTrain, yTrain, KNearestClassifier,
{"distanceMeasure":DistanceMeasure.EuclidianDistance}, 4, misclassificationRate, {"k":k})
lossList.append(np.average(loss))
if avgLoss == None or avgLoss > lossList[-1]:
avgLoss = lossList[-1]
optimalK = k
# In[599]:
print("Optimal value of K is "+ str(optimalK))
# In[605]:
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
plt.suptitle("K vs misclassification rate")
ax.plot(kGrid, lossList)
ax.plot(kGrid, lossList, "go")
ax.grid()
#ax.xticks(kGrid)
plt.show()
# In[603]:
"""Retrain model"""
classifier = KNearestClassifier(xTrain, yTrain, DistanceMeasure.EuclidianDistance)
yPrediction = []
for x in xTest:
yPrediction.append(classifier.predict(x, optimalK))
print ("Misclassification is " + str(misclassificationRate(yTest, yPrediction)))
# ### Optimizing the value of K for regression
# In[606]:
kGrid = range(2, 15)
avgLoss = None
optimalK = None
lossList = []
for k in kGrid:
loss = kFoldAnalysis(xTrain, yTrain, KNearestRegressor,
{"distanceMeasure":DistanceMeasure.EuclidianDistance}, 4, RMSE, {"k":k})
lossList.append(np.average(loss))
if avgLoss == None or avgLoss > lossList[-1]:
avgLoss = lossList[-1]
optimalK = k
# In[607]:
print("Optimal value of K is "+ str(optimalK))
# In[608]:
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
plt.suptitle("K vs RMSE rate")
ax.plot(kGrid, lossList)
ax.plot(kGrid, lossList, "go")
ax.grid()
#ax.xticks(kGrid)
plt.show()
# In[609]:
"""Retrain model"""
classifier = KNearestRegressor(xTrain, yTrain, DistanceMeasure.EuclidianDistance)
yPrediction = []
for x in xTest:
yPrediction.append(classifier.predict(x, optimalK))
print ("Misclassification is " + str(RMSE(yTest, yPrediction)))
| [
"numpy.mean",
"pandas.read_csv",
"numpy.average",
"numpy.square",
"matplotlib.pyplot.suptitle",
"numpy.array_split",
"numpy.sum",
"numpy.apply_along_axis",
"numpy.setdiff1d",
"numpy.take",
"collections.Counter",
"numpy.std",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((8777, 8898), 'pandas.read_csv', 'pd.read_csv', (["(directoryPath + '/iris.data')"], {'names': "['sepalLength', 'sepalWidth', 'petalLength', 'petalWidth', 'target']"}), "(directoryPath + '/iris.data', names=['sepalLength',\n 'sepalWidth', 'petalLength', 'petalWidth', 'target'])\n", (8788, 8898), True, 'import pandas as pd\n'), ((9171, 9204), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Data distribution"""'], {}), "('Data distribution')\n", (9183, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9205, 9215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9213, 9215), True, 'import matplotlib.pyplot as plt\n'), ((9324, 9334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9332, 9334), True, 'import matplotlib.pyplot as plt\n'), ((9460, 9496), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(25, 25)'}), '(1, 1, figsize=(25, 25))\n', (9472, 9496), True, 'import matplotlib.pyplot as plt\n'), ((9714, 9724), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9722, 9724), True, 'import matplotlib.pyplot as plt\n'), ((10697, 10757), 'pandas.read_csv', 'pd.read_csv', (["(directoryPath + '/winequality-red.csv')"], {'sep': '""";"""'}), "(directoryPath + '/winequality-red.csv', sep=';')\n", (10708, 10757), True, 'import pandas as pd\n'), ((11009, 11019), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11017, 11019), True, 'import matplotlib.pyplot as plt\n'), ((11128, 11138), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11136, 11138), True, 'import matplotlib.pyplot as plt\n'), ((11264, 11300), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(25, 25)'}), '(1, 1, figsize=(25, 25))\n', (11276, 11300), True, 'import matplotlib.pyplot as plt\n'), ((11518, 11528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11526, 11528), True, 'import matplotlib.pyplot as plt\n'), ((14381, 14417), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], 
{'figsize': '(15, 10)'}), '(1, 1, figsize=(15, 10))\n', (14393, 14417), True, 'import matplotlib.pyplot as plt\n'), ((14419, 14462), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""K vs misclassification rate"""'], {}), "('K vs misclassification rate')\n", (14431, 14462), True, 'import matplotlib.pyplot as plt\n'), ((14548, 14558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14556, 14558), True, 'import matplotlib.pyplot as plt\n'), ((15377, 15413), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(15, 10)'}), '(1, 1, figsize=(15, 10))\n', (15389, 15413), True, 'import matplotlib.pyplot as plt\n'), ((15415, 15445), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""K vs RMSE rate"""'], {}), "('K vs RMSE rate')\n", (15427, 15445), True, 'import matplotlib.pyplot as plt\n'), ((15531, 15541), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15539, 15541), True, 'import matplotlib.pyplot as plt\n'), ((846, 856), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (853, 856), True, 'import numpy as np\n'), ((876, 885), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (882, 885), True, 'import numpy as np\n'), ((7613, 7644), 'numpy.array_split', 'np.array_split', (['indices', 'nFolds'], {}), '(indices, nFolds)\n', (7627, 7644), True, 'import numpy as np\n'), ((2539, 2602), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.__distanceMeasure', '(1)', 'self.__dataX', 'x'], {}), '(self.__distanceMeasure, 1, self.__dataX, x)\n', (2558, 2602), True, 'import numpy as np\n'), ((5343, 5362), 'numpy.mean', 'np.mean', (['candidates'], {}), '(candidates)\n', (5350, 5362), True, 'import numpy as np\n'), ((5553, 5565), 'numpy.sum', 'np.sum', (['diff'], {}), '(diff)\n', (5559, 5565), True, 'import numpy as np\n'), ((7801, 7837), 'numpy.setdiff1d', 'np.setdiff1d', (['indices', 'validationSet'], {}), '(indices, validationSet)\n', (7813, 7837), True, 'import numpy as np\n'), ((7878, 7911), 'numpy.take', 'np.take', (['xTrain', 'trainSet'], 
{'axis': '(0)'}), '(xTrain, trainSet, axis=0)\n', (7885, 7911), True, 'import numpy as np\n'), ((7945, 7978), 'numpy.take', 'np.take', (['yTrain', 'trainSet'], {'axis': '(0)'}), '(yTrain, trainSet, axis=0)\n', (7952, 7978), True, 'import numpy as np\n'), ((8012, 8050), 'numpy.take', 'np.take', (['xTrain', 'validationSet'], {'axis': '(0)'}), '(xTrain, validationSet, axis=0)\n', (8019, 8050), True, 'import numpy as np\n'), ((8075, 8113), 'numpy.take', 'np.take', (['yTrain', 'validationSet'], {'axis': '(0)'}), '(yTrain, validationSet, axis=0)\n', (8082, 8113), True, 'import numpy as np\n'), ((14169, 14185), 'numpy.average', 'np.average', (['loss'], {}), '(loss)\n', (14179, 14185), True, 'import numpy as np\n'), ((15165, 15181), 'numpy.average', 'np.average', (['loss'], {}), '(loss)\n', (15175, 15181), True, 'import numpy as np\n'), ((646, 666), 'numpy.sum', 'np.sum', (['((a - b) ** 2)'], {}), '((a - b) ** 2)\n', (652, 666), True, 'import numpy as np\n'), ((400, 410), 'collections.Counter', 'Counter', (['x'], {}), '(x)\n', (407, 410), False, 'from collections import Counter\n'), ((5919, 5949), 'numpy.square', 'np.square', (['(yTrue - yPrediction)'], {}), '(yTrue - yPrediction)\n', (5928, 5949), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
def mixup_data(x, y, alpha):
# https://github.com/vikasverma1077/manifold_mixup/blob/master/supervised/utils.py
'''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
if alpha > 0.:
lam = np.random.beta(alpha, alpha)
if lam < 0.5:
lam = 1 - lam
else:
lam = 1.
if type(x) != tuple:
batch_size = x.size()[0]
index = torch.randperm(batch_size).cuda()
mixed_x = lam * x + (1 - lam) * x[index,:]
else:
batch_size = x[0].size()[0]
index = torch.randperm(batch_size).cuda()
mixed_x = []
for _x in x:
mixed_x.append(lam * _x + (1 - lam) * _x[index,:])
mixed_x = tuple(mixed_x)
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
class CalibrationMixup(nn.Module):
def __init__(self, layer_number=None):
super(CalibrationMixup, self).__init__()
self.p = Parameter(torch.zeros(1))
self.layer_number = layer_number
def forward(self, x, y, alpha):
if alpha > 0.:
lam = np.random.beta(alpha, alpha)
if lam < 0.5:
lam = 1 - lam
else:
lam = 1.
# caliblation
eps = 1e-6
m1_lam = np.clip((1 - lam), eps, 1 - eps)
#exponent = torch.exp(self.p)
exponent = torch.log(1 + torch.exp(self.p) * (np.exp(1) - 1))
lam_calib = torch.clamp(1 - torch.pow(2 * m1_lam, exponent) / 2, eps, 1 - eps)
if type(x) != tuple:
batch_size = x.size()[0]
index = torch.randperm(batch_size).cuda()
#mixed_x = lam * x + (1 - lam) * x[index,:]
mixed_x = lam_calib * x + (1 - lam_calib) * x[index,:]
else:
batch_size = x[0].size()[0]
index = torch.randperm(batch_size).cuda()
mixed_x = []
for _x in x:
#mixed_x.append(lam * _x + (1 - lam) * _x[index,:])
mixed_x.append(lam_calib * _x + (1 - lam_calib) * _x[index,:])
mixed_x = tuple(mixed_x)
y_a, y_b = y, y[index]
#return mixed_x, y_a, y_b, lam_calib
return mixed_x, y_a, y_b, lam
def CrossEntropyLossForMixup(num_class=100, label_smooth=0.0):
def loss_func(input, y_a, y_b, lam):
soft_target = _get_mixed_soft_target(y_a, y_b, lam, num_class=num_class, label_smooth=0.0)
loss = _CrossEntropyLossWithSoftTarget(input, soft_target)
return loss
return loss_func
def _get_mixed_soft_target(y_a, y_b, lam, num_class=100, label_smooth=0.0):
onehot_a = (torch.eye(num_class)[y_a] * (1- label_smooth) + label_smooth / num_class).cuda()
onehot_b = (torch.eye(num_class)[y_b] * (1- label_smooth) + label_smooth / num_class).cuda()
return lam * onehot_a + (1 - lam) * onehot_b
def _CrossEntropyLossWithSoftTarget(input, soft_target):
loss = - torch.mean(torch.sum(F.log_softmax(input, dim=1) * soft_target, dim=1))
return loss
| [
"numpy.clip",
"numpy.random.beta",
"torch.randperm",
"torch.eye",
"torch.exp",
"torch.pow",
"numpy.exp",
"torch.nn.functional.log_softmax",
"torch.zeros"
] | [((371, 399), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (385, 399), True, 'import numpy as np\n'), ((1441, 1471), 'numpy.clip', 'np.clip', (['(1 - lam)', 'eps', '(1 - eps)'], {}), '(1 - lam, eps, 1 - eps)\n', (1448, 1471), True, 'import numpy as np\n'), ((1114, 1128), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1125, 1128), False, 'import torch\n'), ((1254, 1282), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (1268, 1282), True, 'import numpy as np\n'), ((558, 584), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (572, 584), False, 'import torch\n'), ((711, 737), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (725, 737), False, 'import torch\n'), ((1547, 1564), 'torch.exp', 'torch.exp', (['self.p'], {}), '(self.p)\n', (1556, 1564), False, 'import torch\n'), ((1621, 1652), 'torch.pow', 'torch.pow', (['(2 * m1_lam)', 'exponent'], {}), '(2 * m1_lam, exponent)\n', (1630, 1652), False, 'import torch\n'), ((1763, 1789), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (1777, 1789), False, 'import torch\n'), ((2001, 2027), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (2015, 2027), False, 'import torch\n'), ((3134, 3161), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (3147, 3161), True, 'import torch.nn.functional as F\n'), ((1568, 1577), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1574, 1577), True, 'import numpy as np\n'), ((2810, 2830), 'torch.eye', 'torch.eye', (['num_class'], {}), '(num_class)\n', (2819, 2830), False, 'import torch\n'), ((2908, 2928), 'torch.eye', 'torch.eye', (['num_class'], {}), '(num_class)\n', (2917, 2928), False, 'import torch\n')] |
from array import array
import numpy as np
import struct
import sys
import os
class MNISTLoader:
def __init__(self, path):
self.path = path
self.train_img_fname = 'train-images-idx3-ubyte'
self.train_lbl_fname = 'train-labels-idx1-ubyte'
self.train_images, self.train_labels = [], []
self.test_img_fname = 't10k-images-idx3-ubyte'
self.test_lbl_fname = 't10k-labels-idx1-ubyte'
self.test_images, self.test_labels = [], []
self.num_classes = 10
self.rows = 28
self.cols = 28
self.channels = 1
self.load_data()
def load_data(self):
imgs, labels = self.load(os.path.join(self.path, self.train_img_fname),
os.path.join(self.path, self.train_lbl_fname))
self.train_images = self.process_images(imgs)
self.train_labels = self.process_labels(labels)
print('Train data:', self.train_images.shape, self.train_labels.shape)
imgs, labels = self.load(os.path.join(self.path, self.test_img_fname),
os.path.join(self.path, self.test_lbl_fname))
self.test_images = self.process_images(imgs)
self.test_labels = self.process_labels(labels)
print('Test data:', self.test_images.shape, self.test_labels.shape)
@classmethod
def load(cls, path_img, path_lbl):
with open(path_lbl, 'rb') as file:
magic, size = struct.unpack(">II", file.read(8))
if magic != 2049:
raise ValueError('Magic number mismatch, expected 2049,'
'got {}'.format(magic))
labels = array("B", file.read())
with open(path_img, 'rb') as file:
magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
if magic != 2051:
raise ValueError('Magic number mismatch, expected 2051,'
'got {}'.format(magic))
image_data = array("B", file.read())
images = []
for i in range(size):
images.append([0] * rows * cols)
for i in range(size):
images[i][:] = image_data[i * rows * cols:(i + 1) * rows * cols]
return images, labels
@staticmethod
def process_images(images):
return np.array(images) / 255.
@staticmethod
def process_labels(labels):
return np.array(labels)[:, np.newaxis]
| [
"numpy.array",
"os.path.join"
] | [((674, 719), 'os.path.join', 'os.path.join', (['self.path', 'self.train_img_fname'], {}), '(self.path, self.train_img_fname)\n', (686, 719), False, 'import os\n'), ((733, 778), 'os.path.join', 'os.path.join', (['self.path', 'self.train_lbl_fname'], {}), '(self.path, self.train_lbl_fname)\n', (745, 778), False, 'import os\n'), ((1003, 1047), 'os.path.join', 'os.path.join', (['self.path', 'self.test_img_fname'], {}), '(self.path, self.test_img_fname)\n', (1015, 1047), False, 'import os\n'), ((1061, 1105), 'os.path.join', 'os.path.join', (['self.path', 'self.test_lbl_fname'], {}), '(self.path, self.test_lbl_fname)\n', (1073, 1105), False, 'import os\n'), ((2289, 2305), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2297, 2305), True, 'import numpy as np\n'), ((2379, 2395), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2387, 2395), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Aerodynamic loads
# <NAME>
class Loads:
def __init__(self):
self.ys = [] # spanwise stations
self.chds = [] # chord
self.data = {} # coordinates and pressure coefficient
self.cls = [] # lift
self.cms = [] # moment positive nose-up (clockwise)
self.cds = [] # pressure drag
def add(self, y, pts, cp):
'''Add data for a section along the span
'''
import numpy as np
self.ys.append(y)
c = max(pts[:,0]) - min(pts[:,0])
self.chds.append(c)
x_c = np.zeros(((pts.shape[0], 1)))
x_c[:,0] = (pts[:,0] - min(pts[:,0])) / c
self.data[len(self.ys)-1] = np.hstack((pts, x_c, cp))
def compute(self, alpha = 0):
'''Compute the sectional aerodynamic load coefficients
'''
import numpy as np
alpha = np.deg2rad(alpha)
for j in range(0, len(self.ys)):
x = self.data[j][:,0]
z = self.data[j][:,2]
cp = self.data[j][:,4]
c = self.chds[j] # chord
c_4 = np.min(x) + 0.25*c # quarter-chord position
# integrate pressure coefficient
i = 0
cz = 0
cx = 0
cm = 0
while i < (x.shape[0]-1):
dx = (x[i+1] - x[i]) / c
dz = -(z[i+1] - z[i]) / c
cz -= 0.5 * dx * (cp[i+1] + cp[i])
cx -= 0.5 * dz * (cp[i+1] + cp[i])
cm -= -0.5*(cp[i+1]*(x[i+1]-c_4) + cp[i]*(x[i]-c_4)) * dx/c + 0.5*(cp[i+1]*z[i+1] + cp[i]*z[i]) * dz/c # positive nose-up (clockwise)
i = i+1
# rotate to flow direction
cl = cz*np.cos(alpha) - cx*np.sin(alpha)
cd = cz*np.sin(alpha) + cx*np.cos(alpha)
self.cls.append(cl)
self.cms.append(cm)
self.cds.append(cd)
def display(self):
'''Display the results
'''
print('y = ', self.ys)
print('Cl = ', self.cls)
print('Cm = ', self.cms)
print('Cd = ', self.cds)
def plot(self):
'''Plot the sectional pressure and loads
'''
import matplotlib.pyplot as plt
# Pressure
fig, ax = plt.subplots()
ax.set_xlabel('x/c')
ax.set_ylabel('Cp')
ax.invert_yaxis()
for i in range(0, len(self.ys)):
ax.plot(self.data[i][:,3], self.data[i][:,4], label = 'y = '+str(self.ys[i]))
ax.legend()
plt.draw()
# Loads
fig, ax1 = plt.subplots()
# left axis
color = 'tab:blue'
ax1.set_xlabel('y')
ax1.set_ylabel('Cl', color=color)
ax1.plot(self.ys, self.cls, color=color)
ax1.tick_params(axis='y', labelcolor=color)
# right axis
ax2 = ax1.twinx()
color = 'tab:red'
ax2.set_ylabel('Cm', color=color)
ax2.plot(self.ys, self.cms, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
def write(self):
'''Write to disk
'''
import numpy as np
# pressure
for j in range(0, len(self.ys)):
print('writing pressure data file in workspace directory: slice_' + str(j) + '.dat...')
np.savetxt('slice_'+str(j)+'.dat', self.data[j], fmt='%1.5e', delimiter=',', header='x, y, z, x/c, Cp @ y='+str(self.ys[j]), comments='')
# loads
loads = np.transpose(np.vstack((self.ys, self.cls, self.cms, self.cds)))
print('writing loads data file in workspace directory: loads.dat...')
np.savetxt('loads.dat', loads, fmt='%1.5e', delimiter=',', header='y, Cl, Cm, Cd', comments='')
| [
"numpy.hstack",
"numpy.sin",
"numpy.deg2rad",
"numpy.zeros",
"numpy.vstack",
"numpy.savetxt",
"numpy.min",
"numpy.cos",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((610, 637), 'numpy.zeros', 'np.zeros', (['(pts.shape[0], 1)'], {}), '((pts.shape[0], 1))\n', (618, 637), True, 'import numpy as np\n'), ((726, 751), 'numpy.hstack', 'np.hstack', (['(pts, x_c, cp)'], {}), '((pts, x_c, cp))\n', (735, 751), True, 'import numpy as np\n'), ((905, 922), 'numpy.deg2rad', 'np.deg2rad', (['alpha'], {}), '(alpha)\n', (915, 922), True, 'import numpy as np\n'), ((2293, 2307), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2305, 2307), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2560), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2558, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2610), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2608, 2610), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3139, 3141), True, 'import matplotlib.pyplot as plt\n'), ((3721, 3821), 'numpy.savetxt', 'np.savetxt', (['"""loads.dat"""', 'loads'], {'fmt': '"""%1.5e"""', 'delimiter': '""","""', 'header': '"""y, Cl, Cm, Cd"""', 'comments': '""""""'}), "('loads.dat', loads, fmt='%1.5e', delimiter=',', header=\n 'y, Cl, Cm, Cd', comments='')\n", (3731, 3821), True, 'import numpy as np\n'), ((3583, 3633), 'numpy.vstack', 'np.vstack', (['(self.ys, self.cls, self.cms, self.cds)'], {}), '((self.ys, self.cls, self.cms, self.cds))\n', (3592, 3633), True, 'import numpy as np\n'), ((1122, 1131), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1128, 1131), True, 'import numpy as np\n'), ((1743, 1756), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1749, 1756), True, 'import numpy as np\n'), ((1762, 1775), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1768, 1775), True, 'import numpy as np\n'), ((1796, 1809), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (1802, 1809), True, 'import numpy as np\n'), ((1815, 1828), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (1821, 1828), True, 'import numpy as np\n')] |
from sympy import *
from matplotlib import pyplot as plt
import numpy as np
from random import randint
y, n = symbols('y n')
f = cos(y*log(n+1))/cos(y*log(n))*(1+1/n)**(-1/2)
# print(limit(f,n,oo)) maximum recursion limit exceeded ....
print(
cos(y*log(n+1))/cos(y*log(n)) ==
cos(y*log(1+1/n))-tan(y*log(n))*sin(y*log(1+1/n))
)
Y = 13
M = 10000
n1 = np.linspace(2,M,M-1)
y1 = np.cos(Y*np.log(n1+1))/np.cos(Y*np.log(n1))*(1+1/n1)**(-1/2)
# y2 = (
# np.cos(Y*np.log(1+1/n1))-np.tan(Y*np.log(n1))*np.sin(Y*np.log(1+1/n1))
# )*(1+1/n1)**(-1/2)
y2 = np.sin(Y*np.log(n1+1))/np.sin(Y*np.log(n1))*(1+1/n1)**(-1/2)
plt.plot(n1,y1)
plt.plot(n1,y2)
plt.show()
| [
"numpy.log",
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((359, 383), 'numpy.linspace', 'np.linspace', (['(2)', 'M', '(M - 1)'], {}), '(2, M, M - 1)\n', (370, 383), True, 'import numpy as np\n'), ((619, 635), 'matplotlib.pyplot.plot', 'plt.plot', (['n1', 'y1'], {}), '(n1, y1)\n', (627, 635), True, 'from matplotlib import pyplot as plt\n'), ((635, 651), 'matplotlib.pyplot.plot', 'plt.plot', (['n1', 'y2'], {}), '(n1, y2)\n', (643, 651), True, 'from matplotlib import pyplot as plt\n'), ((651, 661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (659, 661), True, 'from matplotlib import pyplot as plt\n'), ((394, 408), 'numpy.log', 'np.log', (['(n1 + 1)'], {}), '(n1 + 1)\n', (400, 408), True, 'import numpy as np\n'), ((417, 427), 'numpy.log', 'np.log', (['n1'], {}), '(n1)\n', (423, 427), True, 'import numpy as np\n'), ((567, 581), 'numpy.log', 'np.log', (['(n1 + 1)'], {}), '(n1 + 1)\n', (573, 581), True, 'import numpy as np\n'), ((590, 600), 'numpy.log', 'np.log', (['n1'], {}), '(n1)\n', (596, 600), True, 'import numpy as np\n')] |
### tensorflow==2.3.0
### https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html
### https://google.github.io/mediapipe/solutions/pose
### https://www.tensorflow.org/api_docs/python/tf/keras/Model
### https://www.tensorflow.org/lite/guide/ops_compatibility
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer
### https://github.com/google/mediapipe/issues/245
### https://github.com/mvoelk/keras_layers
### How to initialize a convolution layer with an arbitrary kernel in Keras? https://stackoverrun.com/ja/q/12269118
### saved_model_cli show --dir saved_model/ --tag_set serve --signature_def serving_default
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer
from tensorflow.keras.initializers import Constant
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import nn_ops
import numpy as np
import sys
import cv2
# tmp = np.load('weights/depthwise_conv2d_Kernel')
# print(tmp.shape)
# print(tmp)
# def init_f(shape, dtype=None):
# ker = np.load('weights/depthwise_conv2d_Kernel')
# print(shape)
# return ker
# sys.exit(0)
# class MaxPoolingWithArgmax2D(Layer):
# def __init__(self, pool_size=(2, 2), strides=(2, 2), padding='same', **kwargs):
# super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
# self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
# self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
# self.padding = conv_utils.normalize_padding(padding)
# def call(self, inputs, **kwargs):
# ksize = [1, self.pool_size[0], self.pool_size[1], 1]
# strides = [1, self.strides[0], self.strides[1], 1]
# padding = self.padding.upper()
# output, argmax = nn_ops.max_pool_with_argmax(inputs, ksize, strides, padding)
# # output, argmax = tf.raw_ops.MaxPoolWithArgmax(inputs, ksize, strides, padding)
# argmax = tf.cast(argmax, K.floatx())
# return [output, argmax]
# def compute_output_shape(self, input_shape):
# ratio = (1, 2, 2, 1)
# output_shape = [dim // ratio[idx] if dim is not None else None for idx, dim in enumerate(input_shape)]
# output_shape = tuple(output_shape)
# return [output_shape, output_shape]
# def compute_mask(self, inputs, mask=None):
# return 2 * [None]
# def get_config(self):
# config = super(MaxPoolingWithArgmax2D, self).get_config()
# config.update({
# 'pool_size': self.pool_size,
# 'strides': self.strides,
# 'padding': self.padding,
# })
# return config
def max_pooling_with_argmax2d(input):
    """Pure-TF fallback for 2x2/stride-2 max pooling with argmax indices.

    Returns ``[pooled, indices]`` where ``pooled`` is the 2x2/stride-2 max
    pool of ``input`` and ``indices`` holds, for each output position and
    channel, the argmax offset (0..3) within its 2x2 window, accumulated
    one window at a time via sparse adds.

    NOTE(review): these indices are *local* window offsets (0-3), unlike
    tf.raw_ops.MaxPoolWithArgmax, which returns flattened global indices —
    confirm semantics before swapping one implementation for the other.
    NOTE: the triple Python loop makes this O(N*H*W) graph ops; only
    practical for tiny inputs. The model below uses the raw op instead.
    """
    net_main = tf.nn.max_pool(input,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')
    input_shape = input.get_shape().as_list()
    mask_shape = [input_shape[0], input_shape[1] // 2, input_shape[2] // 2, input_shape[3]]
    pooling_indices = tf.zeros(mask_shape, dtype=tf.int64)
    for n in range(mask_shape[0]):
        for i in range(mask_shape[1]):
            for j in range(mask_shape[2]):
                # Gather the four pixels of the 2x2 window (all channels),
                # then take the per-channel argmax over those four rows.
                in_indices = [[n, w, h] for w in range(i * 2, i * 2 + 2) for h in range(j * 2, j * 2 + 2)]
                window = tf.gather_nd(input, in_indices)  # renamed from `slice`, which shadowed the builtin
                argmax = tf.argmax(window, axis=0)
                # Scatter this window's argmax into the dense index tensor.
                indices_location = [[n, i, j, d] for d in range(input_shape[3])]
                sparse_indices = tf.SparseTensor(indices=indices_location, values=argmax, dense_shape=mask_shape)
                pooling_indices = tf.compat.v1.sparse_add(pooling_indices, sparse_indices)
    return [net_main, pooling_indices]
class MaxUnpooling2D(Layer):
    """SegNet-style max unpooling layer.

    Inverse of 2x2 max pooling: takes ``[updates, mask]`` — the pooled
    values and the flattened argmax indices recorded by the pooling op —
    and scatters each value back to the position its index encodes,
    producing a tensor ``size`` times larger in height and width with
    zeros everywhere else.
    """
    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        # Upsampling factor per spatial axis, normalized to a 2-tuple.
        self.size = conv_utils.normalize_tuple(size, 2, 'size')
    def call(self, inputs, output_shape=None):
        # inputs[0]: pooled values; inputs[1]: flat argmax indices
        # (as produced by MaxPoolWithArgmax — assumed NHWC flattening).
        updates, mask = inputs[0], inputs[1]
        mask = tf.cast(mask, 'int32')
        input_shape = tf.shape(updates, out_type='int32')
        # Default output shape: spatial dims scaled by self.size.
        if output_shape is None:
            output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
        # Decode the flat indices into (batch, y, x, feature) coordinates.
        one_like_mask = K.ones_like(mask, dtype='int32')
        batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
        batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
        b = one_like_mask * batch_range
        # Flat index layout is y * (W * C) + x * C + f, so:
        y = mask // (output_shape[2] * output_shape[3])
        x = (mask // output_shape[3]) % output_shape[2]
        feature_range = tf.range(output_shape[3], dtype='int32')
        f = one_like_mask * feature_range
        # Stack coordinates into an (N, 4) index matrix, flatten the values,
        # and scatter them into the (zero-initialized) output tensor.
        updates_size = tf.size(updates)
        indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
        values = K.reshape(updates, [updates_size])
        ret = tf.scatter_nd(indices, values, output_shape)
        return ret
    def compute_output_shape(self, input_shape):
        # input_shape is [values_shape, mask_shape]; output scales the
        # mask's spatial dims by the unpooling factor.
        mask_shape = input_shape[1]
        output_shape = [mask_shape[0], mask_shape[1] * self.size[0], mask_shape[2] * self.size[1], mask_shape[3]]
        return tuple(output_shape)
    def get_config(self):
        # Include `size` so the layer round-trips through serialization.
        config = super(MaxUnpooling2D, self).get_config()
        config.update({
            'size': self.size,
        })
        return config
height = 512
width = 512
inputs = Input(shape=(height, width, 4), batch_size=1, name='input')
# Block_01
conv1_1 = Conv2D(filters=8, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_Bias')))(inputs)
prelu1_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_Alpha')), shared_axes=[1, 2])(conv1_1)
conv1_2 = Conv2D(filters=32, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_1_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_1_Bias')))(prelu1_1)
prelu1_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_1_Alpha')), shared_axes=[1, 2])(conv1_2)
# Block_02
conv2_1 = Conv2D(filters=16, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_2_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_2_Bias')))(prelu1_2)
prelu2_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_2_Alpha')), shared_axes=[1, 2])(conv2_1)
depthconv2_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_Bias')))(prelu2_1)
conv2_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_3_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_3_Bias')))(depthconv2_1)
prelu2_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_3_Alpha')), shared_axes=[1, 2])(conv2_2)
depthconv2_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_1_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_1_Bias')))(prelu2_2)
prelu2_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_4_Alpha')), shared_axes=[1, 2])(depthconv2_2)
conv2_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_4_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_4_Bias')))(prelu2_3)
maxpoolarg2_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg2_1 = max_pooling_with_argmax2d(prelu1_2)
conv2_4 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_5_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_5_Bias')))(maxpoolarg2_1[0])
add2_1 = Add()([conv2_3, conv2_4])
prelu2_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_5_Alpha')), shared_axes=[1, 2])(add2_1)
# Block_03
conv3_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_6_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_6_Bias')))(prelu2_4)
prelu3_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_6_Alpha')), shared_axes=[1, 2])(conv3_1)
depthconv3_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_2_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_2_Bias')))(prelu3_1)
conv3_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_7_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_7_Bias')))(depthconv3_1)
prelu3_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_7_Alpha')), shared_axes=[1, 2])(conv3_2)
depthconv3_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_3_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_3_Bias')))(prelu3_2)
prelu3_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_8_Alpha')), shared_axes=[1, 2])(depthconv3_2)
conv3_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_8_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_8_Bias')))(prelu3_3)
add3_1 = Add()([conv3_3, prelu2_4])
prelu3_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_9_Alpha')), shared_axes=[1, 2])(add3_1)
# Block_04
conv4_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_9_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_9_Bias')))(prelu3_4)
prelu4_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_10_Alpha')), shared_axes=[1, 2])(conv4_1)
depthconv4_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_4_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_4_Bias')))(prelu4_1)
conv4_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_10_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_10_Bias')))(depthconv4_1)
prelu4_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_11_Alpha')), shared_axes=[1, 2])(conv4_2)
depthconv4_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_5_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_5_Bias')))(prelu4_2)
prelu4_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_12_Alpha')), shared_axes=[1, 2])(depthconv4_2)
conv4_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_11_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_11_Bias')))(prelu4_3)
add4_1 = Add()([conv4_3, prelu3_4])
prelu4_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_13_Alpha')), shared_axes=[1, 2])(add4_1)
# Block_05
conv5_1 = Conv2D(filters=32, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_12_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_12_Bias')))(prelu4_4)
prelu5_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_14_Alpha')), shared_axes=[1, 2])(conv5_1)
depthconv5_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_6_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_6_Bias')))(prelu5_1)
conv5_2 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_13_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_13_Bias')))(depthconv5_1)
prelu5_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_15_Alpha')), shared_axes=[1, 2])(conv5_2)
depthconv5_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_7_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_7_Bias')))(prelu5_2)
prelu5_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_16_Alpha')), shared_axes=[1, 2])(depthconv5_2)
conv5_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_14_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_14_Bias')))(prelu5_3)
maxpoolarg5_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu4_4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg5_1 = max_pooling_with_argmax2d(prelu4_4)
conv5_4 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_15_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_15_Bias')))(maxpoolarg5_1[0])
add5_1 = Add()([conv5_3, conv5_4])
prelu5_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_17_Alpha')), shared_axes=[1, 2])(add5_1)
# Block_06
conv6_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_16_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_16_Bias')))(prelu5_4)
prelu6_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_18_Alpha')), shared_axes=[1, 2])(conv6_1)
depthconv6_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_8_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_8_Bias')))(prelu6_1)
conv6_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_17_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_17_Bias')))(depthconv6_1)
prelu6_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_19_Alpha')), shared_axes=[1, 2])(conv6_2)
depthconv6_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_9_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_9_Bias')))(prelu6_2)
prelu6_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_20_Alpha')), shared_axes=[1, 2])(depthconv6_2)
conv6_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_18_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_18_Bias')))(prelu6_3)
add6_1 = Add()([conv6_3, prelu5_4])
prelu6_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_21_Alpha')), shared_axes=[1, 2])(add6_1)
# Block_07
conv7_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_19_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_19_Bias')))(prelu6_4)
prelu7_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_22_Alpha')), shared_axes=[1, 2])(conv7_1)
depthconv7_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_10_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_10_Bias')))(prelu7_1)
conv7_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_20_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_20_Bias')))(depthconv7_1)
prelu7_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_23_Alpha')), shared_axes=[1, 2])(conv7_2)
depthconv7_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_11_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_11_Bias')))(prelu7_2)
prelu7_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_24_Alpha')), shared_axes=[1, 2])(depthconv7_2)
conv7_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_21_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_21_Bias')))(prelu7_3)
add7_1 = Add()([conv7_3, prelu6_4])
prelu7_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_25_Alpha')), shared_axes=[1, 2])(add7_1)
# Block_08
conv8_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_22_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_22_Bias')))(prelu7_4)
prelu8_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_26_Alpha')), shared_axes=[1, 2])(conv8_1)
depthconv8_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_12_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_12_Bias')))(prelu8_1)
conv8_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_23_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_23_Bias')))(depthconv8_1)
prelu8_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_27_Alpha')), shared_axes=[1, 2])(conv8_2)
depthconv8_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_13_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_13_Bias')))(prelu8_2)
prelu8_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_28_Alpha')), shared_axes=[1, 2])(depthconv8_2)
conv8_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_24_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_24_Bias')))(prelu8_3)
add8_1 = Add()([conv8_3, prelu7_4])
prelu8_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_29_Alpha')), shared_axes=[1, 2])(add8_1)
# Block_09
conv9_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_25_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_25_Bias')))(prelu8_4)
prelu9_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_30_Alpha')), shared_axes=[1, 2])(conv9_1)
depthconv9_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_14_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_14_Bias')))(prelu9_1)
conv9_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_26_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_26_Bias')))(depthconv9_1)
prelu9_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_31_Alpha')), shared_axes=[1, 2])(conv9_2)
depthconv9_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_15_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_15_Bias')))(prelu9_2)
prelu9_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_32_Alpha')), shared_axes=[1, 2])(depthconv9_2)
conv9_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_27_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_27_Bias')))(prelu9_3)
add9_1 = Add()([conv9_3, prelu8_4])
prelu9_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_33_Alpha')), shared_axes=[1, 2])(add9_1)
# Block_10
conv10_1 = Conv2D(filters=16, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_28_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_28_Bias')))(prelu9_4)
prelu10_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_34_Alpha')), shared_axes=[1, 2])(conv10_1)
depthconv10_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_16_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_16_Bias')))(prelu10_1)
conv10_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_29_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_29_Bias')))(depthconv10_1)
prelu10_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_35_Alpha')), shared_axes=[1, 2])(conv10_2)
depthconv10_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_17_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_17_Bias')))(prelu10_2)
prelu10_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_36_Alpha')), shared_axes=[1, 2])(depthconv10_2)
conv10_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_30_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_30_Bias')))(prelu10_3)
maxpoolarg10_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu9_4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg10_1 = max_pooling_with_argmax2d(prelu9_4)
add10_1 = Add()([conv10_3, maxpoolarg10_1[0]])
prelu10_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_37_Alpha')), shared_axes=[1, 2])(add10_1)
# Block_11
conv11_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_31_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_31_Bias')))(prelu10_4)
prelu11_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_38_Alpha')), shared_axes=[1, 2])(conv11_1)
depthconv11_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_18_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_18_Bias')))(prelu11_1)
conv11_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_32_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_32_Bias')))(depthconv11_1)
prelu11_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_39_Alpha')), shared_axes=[1, 2])(conv11_2)
depthconv11_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_19_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_19_Bias')))(prelu11_2)
prelu11_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_40_Alpha')), shared_axes=[1, 2])(depthconv11_2)
conv11_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_33_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_33_Bias')))(prelu11_3)
add11_1 = Add()([conv11_3, prelu10_4])
prelu11_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_41_Alpha')), shared_axes=[1, 2])(add11_1)
# Block_12
conv12_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_34_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_34_Bias')))(prelu11_4)
prelu12_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_42_Alpha')), shared_axes=[1, 2])(conv12_1)
conv12_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[2, 2],
kernel_initializer=Constant(np.load('weights/conv2d_35_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_35_Bias')))(prelu12_1)
prelu12_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_43_Alpha')), shared_axes=[1, 2])(conv12_2)
conv12_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_36_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_36_Bias')))(prelu12_2)
add12_1 = Add()([conv12_3, prelu11_4])
prelu12_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_44_Alpha')), shared_axes=[1, 2])(add12_1)
# Block_13
conv13_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_37_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_37_Bias')))(prelu12_3)
prelu13_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_45_Alpha')), shared_axes=[1, 2])(conv13_1)
depthconv13_1 = DepthwiseConv2D(kernel_size=[5, 5], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_20_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_20_Bias')))(prelu13_1)
conv13_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_38_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_38_Bias')))(depthconv13_1)
prelu13_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_46_Alpha')), shared_axes=[1, 2])(conv13_2)
conv13_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_39_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_39_Bias')))(prelu13_2)
add13_1 = Add()([conv13_3, prelu12_3])
prelu13_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_47_Alpha')), shared_axes=[1, 2])(add13_1)
# Block_14
conv14_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_40_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_40_Bias')))(prelu13_4)
prelu14_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_48_Alpha')), shared_axes=[1, 2])(conv14_1)
conv14_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[4, 4],
kernel_initializer=Constant(np.load('weights/conv2d_41_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_41_Bias')))(prelu14_1)
prelu14_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_49_Alpha')), shared_axes=[1, 2])(conv14_2)
conv14_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_42_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_42_Bias')))(prelu14_2)
add14_1 = Add()([conv14_3, prelu13_4])
prelu14_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_50_Alpha')), shared_axes=[1, 2])(add14_1)
# Block_15
conv15_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_43_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_43_Bias')))(prelu14_3)
prelu15_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_51_Alpha')), shared_axes=[1, 2])(conv15_1)
depthconv15_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_21_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_21_Bias')))(prelu15_1)
conv15_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_44_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_44_Bias')))(depthconv15_1)
prelu15_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_52_Alpha')), shared_axes=[1, 2])(conv15_2)
depthconv15_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_22_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_22_Bias')))(prelu15_2)
# Layers are rebuilt one at a time from pre-extracted weights stored as .npy
# files.  Conv kernels pass through transpose(1, 2, 3, 0) to reorder the
# stored axes into Keras' (h, w, in_ch, out_ch) layout — the on-disk layout is
# presumably TFLite's (out_ch, h, w, in_ch); verify against the weight dumper.
# Tail of Block_15: 1x1 expansion back to 128 channels + residual add.
prelu15_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_53_Alpha')), shared_axes=[1, 2])(depthconv15_2)
conv15_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_45_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_45_Bias')))(prelu15_3)
add15_1 = Add()([conv15_3, prelu14_3])
prelu15_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_54_Alpha')), shared_axes=[1, 2])(add15_1)
# Block_16
# Bottleneck: 1x1 reduce to 8 ch -> 3x3 conv, dilation 8 -> 1x1 expand to
# 128 ch -> residual add with the previous block's output.
conv16_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_46_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_46_Bias')))(prelu15_4)
prelu16_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_55_Alpha')), shared_axes=[1, 2])(conv16_1)
conv16_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
                  kernel_initializer=Constant(np.load('weights/conv2d_47_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_47_Bias')))(prelu16_1)
prelu16_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_56_Alpha')), shared_axes=[1, 2])(conv16_2)
conv16_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_48_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_48_Bias')))(prelu16_2)
add16_1 = Add()([conv16_3, prelu15_4])
prelu16_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_57_Alpha')), shared_axes=[1, 2])(add16_1)
# Block_17
# Bottleneck variant: the middle 3x3 conv is replaced by two 3x3 depthwise
# convolutions, each followed by a pointwise/activation stage.
conv17_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_49_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_49_Bias')))(prelu16_3)
prelu17_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_58_Alpha')), shared_axes=[1, 2])(conv17_1)
depthconv17_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                                depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_23_Kernel')),
                                bias_initializer=Constant(np.load('weights/depthwise_conv2d_23_Bias')))(prelu17_1)
conv17_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_50_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_50_Bias')))(depthconv17_1)
prelu17_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_59_Alpha')), shared_axes=[1, 2])(conv17_2)
depthconv17_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                                depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_24_Kernel')),
                                bias_initializer=Constant(np.load('weights/depthwise_conv2d_24_Bias')))(prelu17_2)
prelu17_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_60_Alpha')), shared_axes=[1, 2])(depthconv17_2)
conv17_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_51_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_51_Bias')))(prelu17_3)
add17_1 = Add()([conv17_3, prelu16_3])
prelu17_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_61_Alpha')), shared_axes=[1, 2])(add17_1)
# Block_18
# Bottleneck: 1x1 reduce to 8 ch -> 3x3 conv, dilation 2 -> 1x1 expand to
# 128 ch -> residual add.
# FIX(review): this block originally re-loaded Block_16's weight files
# (conv2d_46..48 / p_re_lu_55..57), breaking the otherwise strictly
# sequential weight numbering — conv2d_52..54 and p_re_lu_62..64 were never
# referenced anywhere, and Block_19 resumes exactly at conv2d_55 /
# p_re_lu_65.  That pattern points to a copy-paste slip, so the sequential
# file names are restored here.  Confirm the .npy files exist in weights/.
conv18_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_52_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_52_Bias')))(prelu17_4)
prelu18_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_62_Alpha')), shared_axes=[1, 2])(conv18_1)
conv18_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[2, 2],
                  kernel_initializer=Constant(np.load('weights/conv2d_53_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_53_Bias')))(prelu18_1)
prelu18_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_63_Alpha')), shared_axes=[1, 2])(conv18_2)
conv18_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_54_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_54_Bias')))(prelu18_2)
add18_1 = Add()([conv18_3, prelu17_4])
prelu18_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_64_Alpha')), shared_axes=[1, 2])(add18_1)
# Block_19
# Bottleneck with a single 5x5 depthwise conv in the middle stage.
conv19_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_55_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_55_Bias')))(prelu18_3)
prelu19_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_65_Alpha')), shared_axes=[1, 2])(conv19_1)
depthconv19_1 = DepthwiseConv2D(kernel_size=[5, 5], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                                depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_25_Kernel')),
                                bias_initializer=Constant(np.load('weights/depthwise_conv2d_25_Bias')))(prelu19_1)
conv19_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_56_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_56_Bias')))(depthconv19_1)
prelu19_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_66_Alpha')), shared_axes=[1, 2])(conv19_2)
conv19_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_57_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_57_Bias')))(prelu19_2)
add19_1 = Add()([conv19_3, prelu18_3])
# NOTE: the suffix skips _3 here (prelu19_4, not prelu19_3); kept as-is since
# later layers reference this exact name.
prelu19_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_67_Alpha')), shared_axes=[1, 2])(add19_1)
# Block_20
# Bottleneck: 3x3 conv with dilation 4.
conv20_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_58_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_58_Bias')))(prelu19_4)
prelu20_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_68_Alpha')), shared_axes=[1, 2])(conv20_1)
conv20_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[4, 4],
                  kernel_initializer=Constant(np.load('weights/conv2d_59_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_59_Bias')))(prelu20_1)
prelu20_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_69_Alpha')), shared_axes=[1, 2])(conv20_2)
conv20_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_60_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_60_Bias')))(prelu20_2)
add20_1 = Add()([conv20_3, prelu19_4])
prelu20_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_70_Alpha')), shared_axes=[1, 2])(add20_1)
# Block_21
# Bottleneck variant with two 3x3 depthwise convolutions (same shape as
# Block_17).
conv21_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_61_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_61_Bias')))(prelu20_3)
prelu21_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_71_Alpha')), shared_axes=[1, 2])(conv21_1)
depthconv21_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                                depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_26_Kernel')),
                                bias_initializer=Constant(np.load('weights/depthwise_conv2d_26_Bias')))(prelu21_1)
conv21_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_62_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_62_Bias')))(depthconv21_1)
prelu21_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_72_Alpha')), shared_axes=[1, 2])(conv21_2)
depthconv21_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
                                depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_27_Kernel')),
                                bias_initializer=Constant(np.load('weights/depthwise_conv2d_27_Bias')))(prelu21_2)
prelu21_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_73_Alpha')), shared_axes=[1, 2])(depthconv21_2)
conv21_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_63_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_63_Bias')))(prelu21_3)
add21_1 = Add()([conv21_3, prelu20_3])
prelu21_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_74_Alpha')), shared_axes=[1, 2])(add21_1)
# Block_22
# Bottleneck: 3x3 conv with dilation 8.
conv22_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_64_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_64_Bias')))(prelu21_4)
prelu22_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_75_Alpha')), shared_axes=[1, 2])(conv22_1)
conv22_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
                  kernel_initializer=Constant(np.load('weights/conv2d_65_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_65_Bias')))(prelu22_1)
prelu22_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_76_Alpha')), shared_axes=[1, 2])(conv22_2)
conv22_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_66_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_66_Bias')))(prelu22_2)
add22_1 = Add()([conv22_3, prelu21_4])
prelu22_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_77_Alpha')), shared_axes=[1, 2])(add22_1)
# Block_23
# Narrower bottleneck (4 ch instead of 8); note padding switches from
# 'valid' to 'same' on the 1x1 convs from here on.
conv23_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_67_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_67_Bias')))(prelu22_3)
prelu23_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_78_Alpha')), shared_axes=[1, 2])(conv23_1)
conv23_2 = Conv2D(filters=4, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
                  kernel_initializer=Constant(np.load('weights/conv2d_68_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_68_Bias')))(prelu23_1)
prelu23_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_79_Alpha')), shared_axes=[1, 2])(conv23_2)
conv23_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_69_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_69_Bias')))(prelu23_2)
add23_1 = Add()([conv23_3, prelu22_3])
prelu23_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_80_Alpha')), shared_axes=[1, 2])(add23_1)
# Block_24
# First decoder/upsampling block: Conv2DTranspose (stride 2) on the main
# branch, merged with a MaxUnpooling2D skip branch.  maxpoolarg10_1 is
# presumably the (output, argmax) pair from an earlier
# tf.raw_ops.MaxPoolWithArgmax — element [1] is the argmax indices; confirm
# against the encoder section.
# FIX(review): prelu24_3 originally re-loaded 'weights/p_re_lu_77_Alpha',
# which Block_22 already consumes; the alpha numbering runs ...81, 82, 84...
# with 83 otherwise unused, so the sequential 'p_re_lu_83_Alpha' is restored
# here.  Confirm the .npy file exists in weights/.
conv24_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_70_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_70_Bias')))(prelu23_3)
prelu24_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_81_Alpha')), shared_axes=[1, 2])(conv24_1)
convtransbias24_1 = Conv2DTranspose(filters=8, kernel_size=(3, 3), strides=(2, 2), padding='same',
                                    kernel_initializer=Constant(np.load('weights/conv2d_transpose_Kernel').transpose(1,2,3,0)),
                                    bias_initializer=Constant(np.load('weights/conv2d_transpose_Bias')))(prelu24_1)
prelu24_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_82_Alpha')), shared_axes=[1, 2])(convtransbias24_1)
conv24_2 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_71_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_71_Bias')))(prelu24_2)
conv24_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_72_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_72_Bias')))(prelu23_3)
maxunpool24_1 = MaxUnpooling2D(size=[2, 2])([conv24_3, maxpoolarg10_1[1]])
add24_1 = Add()([conv24_2, maxunpool24_1])
prelu24_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_83_Alpha')), shared_axes=[1, 2])(add24_1)
concat24_1 = Concatenate()([prelu24_3, prelu5_4])
# Block_25
# Decoder bottleneck fed by the concat of decoder + encoder features; the
# skip branch is a second 1x1 conv (conv25_4) from the same concat, merged by
# Add (no unpooling here).
conv25_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_73_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_73_Bias')))(concat24_1)
prelu25_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_84_Alpha')), shared_axes=[1, 2])(conv25_1)
conv25_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
                  kernel_initializer=Constant(np.load('weights/conv2d_74_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_74_Bias')))(prelu25_1)
prelu25_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_85_Alpha')), shared_axes=[1, 2])(conv25_2)
conv25_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_75_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_75_Bias')))(prelu25_2)
conv25_4 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_76_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_76_Bias')))(concat24_1)
add25_1 = Add()([conv25_3, conv25_4])
prelu25_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_86_Alpha')), shared_axes=[1, 2])(add25_1)
# Block_26
# Second upsampling block (x2): Conv2DTranspose main branch + MaxUnpooling2D
# skip branch, then concat with encoder feature prelu2_4.  maxpoolarg5_1[1]
# is presumably the argmax indices from an earlier MaxPoolWithArgmax —
# confirm against the encoder section.
conv26_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_77_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_77_Bias')))(prelu25_3)
prelu26_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_87_Alpha')), shared_axes=[1, 2])(conv26_1)
convtransbias26_1 = Conv2DTranspose(filters=8, kernel_size=(3, 3), strides=(2, 2), padding='same',
                                    kernel_initializer=Constant(np.load('weights/conv2d_transpose_1_Kernel').transpose(1,2,3,0)),
                                    bias_initializer=Constant(np.load('weights/conv2d_transpose_1_Bias')))(prelu26_1)
prelu26_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_88_Alpha')), shared_axes=[1, 2])(convtransbias26_1)
conv26_2 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_78_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_78_Bias')))(prelu26_2)
conv26_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_79_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_79_Bias')))(prelu25_3)
maxunpool26_1 = MaxUnpooling2D(size=[2, 2])([conv26_3, maxpoolarg5_1[1]])
add26_1 = Add()([conv26_2, maxunpool26_1])
prelu26_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_89_Alpha')), shared_axes=[1, 2])(add26_1)
concat26_1 = Concatenate()([prelu26_3, prelu2_4])
# Block_27
# Decoder bottleneck on the concat; Add-merged with a parallel 1x1 conv
# skip (conv27_4), same shape as Block_25 but at 64 channels.
conv27_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_80_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_80_Bias')))(concat26_1)
prelu27_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_90_Alpha')), shared_axes=[1, 2])(conv27_1)
conv27_2 = Conv2D(filters=4, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
                  kernel_initializer=Constant(np.load('weights/conv2d_81_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_81_Bias')))(prelu27_1)
prelu27_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_91_Alpha')), shared_axes=[1, 2])(conv27_2)
conv27_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_82_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_82_Bias')))(prelu27_2)
conv27_4 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_83_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_83_Bias')))(concat26_1)
add27_1 = Add()([conv27_3, conv27_4])
prelu27_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_92_Alpha')), shared_axes=[1, 2])(add27_1)
# Block_28
# Third upsampling block (x2) down to 32 channels; unpools with
# maxpoolarg2_1[1] (presumably argmax indices from the first encoder pooling
# stage — confirm).
conv28_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_84_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_84_Bias')))(prelu27_3)
prelu28_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_93_Alpha')), shared_axes=[1, 2])(conv28_1)
convtransbias28_1 = Conv2DTranspose(filters=4, kernel_size=(3, 3), strides=(2, 2), padding='same',
                                    kernel_initializer=Constant(np.load('weights/conv2d_transpose_2_Kernel').transpose(1,2,3,0)),
                                    bias_initializer=Constant(np.load('weights/conv2d_transpose_2_Bias')))(prelu28_1)
prelu28_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_94_Alpha')), shared_axes=[1, 2])(convtransbias28_1)
conv28_2 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_85_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_85_Bias')))(prelu28_2)
conv28_3 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_86_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_86_Bias')))(prelu27_3)
maxunpool28_1 = MaxUnpooling2D(size=[2, 2])([conv28_3, maxpoolarg2_1[1]])
add28_1 = Add()([conv28_2, maxunpool28_1])
prelu28_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_95_Alpha')), shared_axes=[1, 2])(add28_1)
# Block_29
# Final residual bottleneck at 32 channels (undilated 3x3 middle conv).
conv29_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_87_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_87_Bias')))(prelu28_3)
prelu29_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_96_Alpha')), shared_axes=[1, 2])(conv29_1)
conv29_2 = Conv2D(filters=4, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_88_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_88_Bias')))(prelu29_1)
prelu29_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_97_Alpha')), shared_axes=[1, 2])(conv29_2)
conv29_3 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
                  kernel_initializer=Constant(np.load('weights/conv2d_89_Kernel').transpose(1,2,3,0)),
                  bias_initializer=Constant(np.load('weights/conv2d_89_Bias')))(prelu29_2)
add29_1 = Add()([conv29_3, prelu28_3])
prelu29_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_98_Alpha')), shared_axes=[1, 2])(add29_1)
# Block_30
# Final head: two stride-2 transposed convolutions restore full input
# resolution; the last one outputs 2 channels (per-pixel segmentation
# logits/scores — presumably background vs. hair, confirm against the
# original model) and is named to match the source graph's output node.
convtransbias30_1 = Conv2DTranspose(filters=8, kernel_size=(2, 2), strides=(2, 2), padding='same',
                                    kernel_initializer=Constant(np.load('weights/conv2d_transpose_3_Kernel').transpose(1,2,3,0)),
                                    bias_initializer=Constant(np.load('weights/conv2d_transpose_3_Bias')))(prelu29_3)
prelu30_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_99_Alpha')), shared_axes=[1, 2])(convtransbias30_1)
convtransbias30_2 = Conv2DTranspose(filters=2, kernel_size=(2, 2), strides=(2, 2), padding='same',
                                    kernel_initializer=Constant(np.load('weights/conv2d_transpose_4_Kernel').transpose(1,2,3,0)),
                                    bias_initializer=Constant(np.load('weights/conv2d_transpose_4_Bias')), name='conv2d_transpose_4')(prelu30_1)
# model = Model(inputs=inputs, outputs=[prelu2_4])
# Assemble the full graph and export it in several formats:
# SavedModel, Keras .h5, frozen GraphDef (.pb), and TFLite.
model = Model(inputs=inputs, outputs=[convtransbias30_2])
model.summary()
tf.saved_model.save(model, 'saved_model_{}x{}'.format(height, width))
model.save('hair_segmentation_{}x{}.h5'.format(height, width))
# Wrap the model in a tf.function and trace one concrete signature so the
# variables can be frozen into constants for the .pb export.
full_model = tf.function(lambda inputs: model(inputs))
full_model = full_model.get_concrete_function(inputs = (tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype)))
frozen_func = convert_variables_to_constants_v2(full_model, lower_control_flow=False)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir=".",
                  name="hair_segmentation_{}x{}_float32.pb".format(height, width),
                  as_text=False)
# No Quantization - Input/Output=float32
# SELECT_TF_OPS is enabled because the graph contains ops (e.g. the
# unpooling path) that are not in the builtin TFLite op set.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open('hair_segmentation_{}x{}_float32.tflite'.format(height, width), 'wb') as w:
    w.write(tflite_model)
print("tflite convert complete! - hair_segmentation_{}x{}_float32.tflite".format(height, width))
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
with open('hair_segmentation_{}x{}_weight_quant.tflite'.format(height, width), 'wb') as w:
    w.write(tflite_model)
print("Weight Quantization complete! - hair_segmentation_{}x{}_weight_quant.tflite".format(height, width))
# def representative_dataset_gen():
# for image in raw_test_data:
# image = cv2.cvtColor(image, cv2.COLOR_RGB2RGBA)
# image = tf.image.resize(image, (height, width))
# image = image[np.newaxis,:,:,:]
# print('image.shape:', image.shape)
# yield [image]
# raw_test_data = np.load('calibration_data_img_person.npy', allow_pickle=True)
# # Integer Quantization - Input/Output=float32
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]
# converter.representative_dataset = representative_dataset_gen
# tflite_quant_model = converter.convert()
# with open('hair_segmentation_{}x{}_integer_quant.tflite'.format(height, width), 'wb') as w:
# w.write(tflite_quant_model)
# print("Integer Quantization complete! - hair_segmentation_{}x{}_integer_quant.tflite".format(height, width))
# # Full Integer Quantization - Input/Output=int8
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]
# converter.inference_input_type = tf.uint8
# converter.inference_output_type = tf.uint8
# converter.representative_dataset = representative_dataset_gen
# tflite_quant_model = converter.convert()
# with open('hair_segmentation_{}x{}_full_integer_quant.tflite'.format(height, width), 'wb') as w:
# w.write(tflite_quant_model)
# print("Full Integer Quantization complete! - hair_segmentation_{}x{}_full_integer_quant.tflite".format(height, width))
# # Float16 Quantization - Input/Output=float32
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_types = [tf.float16, tf.lite.OpsSet.SELECT_TF_OPS]
# tflite_quant_model = converter.convert()
# with open('hair_segmentation_{}x{}_float16_quant.tflite'.format(height, width), 'wb') as w:
# w.write(tflite_quant_model)
# print("Float16 Quantization complete! - hair_segmentation_{}x{}_float16_quant.tflite".format(height, width))
# # EdgeTPU
# import subprocess
# result = subprocess.check_output(["edgetpu_compiler", "-s", "hair_segmentation_{}x{}_full_integer_quant.tflite".format(height, width)])
# print(result)
| [
"tensorflow.python.keras.backend.concatenate",
"tensorflow.shape",
"tensorflow.raw_ops.MaxPoolWithArgmax",
"tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2",
"tensorflow.cast",
"tensorflow.python.keras.utils.conv_utils.normalize_tuple",
"tensorflow.size",
"tensorflow... | [((6431, 6490), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(height, width, 4)', 'batch_size': '(1)', 'name': '"""input"""'}), "(shape=(height, width, 4), batch_size=1, name='input')\n", (6436, 6490), False, 'from tensorflow.keras import Model, Input\n'), ((9183, 9289), 'tensorflow.raw_ops.MaxPoolWithArgmax', 'tf.raw_ops.MaxPoolWithArgmax', ([], {'input': 'prelu1_2', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input=prelu1_2, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME')\n", (9211, 9289), True, 'import tensorflow as tf\n'), ((15722, 15828), 'tensorflow.raw_ops.MaxPoolWithArgmax', 'tf.raw_ops.MaxPoolWithArgmax', ([], {'input': 'prelu4_4', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input=prelu4_4, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME')\n", (15750, 15828), True, 'import tensorflow as tf\n'), ((26362, 26468), 'tensorflow.raw_ops.MaxPoolWithArgmax', 'tf.raw_ops.MaxPoolWithArgmax', ([], {'input': 'prelu9_4', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input=prelu9_4, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME')\n", (26390, 26468), True, 'import tensorflow as tf\n'), ((57466, 57515), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[convtransbias30_2]'}), '(inputs=inputs, outputs=[convtransbias30_2])\n', (57471, 57515), False, 'from tensorflow.keras import Model, Input\n'), ((57855, 57926), 'tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2', 'convert_variables_to_constants_v2', (['full_model'], {'lower_control_flow': '(False)'}), '(full_model, lower_control_flow=False)\n', (57888, 57926), False, 'from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\n'), ((58222, 58269), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), 
'(model)\n', (58262, 58269), True, 'import tensorflow as tf\n'), ((58674, 58721), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (58714, 58721), True, 'import tensorflow as tf\n'), ((3511, 3590), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['input'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3525, 3590), True, 'import tensorflow as tf\n'), ((3814, 3850), 'tensorflow.zeros', 'tf.zeros', (['mask_shape'], {'dtype': 'tf.int64'}), '(mask_shape, dtype=tf.int64)\n', (3822, 3850), True, 'import tensorflow as tf\n'), ((9653, 9658), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (9656, 9658), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((11667, 11672), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (11670, 11672), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((13689, 13694), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (13692, 13694), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((16194, 16199), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (16197, 16199), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((18219, 18224), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (18222, 18224), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((20249, 20254), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (20252, 20254), False, 'from 
tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((22279, 22284), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (22282, 22284), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((24309, 24314), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (24312, 24314), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((26531, 26536), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (26534, 26536), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((28588, 28593), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (28591, 28593), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((29870, 29875), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (29873, 29875), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((31478, 31483), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (31481, 31483), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((32760, 32765), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (32763, 32765), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((34809, 34814), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (34812, 34814), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, 
Reshape, Concatenate, Layer\n'), ((36091, 36096), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (36094, 36096), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((38140, 38145), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (38143, 38145), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((39422, 39427), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (39425, 39427), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((41030, 41035), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (41033, 41035), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((42312, 42317), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (42315, 42317), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((44361, 44366), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (44364, 44366), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((45643, 45648), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (45646, 45648), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((46923, 46928), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (46926, 46928), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((48593, 48598), 'tensorflow.keras.layers.Add', 'Add', ([], {}), 
'()\n', (48596, 48598), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((48752, 48765), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (48763, 48765), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((50228, 50233), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (50231, 50233), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((51898, 51903), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (51901, 51903), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((52057, 52070), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (52068, 52070), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((53531, 53536), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (53534, 53536), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((55201, 55206), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (55204, 55206), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((56484, 56489), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (56487, 56489), False, 'from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer\n'), ((4648, 4691), 'tensorflow.python.keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['size', '(2)', '"""size"""'], 
{}), "(size, 2, 'size')\n", (4674, 4691), False, 'from tensorflow.python.keras.utils import conv_utils\n'), ((4809, 4831), 'tensorflow.cast', 'tf.cast', (['mask', '"""int32"""'], {}), "(mask, 'int32')\n", (4816, 4831), True, 'import tensorflow as tf\n'), ((4854, 4889), 'tensorflow.shape', 'tf.shape', (['updates'], {'out_type': '"""int32"""'}), "(updates, out_type='int32')\n", (4862, 4889), True, 'import tensorflow as tf\n'), ((5183, 5215), 'tensorflow.python.keras.backend.ones_like', 'K.ones_like', (['mask'], {'dtype': '"""int32"""'}), "(mask, dtype='int32')\n", (5194, 5215), True, 'from tensorflow.python.keras import backend as K\n'), ((5238, 5294), 'tensorflow.python.keras.backend.concatenate', 'K.concatenate', (['[[input_shape[0]], [1], [1], [1]]'], {'axis': '(0)'}), '([[input_shape[0]], [1], [1], [1]], axis=0)\n', (5251, 5294), True, 'from tensorflow.python.keras import backend as K\n'), ((5564, 5604), 'tensorflow.range', 'tf.range', (['output_shape[3]'], {'dtype': '"""int32"""'}), "(output_shape[3], dtype='int32')\n", (5572, 5604), True, 'import tensorflow as tf\n'), ((5748, 5764), 'tensorflow.size', 'tf.size', (['updates'], {}), '(updates)\n', (5755, 5764), True, 'import tensorflow as tf\n'), ((5865, 5899), 'tensorflow.python.keras.backend.reshape', 'K.reshape', (['updates', '[updates_size]'], {}), '(updates, [updates_size])\n', (5874, 5899), True, 'from tensorflow.python.keras import backend as K\n'), ((5914, 5958), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['indices', 'values', 'output_shape'], {}), '(indices, values, output_shape)\n', (5927, 5958), True, 'import tensorflow as tf\n'), ((57779, 57838), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['model.inputs[0].shape', 'model.inputs[0].dtype'], {}), '(model.inputs[0].shape, model.inputs[0].dtype)\n', (57792, 57838), True, 'import tensorflow as tf\n'), ((5327, 5367), 'tensorflow.range', 'tf.range', (['output_shape[0]'], {'dtype': '"""int32"""'}), "(output_shape[0], dtype='int32')\n", (5335, 5367), True, 
'import tensorflow as tf\n'), ((4088, 4119), 'tensorflow.gather_nd', 'tf.gather_nd', (['input', 'in_indices'], {}), '(input, in_indices)\n', (4100, 4119), True, 'import tensorflow as tf\n'), ((4145, 4169), 'tensorflow.argmax', 'tf.argmax', (['slice'], {'axis': '(0)'}), '(slice, axis=0)\n', (4154, 4169), True, 'import tensorflow as tf\n'), ((4284, 4369), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'indices_location', 'values': 'argmax', 'dense_shape': 'mask_shape'}), '(indices=indices_location, values=argmax, dense_shape=mask_shape\n )\n', (4299, 4369), True, 'import tensorflow as tf\n'), ((4399, 4455), 'tensorflow.compat.v1.sparse_add', 'tf.compat.v1.sparse_add', (['pooling_indices', 'sparse_indices'], {}), '(pooling_indices, sparse_indices)\n', (4422, 4455), True, 'import tensorflow as tf\n'), ((5805, 5826), 'tensorflow.python.keras.backend.stack', 'K.stack', (['[b, y, x, f]'], {}), '([b, y, x, f])\n', (5812, 5826), True, 'from tensorflow.python.keras import backend as K\n'), ((6748, 6778), 'numpy.load', 'np.load', (['"""weights/conv2d_Bias"""'], {}), "('weights/conv2d_Bias')\n", (6755, 6778), True, 'import numpy as np\n'), ((6833, 6865), 'numpy.load', 'np.load', (['"""weights/p_re_lu_Alpha"""'], {}), "('weights/p_re_lu_Alpha')\n", (6840, 6865), True, 'import numpy as np\n'), ((7146, 7178), 'numpy.load', 'np.load', (['"""weights/conv2d_1_Bias"""'], {}), "('weights/conv2d_1_Bias')\n", (7153, 7178), True, 'import numpy as np\n'), ((7235, 7269), 'numpy.load', 'np.load', (['"""weights/p_re_lu_1_Alpha"""'], {}), "('weights/p_re_lu_1_Alpha')\n", (7242, 7269), True, 'import numpy as np\n'), ((7562, 7594), 'numpy.load', 'np.load', (['"""weights/conv2d_2_Bias"""'], {}), "('weights/conv2d_2_Bias')\n", (7569, 7594), True, 'import numpy as np\n'), ((7651, 7685), 'numpy.load', 'np.load', (['"""weights/p_re_lu_2_Alpha"""'], {}), "('weights/p_re_lu_2_Alpha')\n", (7658, 7685), True, 'import numpy as np\n'), ((7890, 7932), 'numpy.load', 'np.load', 
(['"""weights/depthwise_conv2d_Kernel"""'], {}), "('weights/depthwise_conv2d_Kernel')\n", (7897, 7932), True, 'import numpy as np\n'), ((7978, 8018), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_Bias"""'], {}), "('weights/depthwise_conv2d_Bias')\n", (7985, 8018), True, 'import numpy as np\n'), ((8280, 8312), 'numpy.load', 'np.load', (['"""weights/conv2d_3_Bias"""'], {}), "('weights/conv2d_3_Bias')\n", (8287, 8312), True, 'import numpy as np\n'), ((8373, 8407), 'numpy.load', 'np.load', (['"""weights/p_re_lu_3_Alpha"""'], {}), "('weights/p_re_lu_3_Alpha')\n", (8380, 8407), True, 'import numpy as np\n'), ((8612, 8656), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_1_Kernel"""'], {}), "('weights/depthwise_conv2d_1_Kernel')\n", (8619, 8656), True, 'import numpy as np\n'), ((8702, 8744), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_1_Bias"""'], {}), "('weights/depthwise_conv2d_1_Bias')\n", (8709, 8744), True, 'import numpy as np\n'), ((8802, 8836), 'numpy.load', 'np.load', (['"""weights/p_re_lu_4_Alpha"""'], {}), "('weights/p_re_lu_4_Alpha')\n", (8809, 8836), True, 'import numpy as np\n'), ((9121, 9153), 'numpy.load', 'np.load', (['"""weights/conv2d_4_Bias"""'], {}), "('weights/conv2d_4_Bias')\n", (9128, 9153), True, 'import numpy as np\n'), ((9590, 9622), 'numpy.load', 'np.load', (['"""weights/conv2d_5_Bias"""'], {}), "('weights/conv2d_5_Bias')\n", (9597, 9622), True, 'import numpy as np\n'), ((9723, 9757), 'numpy.load', 'np.load', (['"""weights/p_re_lu_5_Alpha"""'], {}), "('weights/p_re_lu_5_Alpha')\n", (9730, 9757), True, 'import numpy as np\n'), ((10049, 10081), 'numpy.load', 'np.load', (['"""weights/conv2d_6_Bias"""'], {}), "('weights/conv2d_6_Bias')\n", (10056, 10081), True, 'import numpy as np\n'), ((10138, 10172), 'numpy.load', 'np.load', (['"""weights/p_re_lu_6_Alpha"""'], {}), "('weights/p_re_lu_6_Alpha')\n", (10145, 10172), True, 'import numpy as np\n'), ((10377, 10421), 'numpy.load', 'np.load', 
(['"""weights/depthwise_conv2d_2_Kernel"""'], {}), "('weights/depthwise_conv2d_2_Kernel')\n", (10384, 10421), True, 'import numpy as np\n'), ((10467, 10509), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_2_Bias"""'], {}), "('weights/depthwise_conv2d_2_Bias')\n", (10474, 10509), True, 'import numpy as np\n'), ((10771, 10803), 'numpy.load', 'np.load', (['"""weights/conv2d_7_Bias"""'], {}), "('weights/conv2d_7_Bias')\n", (10778, 10803), True, 'import numpy as np\n'), ((10864, 10898), 'numpy.load', 'np.load', (['"""weights/p_re_lu_7_Alpha"""'], {}), "('weights/p_re_lu_7_Alpha')\n", (10871, 10898), True, 'import numpy as np\n'), ((11103, 11147), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_3_Kernel"""'], {}), "('weights/depthwise_conv2d_3_Kernel')\n", (11110, 11147), True, 'import numpy as np\n'), ((11193, 11235), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_3_Bias"""'], {}), "('weights/depthwise_conv2d_3_Bias')\n", (11200, 11235), True, 'import numpy as np\n'), ((11293, 11327), 'numpy.load', 'np.load', (['"""weights/p_re_lu_8_Alpha"""'], {}), "('weights/p_re_lu_8_Alpha')\n", (11300, 11327), True, 'import numpy as np\n'), ((11612, 11644), 'numpy.load', 'np.load', (['"""weights/conv2d_8_Bias"""'], {}), "('weights/conv2d_8_Bias')\n", (11619, 11644), True, 'import numpy as np\n'), ((11738, 11772), 'numpy.load', 'np.load', (['"""weights/p_re_lu_9_Alpha"""'], {}), "('weights/p_re_lu_9_Alpha')\n", (11745, 11772), True, 'import numpy as np\n'), ((12064, 12096), 'numpy.load', 'np.load', (['"""weights/conv2d_9_Bias"""'], {}), "('weights/conv2d_9_Bias')\n", (12071, 12096), True, 'import numpy as np\n'), ((12153, 12188), 'numpy.load', 'np.load', (['"""weights/p_re_lu_10_Alpha"""'], {}), "('weights/p_re_lu_10_Alpha')\n", (12160, 12188), True, 'import numpy as np\n'), ((12393, 12437), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_4_Kernel"""'], {}), "('weights/depthwise_conv2d_4_Kernel')\n", (12400, 12437), True, 'import numpy as np\n'), 
((12483, 12525), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_4_Bias"""'], {}), "('weights/depthwise_conv2d_4_Bias')\n", (12490, 12525), True, 'import numpy as np\n'), ((12788, 12821), 'numpy.load', 'np.load', (['"""weights/conv2d_10_Bias"""'], {}), "('weights/conv2d_10_Bias')\n", (12795, 12821), True, 'import numpy as np\n'), ((12882, 12917), 'numpy.load', 'np.load', (['"""weights/p_re_lu_11_Alpha"""'], {}), "('weights/p_re_lu_11_Alpha')\n", (12889, 12917), True, 'import numpy as np\n'), ((13122, 13166), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_5_Kernel"""'], {}), "('weights/depthwise_conv2d_5_Kernel')\n", (13129, 13166), True, 'import numpy as np\n'), ((13212, 13254), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_5_Bias"""'], {}), "('weights/depthwise_conv2d_5_Bias')\n", (13219, 13254), True, 'import numpy as np\n'), ((13312, 13347), 'numpy.load', 'np.load', (['"""weights/p_re_lu_12_Alpha"""'], {}), "('weights/p_re_lu_12_Alpha')\n", (13319, 13347), True, 'import numpy as np\n'), ((13633, 13666), 'numpy.load', 'np.load', (['"""weights/conv2d_11_Bias"""'], {}), "('weights/conv2d_11_Bias')\n", (13640, 13666), True, 'import numpy as np\n'), ((13760, 13795), 'numpy.load', 'np.load', (['"""weights/p_re_lu_13_Alpha"""'], {}), "('weights/p_re_lu_13_Alpha')\n", (13767, 13795), True, 'import numpy as np\n'), ((14088, 14121), 'numpy.load', 'np.load', (['"""weights/conv2d_12_Bias"""'], {}), "('weights/conv2d_12_Bias')\n", (14095, 14121), True, 'import numpy as np\n'), ((14178, 14213), 'numpy.load', 'np.load', (['"""weights/p_re_lu_14_Alpha"""'], {}), "('weights/p_re_lu_14_Alpha')\n", (14185, 14213), True, 'import numpy as np\n'), ((14418, 14462), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_6_Kernel"""'], {}), "('weights/depthwise_conv2d_6_Kernel')\n", (14425, 14462), True, 'import numpy as np\n'), ((14508, 14550), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_6_Bias"""'], {}), "('weights/depthwise_conv2d_6_Bias')\n", 
(14515, 14550), True, 'import numpy as np\n'), ((14813, 14846), 'numpy.load', 'np.load', (['"""weights/conv2d_13_Bias"""'], {}), "('weights/conv2d_13_Bias')\n", (14820, 14846), True, 'import numpy as np\n'), ((14907, 14942), 'numpy.load', 'np.load', (['"""weights/p_re_lu_15_Alpha"""'], {}), "('weights/p_re_lu_15_Alpha')\n", (14914, 14942), True, 'import numpy as np\n'), ((15147, 15191), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_7_Kernel"""'], {}), "('weights/depthwise_conv2d_7_Kernel')\n", (15154, 15191), True, 'import numpy as np\n'), ((15237, 15279), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_7_Bias"""'], {}), "('weights/depthwise_conv2d_7_Bias')\n", (15244, 15279), True, 'import numpy as np\n'), ((15337, 15372), 'numpy.load', 'np.load', (['"""weights/p_re_lu_16_Alpha"""'], {}), "('weights/p_re_lu_16_Alpha')\n", (15344, 15372), True, 'import numpy as np\n'), ((15659, 15692), 'numpy.load', 'np.load', (['"""weights/conv2d_14_Bias"""'], {}), "('weights/conv2d_14_Bias')\n", (15666, 15692), True, 'import numpy as np\n'), ((16130, 16163), 'numpy.load', 'np.load', (['"""weights/conv2d_15_Bias"""'], {}), "('weights/conv2d_15_Bias')\n", (16137, 16163), True, 'import numpy as np\n'), ((16264, 16299), 'numpy.load', 'np.load', (['"""weights/p_re_lu_17_Alpha"""'], {}), "('weights/p_re_lu_17_Alpha')\n", (16271, 16299), True, 'import numpy as np\n'), ((16592, 16625), 'numpy.load', 'np.load', (['"""weights/conv2d_16_Bias"""'], {}), "('weights/conv2d_16_Bias')\n", (16599, 16625), True, 'import numpy as np\n'), ((16682, 16717), 'numpy.load', 'np.load', (['"""weights/p_re_lu_18_Alpha"""'], {}), "('weights/p_re_lu_18_Alpha')\n", (16689, 16717), True, 'import numpy as np\n'), ((16922, 16966), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_8_Kernel"""'], {}), "('weights/depthwise_conv2d_8_Kernel')\n", (16929, 16966), True, 'import numpy as np\n'), ((17012, 17054), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_8_Bias"""'], {}), 
"('weights/depthwise_conv2d_8_Bias')\n", (17019, 17054), True, 'import numpy as np\n'), ((17317, 17350), 'numpy.load', 'np.load', (['"""weights/conv2d_17_Bias"""'], {}), "('weights/conv2d_17_Bias')\n", (17324, 17350), True, 'import numpy as np\n'), ((17411, 17446), 'numpy.load', 'np.load', (['"""weights/p_re_lu_19_Alpha"""'], {}), "('weights/p_re_lu_19_Alpha')\n", (17418, 17446), True, 'import numpy as np\n'), ((17651, 17695), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_9_Kernel"""'], {}), "('weights/depthwise_conv2d_9_Kernel')\n", (17658, 17695), True, 'import numpy as np\n'), ((17741, 17783), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_9_Bias"""'], {}), "('weights/depthwise_conv2d_9_Bias')\n", (17748, 17783), True, 'import numpy as np\n'), ((17841, 17876), 'numpy.load', 'np.load', (['"""weights/p_re_lu_20_Alpha"""'], {}), "('weights/p_re_lu_20_Alpha')\n", (17848, 17876), True, 'import numpy as np\n'), ((18163, 18196), 'numpy.load', 'np.load', (['"""weights/conv2d_18_Bias"""'], {}), "('weights/conv2d_18_Bias')\n", (18170, 18196), True, 'import numpy as np\n'), ((18290, 18325), 'numpy.load', 'np.load', (['"""weights/p_re_lu_21_Alpha"""'], {}), "('weights/p_re_lu_21_Alpha')\n", (18297, 18325), True, 'import numpy as np\n'), ((18618, 18651), 'numpy.load', 'np.load', (['"""weights/conv2d_19_Bias"""'], {}), "('weights/conv2d_19_Bias')\n", (18625, 18651), True, 'import numpy as np\n'), ((18708, 18743), 'numpy.load', 'np.load', (['"""weights/p_re_lu_22_Alpha"""'], {}), "('weights/p_re_lu_22_Alpha')\n", (18715, 18743), True, 'import numpy as np\n'), ((18948, 18993), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_10_Kernel"""'], {}), "('weights/depthwise_conv2d_10_Kernel')\n", (18955, 18993), True, 'import numpy as np\n'), ((19039, 19082), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_10_Bias"""'], {}), "('weights/depthwise_conv2d_10_Bias')\n", (19046, 19082), True, 'import numpy as np\n'), ((19345, 19378), 'numpy.load', 
'np.load', (['"""weights/conv2d_20_Bias"""'], {}), "('weights/conv2d_20_Bias')\n", (19352, 19378), True, 'import numpy as np\n'), ((19439, 19474), 'numpy.load', 'np.load', (['"""weights/p_re_lu_23_Alpha"""'], {}), "('weights/p_re_lu_23_Alpha')\n", (19446, 19474), True, 'import numpy as np\n'), ((19679, 19724), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_11_Kernel"""'], {}), "('weights/depthwise_conv2d_11_Kernel')\n", (19686, 19724), True, 'import numpy as np\n'), ((19770, 19813), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_11_Bias"""'], {}), "('weights/depthwise_conv2d_11_Bias')\n", (19777, 19813), True, 'import numpy as np\n'), ((19871, 19906), 'numpy.load', 'np.load', (['"""weights/p_re_lu_24_Alpha"""'], {}), "('weights/p_re_lu_24_Alpha')\n", (19878, 19906), True, 'import numpy as np\n'), ((20193, 20226), 'numpy.load', 'np.load', (['"""weights/conv2d_21_Bias"""'], {}), "('weights/conv2d_21_Bias')\n", (20200, 20226), True, 'import numpy as np\n'), ((20320, 20355), 'numpy.load', 'np.load', (['"""weights/p_re_lu_25_Alpha"""'], {}), "('weights/p_re_lu_25_Alpha')\n", (20327, 20355), True, 'import numpy as np\n'), ((20648, 20681), 'numpy.load', 'np.load', (['"""weights/conv2d_22_Bias"""'], {}), "('weights/conv2d_22_Bias')\n", (20655, 20681), True, 'import numpy as np\n'), ((20738, 20773), 'numpy.load', 'np.load', (['"""weights/p_re_lu_26_Alpha"""'], {}), "('weights/p_re_lu_26_Alpha')\n", (20745, 20773), True, 'import numpy as np\n'), ((20978, 21023), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_12_Kernel"""'], {}), "('weights/depthwise_conv2d_12_Kernel')\n", (20985, 21023), True, 'import numpy as np\n'), ((21069, 21112), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_12_Bias"""'], {}), "('weights/depthwise_conv2d_12_Bias')\n", (21076, 21112), True, 'import numpy as np\n'), ((21375, 21408), 'numpy.load', 'np.load', (['"""weights/conv2d_23_Bias"""'], {}), "('weights/conv2d_23_Bias')\n", (21382, 21408), True, 'import numpy as 
np\n'), ((21469, 21504), 'numpy.load', 'np.load', (['"""weights/p_re_lu_27_Alpha"""'], {}), "('weights/p_re_lu_27_Alpha')\n", (21476, 21504), True, 'import numpy as np\n'), ((21709, 21754), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_13_Kernel"""'], {}), "('weights/depthwise_conv2d_13_Kernel')\n", (21716, 21754), True, 'import numpy as np\n'), ((21800, 21843), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_13_Bias"""'], {}), "('weights/depthwise_conv2d_13_Bias')\n", (21807, 21843), True, 'import numpy as np\n'), ((21901, 21936), 'numpy.load', 'np.load', (['"""weights/p_re_lu_28_Alpha"""'], {}), "('weights/p_re_lu_28_Alpha')\n", (21908, 21936), True, 'import numpy as np\n'), ((22223, 22256), 'numpy.load', 'np.load', (['"""weights/conv2d_24_Bias"""'], {}), "('weights/conv2d_24_Bias')\n", (22230, 22256), True, 'import numpy as np\n'), ((22350, 22385), 'numpy.load', 'np.load', (['"""weights/p_re_lu_29_Alpha"""'], {}), "('weights/p_re_lu_29_Alpha')\n", (22357, 22385), True, 'import numpy as np\n'), ((22678, 22711), 'numpy.load', 'np.load', (['"""weights/conv2d_25_Bias"""'], {}), "('weights/conv2d_25_Bias')\n", (22685, 22711), True, 'import numpy as np\n'), ((22768, 22803), 'numpy.load', 'np.load', (['"""weights/p_re_lu_30_Alpha"""'], {}), "('weights/p_re_lu_30_Alpha')\n", (22775, 22803), True, 'import numpy as np\n'), ((23008, 23053), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_14_Kernel"""'], {}), "('weights/depthwise_conv2d_14_Kernel')\n", (23015, 23053), True, 'import numpy as np\n'), ((23099, 23142), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_14_Bias"""'], {}), "('weights/depthwise_conv2d_14_Bias')\n", (23106, 23142), True, 'import numpy as np\n'), ((23405, 23438), 'numpy.load', 'np.load', (['"""weights/conv2d_26_Bias"""'], {}), "('weights/conv2d_26_Bias')\n", (23412, 23438), True, 'import numpy as np\n'), ((23499, 23534), 'numpy.load', 'np.load', (['"""weights/p_re_lu_31_Alpha"""'], {}), 
"('weights/p_re_lu_31_Alpha')\n", (23506, 23534), True, 'import numpy as np\n'), ((23739, 23784), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_15_Kernel"""'], {}), "('weights/depthwise_conv2d_15_Kernel')\n", (23746, 23784), True, 'import numpy as np\n'), ((23830, 23873), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_15_Bias"""'], {}), "('weights/depthwise_conv2d_15_Bias')\n", (23837, 23873), True, 'import numpy as np\n'), ((23931, 23966), 'numpy.load', 'np.load', (['"""weights/p_re_lu_32_Alpha"""'], {}), "('weights/p_re_lu_32_Alpha')\n", (23938, 23966), True, 'import numpy as np\n'), ((24253, 24286), 'numpy.load', 'np.load', (['"""weights/conv2d_27_Bias"""'], {}), "('weights/conv2d_27_Bias')\n", (24260, 24286), True, 'import numpy as np\n'), ((24380, 24415), 'numpy.load', 'np.load', (['"""weights/p_re_lu_33_Alpha"""'], {}), "('weights/p_re_lu_33_Alpha')\n", (24387, 24415), True, 'import numpy as np\n'), ((24709, 24742), 'numpy.load', 'np.load', (['"""weights/conv2d_28_Bias"""'], {}), "('weights/conv2d_28_Bias')\n", (24716, 24742), True, 'import numpy as np\n'), ((24800, 24835), 'numpy.load', 'np.load', (['"""weights/p_re_lu_34_Alpha"""'], {}), "('weights/p_re_lu_34_Alpha')\n", (24807, 24835), True, 'import numpy as np\n'), ((25042, 25087), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_16_Kernel"""'], {}), "('weights/depthwise_conv2d_16_Kernel')\n", (25049, 25087), True, 'import numpy as np\n'), ((25133, 25176), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_16_Bias"""'], {}), "('weights/depthwise_conv2d_16_Bias')\n", (25140, 25176), True, 'import numpy as np\n'), ((25441, 25474), 'numpy.load', 'np.load', (['"""weights/conv2d_29_Bias"""'], {}), "('weights/conv2d_29_Bias')\n", (25448, 25474), True, 'import numpy as np\n'), ((25537, 25572), 'numpy.load', 'np.load', (['"""weights/p_re_lu_35_Alpha"""'], {}), "('weights/p_re_lu_35_Alpha')\n", (25544, 25572), True, 'import numpy as np\n'), ((25779, 25824), 'numpy.load', 'np.load', 
(['"""weights/depthwise_conv2d_17_Kernel"""'], {}), "('weights/depthwise_conv2d_17_Kernel')\n", (25786, 25824), True, 'import numpy as np\n'), ((25870, 25913), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_17_Bias"""'], {}), "('weights/depthwise_conv2d_17_Bias')\n", (25877, 25913), True, 'import numpy as np\n'), ((25973, 26008), 'numpy.load', 'np.load', (['"""weights/p_re_lu_36_Alpha"""'], {}), "('weights/p_re_lu_36_Alpha')\n", (25980, 26008), True, 'import numpy as np\n'), ((26297, 26330), 'numpy.load', 'np.load', (['"""weights/conv2d_30_Bias"""'], {}), "('weights/conv2d_30_Bias')\n", (26304, 26330), True, 'import numpy as np\n'), ((26613, 26648), 'numpy.load', 'np.load', (['"""weights/p_re_lu_37_Alpha"""'], {}), "('weights/p_re_lu_37_Alpha')\n", (26620, 26648), True, 'import numpy as np\n'), ((26942, 26975), 'numpy.load', 'np.load', (['"""weights/conv2d_31_Bias"""'], {}), "('weights/conv2d_31_Bias')\n", (26949, 26975), True, 'import numpy as np\n'), ((27034, 27069), 'numpy.load', 'np.load', (['"""weights/p_re_lu_38_Alpha"""'], {}), "('weights/p_re_lu_38_Alpha')\n", (27041, 27069), True, 'import numpy as np\n'), ((27276, 27321), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_18_Kernel"""'], {}), "('weights/depthwise_conv2d_18_Kernel')\n", (27283, 27321), True, 'import numpy as np\n'), ((27367, 27410), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_18_Bias"""'], {}), "('weights/depthwise_conv2d_18_Bias')\n", (27374, 27410), True, 'import numpy as np\n'), ((27674, 27707), 'numpy.load', 'np.load', (['"""weights/conv2d_32_Bias"""'], {}), "('weights/conv2d_32_Bias')\n", (27681, 27707), True, 'import numpy as np\n'), ((27770, 27805), 'numpy.load', 'np.load', (['"""weights/p_re_lu_39_Alpha"""'], {}), "('weights/p_re_lu_39_Alpha')\n", (27777, 27805), True, 'import numpy as np\n'), ((28012, 28057), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_19_Kernel"""'], {}), "('weights/depthwise_conv2d_19_Kernel')\n", (28019, 28057), True, 
'import numpy as np\n'), ((28103, 28146), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_19_Bias"""'], {}), "('weights/depthwise_conv2d_19_Bias')\n", (28110, 28146), True, 'import numpy as np\n'), ((28206, 28241), 'numpy.load', 'np.load', (['"""weights/p_re_lu_40_Alpha"""'], {}), "('weights/p_re_lu_40_Alpha')\n", (28213, 28241), True, 'import numpy as np\n'), ((28530, 28563), 'numpy.load', 'np.load', (['"""weights/conv2d_33_Bias"""'], {}), "('weights/conv2d_33_Bias')\n", (28537, 28563), True, 'import numpy as np\n'), ((28662, 28697), 'numpy.load', 'np.load', (['"""weights/p_re_lu_41_Alpha"""'], {}), "('weights/p_re_lu_41_Alpha')\n", (28669, 28697), True, 'import numpy as np\n'), ((28991, 29024), 'numpy.load', 'np.load', (['"""weights/conv2d_34_Bias"""'], {}), "('weights/conv2d_34_Bias')\n", (28998, 29024), True, 'import numpy as np\n'), ((29083, 29118), 'numpy.load', 'np.load', (['"""weights/p_re_lu_42_Alpha"""'], {}), "('weights/p_re_lu_42_Alpha')\n", (29090, 29118), True, 'import numpy as np\n'), ((29400, 29433), 'numpy.load', 'np.load', (['"""weights/conv2d_35_Bias"""'], {}), "('weights/conv2d_35_Bias')\n", (29407, 29433), True, 'import numpy as np\n'), ((29492, 29527), 'numpy.load', 'np.load', (['"""weights/p_re_lu_43_Alpha"""'], {}), "('weights/p_re_lu_43_Alpha')\n", (29499, 29527), True, 'import numpy as np\n'), ((29812, 29845), 'numpy.load', 'np.load', (['"""weights/conv2d_36_Bias"""'], {}), "('weights/conv2d_36_Bias')\n", (29819, 29845), True, 'import numpy as np\n'), ((29944, 29979), 'numpy.load', 'np.load', (['"""weights/p_re_lu_44_Alpha"""'], {}), "('weights/p_re_lu_44_Alpha')\n", (29951, 29979), True, 'import numpy as np\n'), ((30273, 30306), 'numpy.load', 'np.load', (['"""weights/conv2d_37_Bias"""'], {}), "('weights/conv2d_37_Bias')\n", (30280, 30306), True, 'import numpy as np\n'), ((30365, 30400), 'numpy.load', 'np.load', (['"""weights/p_re_lu_45_Alpha"""'], {}), "('weights/p_re_lu_45_Alpha')\n", (30372, 30400), True, 'import numpy as 
np\n'), ((30607, 30652), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_20_Kernel"""'], {}), "('weights/depthwise_conv2d_20_Kernel')\n", (30614, 30652), True, 'import numpy as np\n'), ((30698, 30741), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_20_Bias"""'], {}), "('weights/depthwise_conv2d_20_Bias')\n", (30705, 30741), True, 'import numpy as np\n'), ((31005, 31038), 'numpy.load', 'np.load', (['"""weights/conv2d_38_Bias"""'], {}), "('weights/conv2d_38_Bias')\n", (31012, 31038), True, 'import numpy as np\n'), ((31101, 31136), 'numpy.load', 'np.load', (['"""weights/p_re_lu_46_Alpha"""'], {}), "('weights/p_re_lu_46_Alpha')\n", (31108, 31136), True, 'import numpy as np\n'), ((31420, 31453), 'numpy.load', 'np.load', (['"""weights/conv2d_39_Bias"""'], {}), "('weights/conv2d_39_Bias')\n", (31427, 31453), True, 'import numpy as np\n'), ((31552, 31587), 'numpy.load', 'np.load', (['"""weights/p_re_lu_47_Alpha"""'], {}), "('weights/p_re_lu_47_Alpha')\n", (31559, 31587), True, 'import numpy as np\n'), ((31881, 31914), 'numpy.load', 'np.load', (['"""weights/conv2d_40_Bias"""'], {}), "('weights/conv2d_40_Bias')\n", (31888, 31914), True, 'import numpy as np\n'), ((31973, 32008), 'numpy.load', 'np.load', (['"""weights/p_re_lu_48_Alpha"""'], {}), "('weights/p_re_lu_48_Alpha')\n", (31980, 32008), True, 'import numpy as np\n'), ((32290, 32323), 'numpy.load', 'np.load', (['"""weights/conv2d_41_Bias"""'], {}), "('weights/conv2d_41_Bias')\n", (32297, 32323), True, 'import numpy as np\n'), ((32382, 32417), 'numpy.load', 'np.load', (['"""weights/p_re_lu_49_Alpha"""'], {}), "('weights/p_re_lu_49_Alpha')\n", (32389, 32417), True, 'import numpy as np\n'), ((32702, 32735), 'numpy.load', 'np.load', (['"""weights/conv2d_42_Bias"""'], {}), "('weights/conv2d_42_Bias')\n", (32709, 32735), True, 'import numpy as np\n'), ((32834, 32869), 'numpy.load', 'np.load', (['"""weights/p_re_lu_50_Alpha"""'], {}), "('weights/p_re_lu_50_Alpha')\n", (32841, 32869), True, 'import numpy as 
np\n'), ((33163, 33196), 'numpy.load', 'np.load', (['"""weights/conv2d_43_Bias"""'], {}), "('weights/conv2d_43_Bias')\n", (33170, 33196), True, 'import numpy as np\n'), ((33255, 33290), 'numpy.load', 'np.load', (['"""weights/p_re_lu_51_Alpha"""'], {}), "('weights/p_re_lu_51_Alpha')\n", (33262, 33290), True, 'import numpy as np\n'), ((33497, 33542), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_21_Kernel"""'], {}), "('weights/depthwise_conv2d_21_Kernel')\n", (33504, 33542), True, 'import numpy as np\n'), ((33588, 33631), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_21_Bias"""'], {}), "('weights/depthwise_conv2d_21_Bias')\n", (33595, 33631), True, 'import numpy as np\n'), ((33895, 33928), 'numpy.load', 'np.load', (['"""weights/conv2d_44_Bias"""'], {}), "('weights/conv2d_44_Bias')\n", (33902, 33928), True, 'import numpy as np\n'), ((33991, 34026), 'numpy.load', 'np.load', (['"""weights/p_re_lu_52_Alpha"""'], {}), "('weights/p_re_lu_52_Alpha')\n", (33998, 34026), True, 'import numpy as np\n'), ((34233, 34278), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_22_Kernel"""'], {}), "('weights/depthwise_conv2d_22_Kernel')\n", (34240, 34278), True, 'import numpy as np\n'), ((34324, 34367), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_22_Bias"""'], {}), "('weights/depthwise_conv2d_22_Bias')\n", (34331, 34367), True, 'import numpy as np\n'), ((34427, 34462), 'numpy.load', 'np.load', (['"""weights/p_re_lu_53_Alpha"""'], {}), "('weights/p_re_lu_53_Alpha')\n", (34434, 34462), True, 'import numpy as np\n'), ((34751, 34784), 'numpy.load', 'np.load', (['"""weights/conv2d_45_Bias"""'], {}), "('weights/conv2d_45_Bias')\n", (34758, 34784), True, 'import numpy as np\n'), ((34883, 34918), 'numpy.load', 'np.load', (['"""weights/p_re_lu_54_Alpha"""'], {}), "('weights/p_re_lu_54_Alpha')\n", (34890, 34918), True, 'import numpy as np\n'), ((35212, 35245), 'numpy.load', 'np.load', (['"""weights/conv2d_46_Bias"""'], {}), "('weights/conv2d_46_Bias')\n", 
(35219, 35245), True, 'import numpy as np\n'), ((35304, 35339), 'numpy.load', 'np.load', (['"""weights/p_re_lu_55_Alpha"""'], {}), "('weights/p_re_lu_55_Alpha')\n", (35311, 35339), True, 'import numpy as np\n'), ((35621, 35654), 'numpy.load', 'np.load', (['"""weights/conv2d_47_Bias"""'], {}), "('weights/conv2d_47_Bias')\n", (35628, 35654), True, 'import numpy as np\n'), ((35713, 35748), 'numpy.load', 'np.load', (['"""weights/p_re_lu_56_Alpha"""'], {}), "('weights/p_re_lu_56_Alpha')\n", (35720, 35748), True, 'import numpy as np\n'), ((36033, 36066), 'numpy.load', 'np.load', (['"""weights/conv2d_48_Bias"""'], {}), "('weights/conv2d_48_Bias')\n", (36040, 36066), True, 'import numpy as np\n'), ((36165, 36200), 'numpy.load', 'np.load', (['"""weights/p_re_lu_57_Alpha"""'], {}), "('weights/p_re_lu_57_Alpha')\n", (36172, 36200), True, 'import numpy as np\n'), ((36494, 36527), 'numpy.load', 'np.load', (['"""weights/conv2d_49_Bias"""'], {}), "('weights/conv2d_49_Bias')\n", (36501, 36527), True, 'import numpy as np\n'), ((36586, 36621), 'numpy.load', 'np.load', (['"""weights/p_re_lu_58_Alpha"""'], {}), "('weights/p_re_lu_58_Alpha')\n", (36593, 36621), True, 'import numpy as np\n'), ((36828, 36873), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_23_Kernel"""'], {}), "('weights/depthwise_conv2d_23_Kernel')\n", (36835, 36873), True, 'import numpy as np\n'), ((36919, 36962), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_23_Bias"""'], {}), "('weights/depthwise_conv2d_23_Bias')\n", (36926, 36962), True, 'import numpy as np\n'), ((37226, 37259), 'numpy.load', 'np.load', (['"""weights/conv2d_50_Bias"""'], {}), "('weights/conv2d_50_Bias')\n", (37233, 37259), True, 'import numpy as np\n'), ((37322, 37357), 'numpy.load', 'np.load', (['"""weights/p_re_lu_59_Alpha"""'], {}), "('weights/p_re_lu_59_Alpha')\n", (37329, 37357), True, 'import numpy as np\n'), ((37564, 37609), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_24_Kernel"""'], {}), 
"('weights/depthwise_conv2d_24_Kernel')\n", (37571, 37609), True, 'import numpy as np\n'), ((37655, 37698), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_24_Bias"""'], {}), "('weights/depthwise_conv2d_24_Bias')\n", (37662, 37698), True, 'import numpy as np\n'), ((37758, 37793), 'numpy.load', 'np.load', (['"""weights/p_re_lu_60_Alpha"""'], {}), "('weights/p_re_lu_60_Alpha')\n", (37765, 37793), True, 'import numpy as np\n'), ((38082, 38115), 'numpy.load', 'np.load', (['"""weights/conv2d_51_Bias"""'], {}), "('weights/conv2d_51_Bias')\n", (38089, 38115), True, 'import numpy as np\n'), ((38214, 38249), 'numpy.load', 'np.load', (['"""weights/p_re_lu_61_Alpha"""'], {}), "('weights/p_re_lu_61_Alpha')\n", (38221, 38249), True, 'import numpy as np\n'), ((38543, 38576), 'numpy.load', 'np.load', (['"""weights/conv2d_46_Bias"""'], {}), "('weights/conv2d_46_Bias')\n", (38550, 38576), True, 'import numpy as np\n'), ((38635, 38670), 'numpy.load', 'np.load', (['"""weights/p_re_lu_55_Alpha"""'], {}), "('weights/p_re_lu_55_Alpha')\n", (38642, 38670), True, 'import numpy as np\n'), ((38952, 38985), 'numpy.load', 'np.load', (['"""weights/conv2d_47_Bias"""'], {}), "('weights/conv2d_47_Bias')\n", (38959, 38985), True, 'import numpy as np\n'), ((39044, 39079), 'numpy.load', 'np.load', (['"""weights/p_re_lu_56_Alpha"""'], {}), "('weights/p_re_lu_56_Alpha')\n", (39051, 39079), True, 'import numpy as np\n'), ((39364, 39397), 'numpy.load', 'np.load', (['"""weights/conv2d_48_Bias"""'], {}), "('weights/conv2d_48_Bias')\n", (39371, 39397), True, 'import numpy as np\n'), ((39496, 39531), 'numpy.load', 'np.load', (['"""weights/p_re_lu_57_Alpha"""'], {}), "('weights/p_re_lu_57_Alpha')\n", (39503, 39531), True, 'import numpy as np\n'), ((39825, 39858), 'numpy.load', 'np.load', (['"""weights/conv2d_55_Bias"""'], {}), "('weights/conv2d_55_Bias')\n", (39832, 39858), True, 'import numpy as np\n'), ((39917, 39952), 'numpy.load', 'np.load', (['"""weights/p_re_lu_65_Alpha"""'], {}), 
"('weights/p_re_lu_65_Alpha')\n", (39924, 39952), True, 'import numpy as np\n'), ((40159, 40204), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_25_Kernel"""'], {}), "('weights/depthwise_conv2d_25_Kernel')\n", (40166, 40204), True, 'import numpy as np\n'), ((40250, 40293), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_25_Bias"""'], {}), "('weights/depthwise_conv2d_25_Bias')\n", (40257, 40293), True, 'import numpy as np\n'), ((40557, 40590), 'numpy.load', 'np.load', (['"""weights/conv2d_56_Bias"""'], {}), "('weights/conv2d_56_Bias')\n", (40564, 40590), True, 'import numpy as np\n'), ((40653, 40688), 'numpy.load', 'np.load', (['"""weights/p_re_lu_66_Alpha"""'], {}), "('weights/p_re_lu_66_Alpha')\n", (40660, 40688), True, 'import numpy as np\n'), ((40972, 41005), 'numpy.load', 'np.load', (['"""weights/conv2d_57_Bias"""'], {}), "('weights/conv2d_57_Bias')\n", (40979, 41005), True, 'import numpy as np\n'), ((41104, 41139), 'numpy.load', 'np.load', (['"""weights/p_re_lu_67_Alpha"""'], {}), "('weights/p_re_lu_67_Alpha')\n", (41111, 41139), True, 'import numpy as np\n'), ((41433, 41466), 'numpy.load', 'np.load', (['"""weights/conv2d_58_Bias"""'], {}), "('weights/conv2d_58_Bias')\n", (41440, 41466), True, 'import numpy as np\n'), ((41525, 41560), 'numpy.load', 'np.load', (['"""weights/p_re_lu_68_Alpha"""'], {}), "('weights/p_re_lu_68_Alpha')\n", (41532, 41560), True, 'import numpy as np\n'), ((41842, 41875), 'numpy.load', 'np.load', (['"""weights/conv2d_59_Bias"""'], {}), "('weights/conv2d_59_Bias')\n", (41849, 41875), True, 'import numpy as np\n'), ((41934, 41969), 'numpy.load', 'np.load', (['"""weights/p_re_lu_69_Alpha"""'], {}), "('weights/p_re_lu_69_Alpha')\n", (41941, 41969), True, 'import numpy as np\n'), ((42254, 42287), 'numpy.load', 'np.load', (['"""weights/conv2d_60_Bias"""'], {}), "('weights/conv2d_60_Bias')\n", (42261, 42287), True, 'import numpy as np\n'), ((42386, 42421), 'numpy.load', 'np.load', (['"""weights/p_re_lu_70_Alpha"""'], {}), 
"('weights/p_re_lu_70_Alpha')\n", (42393, 42421), True, 'import numpy as np\n'), ((42715, 42748), 'numpy.load', 'np.load', (['"""weights/conv2d_61_Bias"""'], {}), "('weights/conv2d_61_Bias')\n", (42722, 42748), True, 'import numpy as np\n'), ((42807, 42842), 'numpy.load', 'np.load', (['"""weights/p_re_lu_71_Alpha"""'], {}), "('weights/p_re_lu_71_Alpha')\n", (42814, 42842), True, 'import numpy as np\n'), ((43049, 43094), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_26_Kernel"""'], {}), "('weights/depthwise_conv2d_26_Kernel')\n", (43056, 43094), True, 'import numpy as np\n'), ((43140, 43183), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_26_Bias"""'], {}), "('weights/depthwise_conv2d_26_Bias')\n", (43147, 43183), True, 'import numpy as np\n'), ((43447, 43480), 'numpy.load', 'np.load', (['"""weights/conv2d_62_Bias"""'], {}), "('weights/conv2d_62_Bias')\n", (43454, 43480), True, 'import numpy as np\n'), ((43543, 43578), 'numpy.load', 'np.load', (['"""weights/p_re_lu_72_Alpha"""'], {}), "('weights/p_re_lu_72_Alpha')\n", (43550, 43578), True, 'import numpy as np\n'), ((43785, 43830), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_27_Kernel"""'], {}), "('weights/depthwise_conv2d_27_Kernel')\n", (43792, 43830), True, 'import numpy as np\n'), ((43876, 43919), 'numpy.load', 'np.load', (['"""weights/depthwise_conv2d_27_Bias"""'], {}), "('weights/depthwise_conv2d_27_Bias')\n", (43883, 43919), True, 'import numpy as np\n'), ((43979, 44014), 'numpy.load', 'np.load', (['"""weights/p_re_lu_73_Alpha"""'], {}), "('weights/p_re_lu_73_Alpha')\n", (43986, 44014), True, 'import numpy as np\n'), ((44303, 44336), 'numpy.load', 'np.load', (['"""weights/conv2d_63_Bias"""'], {}), "('weights/conv2d_63_Bias')\n", (44310, 44336), True, 'import numpy as np\n'), ((44435, 44470), 'numpy.load', 'np.load', (['"""weights/p_re_lu_74_Alpha"""'], {}), "('weights/p_re_lu_74_Alpha')\n", (44442, 44470), True, 'import numpy as np\n'), ((44764, 44797), 'numpy.load', 'np.load', 
(['"""weights/conv2d_64_Bias"""'], {}), "('weights/conv2d_64_Bias')\n", (44771, 44797), True, 'import numpy as np\n'), ((44856, 44891), 'numpy.load', 'np.load', (['"""weights/p_re_lu_75_Alpha"""'], {}), "('weights/p_re_lu_75_Alpha')\n", (44863, 44891), True, 'import numpy as np\n'), ((45173, 45206), 'numpy.load', 'np.load', (['"""weights/conv2d_65_Bias"""'], {}), "('weights/conv2d_65_Bias')\n", (45180, 45206), True, 'import numpy as np\n'), ((45265, 45300), 'numpy.load', 'np.load', (['"""weights/p_re_lu_76_Alpha"""'], {}), "('weights/p_re_lu_76_Alpha')\n", (45272, 45300), True, 'import numpy as np\n'), ((45585, 45618), 'numpy.load', 'np.load', (['"""weights/conv2d_66_Bias"""'], {}), "('weights/conv2d_66_Bias')\n", (45592, 45618), True, 'import numpy as np\n'), ((45717, 45752), 'numpy.load', 'np.load', (['"""weights/p_re_lu_77_Alpha"""'], {}), "('weights/p_re_lu_77_Alpha')\n", (45724, 45752), True, 'import numpy as np\n'), ((46045, 46078), 'numpy.load', 'np.load', (['"""weights/conv2d_67_Bias"""'], {}), "('weights/conv2d_67_Bias')\n", (46052, 46078), True, 'import numpy as np\n'), ((46137, 46172), 'numpy.load', 'np.load', (['"""weights/p_re_lu_78_Alpha"""'], {}), "('weights/p_re_lu_78_Alpha')\n", (46144, 46172), True, 'import numpy as np\n'), ((46454, 46487), 'numpy.load', 'np.load', (['"""weights/conv2d_68_Bias"""'], {}), "('weights/conv2d_68_Bias')\n", (46461, 46487), True, 'import numpy as np\n'), ((46546, 46581), 'numpy.load', 'np.load', (['"""weights/p_re_lu_79_Alpha"""'], {}), "('weights/p_re_lu_79_Alpha')\n", (46553, 46581), True, 'import numpy as np\n'), ((46865, 46898), 'numpy.load', 'np.load', (['"""weights/conv2d_69_Bias"""'], {}), "('weights/conv2d_69_Bias')\n", (46872, 46898), True, 'import numpy as np\n'), ((46997, 47032), 'numpy.load', 'np.load', (['"""weights/p_re_lu_80_Alpha"""'], {}), "('weights/p_re_lu_80_Alpha')\n", (47004, 47032), True, 'import numpy as np\n'), ((47325, 47358), 'numpy.load', 'np.load', (['"""weights/conv2d_70_Bias"""'], {}), 
"('weights/conv2d_70_Bias')\n", (47332, 47358), True, 'import numpy as np\n'), ((47417, 47452), 'numpy.load', 'np.load', (['"""weights/p_re_lu_81_Alpha"""'], {}), "('weights/p_re_lu_81_Alpha')\n", (47424, 47452), True, 'import numpy as np\n'), ((47736, 47776), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_Bias"""'], {}), "('weights/conv2d_transpose_Bias')\n", (47743, 47776), True, 'import numpy as np\n'), ((47835, 47870), 'numpy.load', 'np.load', (['"""weights/p_re_lu_82_Alpha"""'], {}), "('weights/p_re_lu_82_Alpha')\n", (47842, 47870), True, 'import numpy as np\n'), ((48162, 48195), 'numpy.load', 'np.load', (['"""weights/conv2d_71_Bias"""'], {}), "('weights/conv2d_71_Bias')\n", (48169, 48195), True, 'import numpy as np\n'), ((48460, 48493), 'numpy.load', 'np.load', (['"""weights/conv2d_72_Bias"""'], {}), "('weights/conv2d_72_Bias')\n", (48467, 48493), True, 'import numpy as np\n'), ((48671, 48706), 'numpy.load', 'np.load', (['"""weights/p_re_lu_77_Alpha"""'], {}), "('weights/p_re_lu_77_Alpha')\n", (48678, 48706), True, 'import numpy as np\n'), ((49050, 49083), 'numpy.load', 'np.load', (['"""weights/conv2d_73_Bias"""'], {}), "('weights/conv2d_73_Bias')\n", (49057, 49083), True, 'import numpy as np\n'), ((49143, 49178), 'numpy.load', 'np.load', (['"""weights/p_re_lu_84_Alpha"""'], {}), "('weights/p_re_lu_84_Alpha')\n", (49150, 49178), True, 'import numpy as np\n'), ((49460, 49493), 'numpy.load', 'np.load', (['"""weights/conv2d_74_Bias"""'], {}), "('weights/conv2d_74_Bias')\n", (49467, 49493), True, 'import numpy as np\n'), ((49552, 49587), 'numpy.load', 'np.load', (['"""weights/p_re_lu_85_Alpha"""'], {}), "('weights/p_re_lu_85_Alpha')\n", (49559, 49587), True, 'import numpy as np\n'), ((49871, 49904), 'numpy.load', 'np.load', (['"""weights/conv2d_75_Bias"""'], {}), "('weights/conv2d_75_Bias')\n", (49878, 49904), True, 'import numpy as np\n'), ((50169, 50202), 'numpy.load', 'np.load', (['"""weights/conv2d_76_Bias"""'], {}), "('weights/conv2d_76_Bias')\n", 
(50176, 50202), True, 'import numpy as np\n'), ((50301, 50336), 'numpy.load', 'np.load', (['"""weights/p_re_lu_86_Alpha"""'], {}), "('weights/p_re_lu_86_Alpha')\n", (50308, 50336), True, 'import numpy as np\n'), ((50629, 50662), 'numpy.load', 'np.load', (['"""weights/conv2d_77_Bias"""'], {}), "('weights/conv2d_77_Bias')\n", (50636, 50662), True, 'import numpy as np\n'), ((50721, 50756), 'numpy.load', 'np.load', (['"""weights/p_re_lu_87_Alpha"""'], {}), "('weights/p_re_lu_87_Alpha')\n", (50728, 50756), True, 'import numpy as np\n'), ((51042, 51084), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_1_Bias"""'], {}), "('weights/conv2d_transpose_1_Bias')\n", (51049, 51084), True, 'import numpy as np\n'), ((51143, 51178), 'numpy.load', 'np.load', (['"""weights/p_re_lu_88_Alpha"""'], {}), "('weights/p_re_lu_88_Alpha')\n", (51150, 51178), True, 'import numpy as np\n'), ((51469, 51502), 'numpy.load', 'np.load', (['"""weights/conv2d_78_Bias"""'], {}), "('weights/conv2d_78_Bias')\n", (51476, 51502), True, 'import numpy as np\n'), ((51766, 51799), 'numpy.load', 'np.load', (['"""weights/conv2d_79_Bias"""'], {}), "('weights/conv2d_79_Bias')\n", (51773, 51799), True, 'import numpy as np\n'), ((51976, 52011), 'numpy.load', 'np.load', (['"""weights/p_re_lu_89_Alpha"""'], {}), "('weights/p_re_lu_89_Alpha')\n", (51983, 52011), True, 'import numpy as np\n'), ((52355, 52388), 'numpy.load', 'np.load', (['"""weights/conv2d_80_Bias"""'], {}), "('weights/conv2d_80_Bias')\n", (52362, 52388), True, 'import numpy as np\n'), ((52448, 52483), 'numpy.load', 'np.load', (['"""weights/p_re_lu_90_Alpha"""'], {}), "('weights/p_re_lu_90_Alpha')\n", (52455, 52483), True, 'import numpy as np\n'), ((52765, 52798), 'numpy.load', 'np.load', (['"""weights/conv2d_81_Bias"""'], {}), "('weights/conv2d_81_Bias')\n", (52772, 52798), True, 'import numpy as np\n'), ((52857, 52892), 'numpy.load', 'np.load', (['"""weights/p_re_lu_91_Alpha"""'], {}), "('weights/p_re_lu_91_Alpha')\n", (52864, 52892), True, 
'import numpy as np\n'), ((53175, 53208), 'numpy.load', 'np.load', (['"""weights/conv2d_82_Bias"""'], {}), "('weights/conv2d_82_Bias')\n", (53182, 53208), True, 'import numpy as np\n'), ((53472, 53505), 'numpy.load', 'np.load', (['"""weights/conv2d_83_Bias"""'], {}), "('weights/conv2d_83_Bias')\n", (53479, 53505), True, 'import numpy as np\n'), ((53604, 53639), 'numpy.load', 'np.load', (['"""weights/p_re_lu_92_Alpha"""'], {}), "('weights/p_re_lu_92_Alpha')\n", (53611, 53639), True, 'import numpy as np\n'), ((53932, 53965), 'numpy.load', 'np.load', (['"""weights/conv2d_84_Bias"""'], {}), "('weights/conv2d_84_Bias')\n", (53939, 53965), True, 'import numpy as np\n'), ((54024, 54059), 'numpy.load', 'np.load', (['"""weights/p_re_lu_93_Alpha"""'], {}), "('weights/p_re_lu_93_Alpha')\n", (54031, 54059), True, 'import numpy as np\n'), ((54345, 54387), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_2_Bias"""'], {}), "('weights/conv2d_transpose_2_Bias')\n", (54352, 54387), True, 'import numpy as np\n'), ((54446, 54481), 'numpy.load', 'np.load', (['"""weights/p_re_lu_94_Alpha"""'], {}), "('weights/p_re_lu_94_Alpha')\n", (54453, 54481), True, 'import numpy as np\n'), ((54772, 54805), 'numpy.load', 'np.load', (['"""weights/conv2d_85_Bias"""'], {}), "('weights/conv2d_85_Bias')\n", (54779, 54805), True, 'import numpy as np\n'), ((55069, 55102), 'numpy.load', 'np.load', (['"""weights/conv2d_86_Bias"""'], {}), "('weights/conv2d_86_Bias')\n", (55076, 55102), True, 'import numpy as np\n'), ((55279, 55314), 'numpy.load', 'np.load', (['"""weights/p_re_lu_95_Alpha"""'], {}), "('weights/p_re_lu_95_Alpha')\n", (55286, 55314), True, 'import numpy as np\n'), ((55607, 55640), 'numpy.load', 'np.load', (['"""weights/conv2d_87_Bias"""'], {}), "('weights/conv2d_87_Bias')\n", (55614, 55640), True, 'import numpy as np\n'), ((55699, 55734), 'numpy.load', 'np.load', (['"""weights/p_re_lu_96_Alpha"""'], {}), "('weights/p_re_lu_96_Alpha')\n", (55706, 55734), True, 'import numpy as np\n'), 
((56016, 56049), 'numpy.load', 'np.load', (['"""weights/conv2d_88_Bias"""'], {}), "('weights/conv2d_88_Bias')\n", (56023, 56049), True, 'import numpy as np\n'), ((56108, 56143), 'numpy.load', 'np.load', (['"""weights/p_re_lu_97_Alpha"""'], {}), "('weights/p_re_lu_97_Alpha')\n", (56115, 56143), True, 'import numpy as np\n'), ((56426, 56459), 'numpy.load', 'np.load', (['"""weights/conv2d_89_Bias"""'], {}), "('weights/conv2d_89_Bias')\n", (56433, 56459), True, 'import numpy as np\n'), ((56558, 56593), 'numpy.load', 'np.load', (['"""weights/p_re_lu_98_Alpha"""'], {}), "('weights/p_re_lu_98_Alpha')\n", (56565, 56593), True, 'import numpy as np\n'), ((56891, 56933), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_3_Bias"""'], {}), "('weights/conv2d_transpose_3_Bias')\n", (56898, 56933), True, 'import numpy as np\n'), ((56992, 57027), 'numpy.load', 'np.load', (['"""weights/p_re_lu_99_Alpha"""'], {}), "('weights/p_re_lu_99_Alpha')\n", (56999, 57027), True, 'import numpy as np\n'), ((57322, 57364), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_4_Bias"""'], {}), "('weights/conv2d_transpose_4_Bias')\n", (57329, 57364), True, 'import numpy as np\n'), ((6651, 6683), 'numpy.load', 'np.load', (['"""weights/conv2d_Kernel"""'], {}), "('weights/conv2d_Kernel')\n", (6658, 6683), True, 'import numpy as np\n'), ((7047, 7081), 'numpy.load', 'np.load', (['"""weights/conv2d_1_Kernel"""'], {}), "('weights/conv2d_1_Kernel')\n", (7054, 7081), True, 'import numpy as np\n'), ((7463, 7497), 'numpy.load', 'np.load', (['"""weights/conv2d_2_Kernel"""'], {}), "('weights/conv2d_2_Kernel')\n", (7470, 7497), True, 'import numpy as np\n'), ((8181, 8215), 'numpy.load', 'np.load', (['"""weights/conv2d_3_Kernel"""'], {}), "('weights/conv2d_3_Kernel')\n", (8188, 8215), True, 'import numpy as np\n'), ((9022, 9056), 'numpy.load', 'np.load', (['"""weights/conv2d_4_Kernel"""'], {}), "('weights/conv2d_4_Kernel')\n", (9029, 9056), True, 'import numpy as np\n'), ((9491, 9525), 'numpy.load', 
'np.load', (['"""weights/conv2d_5_Kernel"""'], {}), "('weights/conv2d_5_Kernel')\n", (9498, 9525), True, 'import numpy as np\n'), ((9950, 9984), 'numpy.load', 'np.load', (['"""weights/conv2d_6_Kernel"""'], {}), "('weights/conv2d_6_Kernel')\n", (9957, 9984), True, 'import numpy as np\n'), ((10672, 10706), 'numpy.load', 'np.load', (['"""weights/conv2d_7_Kernel"""'], {}), "('weights/conv2d_7_Kernel')\n", (10679, 10706), True, 'import numpy as np\n'), ((11513, 11547), 'numpy.load', 'np.load', (['"""weights/conv2d_8_Kernel"""'], {}), "('weights/conv2d_8_Kernel')\n", (11520, 11547), True, 'import numpy as np\n'), ((11965, 11999), 'numpy.load', 'np.load', (['"""weights/conv2d_9_Kernel"""'], {}), "('weights/conv2d_9_Kernel')\n", (11972, 11999), True, 'import numpy as np\n'), ((12688, 12723), 'numpy.load', 'np.load', (['"""weights/conv2d_10_Kernel"""'], {}), "('weights/conv2d_10_Kernel')\n", (12695, 12723), True, 'import numpy as np\n'), ((13533, 13568), 'numpy.load', 'np.load', (['"""weights/conv2d_11_Kernel"""'], {}), "('weights/conv2d_11_Kernel')\n", (13540, 13568), True, 'import numpy as np\n'), ((13988, 14023), 'numpy.load', 'np.load', (['"""weights/conv2d_12_Kernel"""'], {}), "('weights/conv2d_12_Kernel')\n", (13995, 14023), True, 'import numpy as np\n'), ((14713, 14748), 'numpy.load', 'np.load', (['"""weights/conv2d_13_Kernel"""'], {}), "('weights/conv2d_13_Kernel')\n", (14720, 14748), True, 'import numpy as np\n'), ((15559, 15594), 'numpy.load', 'np.load', (['"""weights/conv2d_14_Kernel"""'], {}), "('weights/conv2d_14_Kernel')\n", (15566, 15594), True, 'import numpy as np\n'), ((16030, 16065), 'numpy.load', 'np.load', (['"""weights/conv2d_15_Kernel"""'], {}), "('weights/conv2d_15_Kernel')\n", (16037, 16065), True, 'import numpy as np\n'), ((16492, 16527), 'numpy.load', 'np.load', (['"""weights/conv2d_16_Kernel"""'], {}), "('weights/conv2d_16_Kernel')\n", (16499, 16527), True, 'import numpy as np\n'), ((17217, 17252), 'numpy.load', 'np.load', 
(['"""weights/conv2d_17_Kernel"""'], {}), "('weights/conv2d_17_Kernel')\n", (17224, 17252), True, 'import numpy as np\n'), ((18063, 18098), 'numpy.load', 'np.load', (['"""weights/conv2d_18_Kernel"""'], {}), "('weights/conv2d_18_Kernel')\n", (18070, 18098), True, 'import numpy as np\n'), ((18518, 18553), 'numpy.load', 'np.load', (['"""weights/conv2d_19_Kernel"""'], {}), "('weights/conv2d_19_Kernel')\n", (18525, 18553), True, 'import numpy as np\n'), ((19245, 19280), 'numpy.load', 'np.load', (['"""weights/conv2d_20_Kernel"""'], {}), "('weights/conv2d_20_Kernel')\n", (19252, 19280), True, 'import numpy as np\n'), ((20093, 20128), 'numpy.load', 'np.load', (['"""weights/conv2d_21_Kernel"""'], {}), "('weights/conv2d_21_Kernel')\n", (20100, 20128), True, 'import numpy as np\n'), ((20548, 20583), 'numpy.load', 'np.load', (['"""weights/conv2d_22_Kernel"""'], {}), "('weights/conv2d_22_Kernel')\n", (20555, 20583), True, 'import numpy as np\n'), ((21275, 21310), 'numpy.load', 'np.load', (['"""weights/conv2d_23_Kernel"""'], {}), "('weights/conv2d_23_Kernel')\n", (21282, 21310), True, 'import numpy as np\n'), ((22123, 22158), 'numpy.load', 'np.load', (['"""weights/conv2d_24_Kernel"""'], {}), "('weights/conv2d_24_Kernel')\n", (22130, 22158), True, 'import numpy as np\n'), ((22578, 22613), 'numpy.load', 'np.load', (['"""weights/conv2d_25_Kernel"""'], {}), "('weights/conv2d_25_Kernel')\n", (22585, 22613), True, 'import numpy as np\n'), ((23305, 23340), 'numpy.load', 'np.load', (['"""weights/conv2d_26_Kernel"""'], {}), "('weights/conv2d_26_Kernel')\n", (23312, 23340), True, 'import numpy as np\n'), ((24153, 24188), 'numpy.load', 'np.load', (['"""weights/conv2d_27_Kernel"""'], {}), "('weights/conv2d_27_Kernel')\n", (24160, 24188), True, 'import numpy as np\n'), ((24609, 24644), 'numpy.load', 'np.load', (['"""weights/conv2d_28_Kernel"""'], {}), "('weights/conv2d_28_Kernel')\n", (24616, 24644), True, 'import numpy as np\n'), ((25341, 25376), 'numpy.load', 'np.load', 
(['"""weights/conv2d_29_Kernel"""'], {}), "('weights/conv2d_29_Kernel')\n", (25348, 25376), True, 'import numpy as np\n'), ((26197, 26232), 'numpy.load', 'np.load', (['"""weights/conv2d_30_Kernel"""'], {}), "('weights/conv2d_30_Kernel')\n", (26204, 26232), True, 'import numpy as np\n'), ((26842, 26877), 'numpy.load', 'np.load', (['"""weights/conv2d_31_Kernel"""'], {}), "('weights/conv2d_31_Kernel')\n", (26849, 26877), True, 'import numpy as np\n'), ((27574, 27609), 'numpy.load', 'np.load', (['"""weights/conv2d_32_Kernel"""'], {}), "('weights/conv2d_32_Kernel')\n", (27581, 27609), True, 'import numpy as np\n'), ((28430, 28465), 'numpy.load', 'np.load', (['"""weights/conv2d_33_Kernel"""'], {}), "('weights/conv2d_33_Kernel')\n", (28437, 28465), True, 'import numpy as np\n'), ((28891, 28926), 'numpy.load', 'np.load', (['"""weights/conv2d_34_Kernel"""'], {}), "('weights/conv2d_34_Kernel')\n", (28898, 28926), True, 'import numpy as np\n'), ((29300, 29335), 'numpy.load', 'np.load', (['"""weights/conv2d_35_Kernel"""'], {}), "('weights/conv2d_35_Kernel')\n", (29307, 29335), True, 'import numpy as np\n'), ((29712, 29747), 'numpy.load', 'np.load', (['"""weights/conv2d_36_Kernel"""'], {}), "('weights/conv2d_36_Kernel')\n", (29719, 29747), True, 'import numpy as np\n'), ((30173, 30208), 'numpy.load', 'np.load', (['"""weights/conv2d_37_Kernel"""'], {}), "('weights/conv2d_37_Kernel')\n", (30180, 30208), True, 'import numpy as np\n'), ((30905, 30940), 'numpy.load', 'np.load', (['"""weights/conv2d_38_Kernel"""'], {}), "('weights/conv2d_38_Kernel')\n", (30912, 30940), True, 'import numpy as np\n'), ((31320, 31355), 'numpy.load', 'np.load', (['"""weights/conv2d_39_Kernel"""'], {}), "('weights/conv2d_39_Kernel')\n", (31327, 31355), True, 'import numpy as np\n'), ((31781, 31816), 'numpy.load', 'np.load', (['"""weights/conv2d_40_Kernel"""'], {}), "('weights/conv2d_40_Kernel')\n", (31788, 31816), True, 'import numpy as np\n'), ((32190, 32225), 'numpy.load', 'np.load', 
(['"""weights/conv2d_41_Kernel"""'], {}), "('weights/conv2d_41_Kernel')\n", (32197, 32225), True, 'import numpy as np\n'), ((32602, 32637), 'numpy.load', 'np.load', (['"""weights/conv2d_42_Kernel"""'], {}), "('weights/conv2d_42_Kernel')\n", (32609, 32637), True, 'import numpy as np\n'), ((33063, 33098), 'numpy.load', 'np.load', (['"""weights/conv2d_43_Kernel"""'], {}), "('weights/conv2d_43_Kernel')\n", (33070, 33098), True, 'import numpy as np\n'), ((33795, 33830), 'numpy.load', 'np.load', (['"""weights/conv2d_44_Kernel"""'], {}), "('weights/conv2d_44_Kernel')\n", (33802, 33830), True, 'import numpy as np\n'), ((34651, 34686), 'numpy.load', 'np.load', (['"""weights/conv2d_45_Kernel"""'], {}), "('weights/conv2d_45_Kernel')\n", (34658, 34686), True, 'import numpy as np\n'), ((35112, 35147), 'numpy.load', 'np.load', (['"""weights/conv2d_46_Kernel"""'], {}), "('weights/conv2d_46_Kernel')\n", (35119, 35147), True, 'import numpy as np\n'), ((35521, 35556), 'numpy.load', 'np.load', (['"""weights/conv2d_47_Kernel"""'], {}), "('weights/conv2d_47_Kernel')\n", (35528, 35556), True, 'import numpy as np\n'), ((35933, 35968), 'numpy.load', 'np.load', (['"""weights/conv2d_48_Kernel"""'], {}), "('weights/conv2d_48_Kernel')\n", (35940, 35968), True, 'import numpy as np\n'), ((36394, 36429), 'numpy.load', 'np.load', (['"""weights/conv2d_49_Kernel"""'], {}), "('weights/conv2d_49_Kernel')\n", (36401, 36429), True, 'import numpy as np\n'), ((37126, 37161), 'numpy.load', 'np.load', (['"""weights/conv2d_50_Kernel"""'], {}), "('weights/conv2d_50_Kernel')\n", (37133, 37161), True, 'import numpy as np\n'), ((37982, 38017), 'numpy.load', 'np.load', (['"""weights/conv2d_51_Kernel"""'], {}), "('weights/conv2d_51_Kernel')\n", (37989, 38017), True, 'import numpy as np\n'), ((38443, 38478), 'numpy.load', 'np.load', (['"""weights/conv2d_46_Kernel"""'], {}), "('weights/conv2d_46_Kernel')\n", (38450, 38478), True, 'import numpy as np\n'), ((38852, 38887), 'numpy.load', 'np.load', 
(['"""weights/conv2d_47_Kernel"""'], {}), "('weights/conv2d_47_Kernel')\n", (38859, 38887), True, 'import numpy as np\n'), ((39264, 39299), 'numpy.load', 'np.load', (['"""weights/conv2d_48_Kernel"""'], {}), "('weights/conv2d_48_Kernel')\n", (39271, 39299), True, 'import numpy as np\n'), ((39725, 39760), 'numpy.load', 'np.load', (['"""weights/conv2d_55_Kernel"""'], {}), "('weights/conv2d_55_Kernel')\n", (39732, 39760), True, 'import numpy as np\n'), ((40457, 40492), 'numpy.load', 'np.load', (['"""weights/conv2d_56_Kernel"""'], {}), "('weights/conv2d_56_Kernel')\n", (40464, 40492), True, 'import numpy as np\n'), ((40872, 40907), 'numpy.load', 'np.load', (['"""weights/conv2d_57_Kernel"""'], {}), "('weights/conv2d_57_Kernel')\n", (40879, 40907), True, 'import numpy as np\n'), ((41333, 41368), 'numpy.load', 'np.load', (['"""weights/conv2d_58_Kernel"""'], {}), "('weights/conv2d_58_Kernel')\n", (41340, 41368), True, 'import numpy as np\n'), ((41742, 41777), 'numpy.load', 'np.load', (['"""weights/conv2d_59_Kernel"""'], {}), "('weights/conv2d_59_Kernel')\n", (41749, 41777), True, 'import numpy as np\n'), ((42154, 42189), 'numpy.load', 'np.load', (['"""weights/conv2d_60_Kernel"""'], {}), "('weights/conv2d_60_Kernel')\n", (42161, 42189), True, 'import numpy as np\n'), ((42615, 42650), 'numpy.load', 'np.load', (['"""weights/conv2d_61_Kernel"""'], {}), "('weights/conv2d_61_Kernel')\n", (42622, 42650), True, 'import numpy as np\n'), ((43347, 43382), 'numpy.load', 'np.load', (['"""weights/conv2d_62_Kernel"""'], {}), "('weights/conv2d_62_Kernel')\n", (43354, 43382), True, 'import numpy as np\n'), ((44203, 44238), 'numpy.load', 'np.load', (['"""weights/conv2d_63_Kernel"""'], {}), "('weights/conv2d_63_Kernel')\n", (44210, 44238), True, 'import numpy as np\n'), ((44664, 44699), 'numpy.load', 'np.load', (['"""weights/conv2d_64_Kernel"""'], {}), "('weights/conv2d_64_Kernel')\n", (44671, 44699), True, 'import numpy as np\n'), ((45073, 45108), 'numpy.load', 'np.load', 
(['"""weights/conv2d_65_Kernel"""'], {}), "('weights/conv2d_65_Kernel')\n", (45080, 45108), True, 'import numpy as np\n'), ((45485, 45520), 'numpy.load', 'np.load', (['"""weights/conv2d_66_Kernel"""'], {}), "('weights/conv2d_66_Kernel')\n", (45492, 45520), True, 'import numpy as np\n'), ((45945, 45980), 'numpy.load', 'np.load', (['"""weights/conv2d_67_Kernel"""'], {}), "('weights/conv2d_67_Kernel')\n", (45952, 45980), True, 'import numpy as np\n'), ((46354, 46389), 'numpy.load', 'np.load', (['"""weights/conv2d_68_Kernel"""'], {}), "('weights/conv2d_68_Kernel')\n", (46361, 46389), True, 'import numpy as np\n'), ((46765, 46800), 'numpy.load', 'np.load', (['"""weights/conv2d_69_Kernel"""'], {}), "('weights/conv2d_69_Kernel')\n", (46772, 46800), True, 'import numpy as np\n'), ((47225, 47260), 'numpy.load', 'np.load', (['"""weights/conv2d_70_Kernel"""'], {}), "('weights/conv2d_70_Kernel')\n", (47232, 47260), True, 'import numpy as np\n'), ((47629, 47671), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_Kernel"""'], {}), "('weights/conv2d_transpose_Kernel')\n", (47636, 47671), True, 'import numpy as np\n'), ((48062, 48097), 'numpy.load', 'np.load', (['"""weights/conv2d_71_Kernel"""'], {}), "('weights/conv2d_71_Kernel')\n", (48069, 48097), True, 'import numpy as np\n'), ((48360, 48395), 'numpy.load', 'np.load', (['"""weights/conv2d_72_Kernel"""'], {}), "('weights/conv2d_72_Kernel')\n", (48367, 48395), True, 'import numpy as np\n'), ((48950, 48985), 'numpy.load', 'np.load', (['"""weights/conv2d_73_Kernel"""'], {}), "('weights/conv2d_73_Kernel')\n", (48957, 48985), True, 'import numpy as np\n'), ((49360, 49395), 'numpy.load', 'np.load', (['"""weights/conv2d_74_Kernel"""'], {}), "('weights/conv2d_74_Kernel')\n", (49367, 49395), True, 'import numpy as np\n'), ((49771, 49806), 'numpy.load', 'np.load', (['"""weights/conv2d_75_Kernel"""'], {}), "('weights/conv2d_75_Kernel')\n", (49778, 49806), True, 'import numpy as np\n'), ((50069, 50104), 'numpy.load', 'np.load', 
(['"""weights/conv2d_76_Kernel"""'], {}), "('weights/conv2d_76_Kernel')\n", (50076, 50104), True, 'import numpy as np\n'), ((50529, 50564), 'numpy.load', 'np.load', (['"""weights/conv2d_77_Kernel"""'], {}), "('weights/conv2d_77_Kernel')\n", (50536, 50564), True, 'import numpy as np\n'), ((50933, 50977), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_1_Kernel"""'], {}), "('weights/conv2d_transpose_1_Kernel')\n", (50940, 50977), True, 'import numpy as np\n'), ((51369, 51404), 'numpy.load', 'np.load', (['"""weights/conv2d_78_Kernel"""'], {}), "('weights/conv2d_78_Kernel')\n", (51376, 51404), True, 'import numpy as np\n'), ((51666, 51701), 'numpy.load', 'np.load', (['"""weights/conv2d_79_Kernel"""'], {}), "('weights/conv2d_79_Kernel')\n", (51673, 51701), True, 'import numpy as np\n'), ((52255, 52290), 'numpy.load', 'np.load', (['"""weights/conv2d_80_Kernel"""'], {}), "('weights/conv2d_80_Kernel')\n", (52262, 52290), True, 'import numpy as np\n'), ((52665, 52700), 'numpy.load', 'np.load', (['"""weights/conv2d_81_Kernel"""'], {}), "('weights/conv2d_81_Kernel')\n", (52672, 52700), True, 'import numpy as np\n'), ((53075, 53110), 'numpy.load', 'np.load', (['"""weights/conv2d_82_Kernel"""'], {}), "('weights/conv2d_82_Kernel')\n", (53082, 53110), True, 'import numpy as np\n'), ((53372, 53407), 'numpy.load', 'np.load', (['"""weights/conv2d_83_Kernel"""'], {}), "('weights/conv2d_83_Kernel')\n", (53379, 53407), True, 'import numpy as np\n'), ((53832, 53867), 'numpy.load', 'np.load', (['"""weights/conv2d_84_Kernel"""'], {}), "('weights/conv2d_84_Kernel')\n", (53839, 53867), True, 'import numpy as np\n'), ((54236, 54280), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_2_Kernel"""'], {}), "('weights/conv2d_transpose_2_Kernel')\n", (54243, 54280), True, 'import numpy as np\n'), ((54672, 54707), 'numpy.load', 'np.load', (['"""weights/conv2d_85_Kernel"""'], {}), "('weights/conv2d_85_Kernel')\n", (54679, 54707), True, 'import numpy as np\n'), ((54969, 55004), 
'numpy.load', 'np.load', (['"""weights/conv2d_86_Kernel"""'], {}), "('weights/conv2d_86_Kernel')\n", (54976, 55004), True, 'import numpy as np\n'), ((55507, 55542), 'numpy.load', 'np.load', (['"""weights/conv2d_87_Kernel"""'], {}), "('weights/conv2d_87_Kernel')\n", (55514, 55542), True, 'import numpy as np\n'), ((55916, 55951), 'numpy.load', 'np.load', (['"""weights/conv2d_88_Kernel"""'], {}), "('weights/conv2d_88_Kernel')\n", (55923, 55951), True, 'import numpy as np\n'), ((56326, 56361), 'numpy.load', 'np.load', (['"""weights/conv2d_89_Kernel"""'], {}), "('weights/conv2d_89_Kernel')\n", (56333, 56361), True, 'import numpy as np\n'), ((56782, 56826), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_3_Kernel"""'], {}), "('weights/conv2d_transpose_3_Kernel')\n", (56789, 56826), True, 'import numpy as np\n'), ((57213, 57257), 'numpy.load', 'np.load', (['"""weights/conv2d_transpose_4_Kernel"""'], {}), "('weights/conv2d_transpose_4_Kernel')\n", (57220, 57257), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import pytest
import pandas as pd
from copy import deepcopy
from veritastool.metrics.modelrates import *
from veritastool.model.model_container import ModelContainer
from veritastool.fairness.customer_marketing import CustomerMarketing
import sys
sys.path.append("veritastool/examples/customer_marketing_example")
import selection, uplift, util
#Load Credit Scoring Test Data
# Module-level fixture shared by the ModelRateClassify tests below:
# y_true are the ground-truth labels, y_prob the model scores.
file = "veritastool/examples/data/credit_score_dict.pickle"
# Use a context manager so the file handle is closed deterministically
# (the original left input_file open for the life of the module).
with open(file, "rb") as input_file:
    cs = pickle.load(input_file)
y_true = np.array(cs["y_test"])
y_prob = cs["y_prob"]
def test_ModelRateClassify_init():
    """Check ModelRateClassify's interpolated rate curves at a 0.5 threshold.

    Builds the object with random sample weights and asserts each rate
    (tpr, fpr, ppv, forr, selection rate, base selection rate) falls in a
    known-good range for the credit scoring fixture.
    """
    # Seed the legacy global RNG so the random sample weights — and therefore
    # the asserted ranges below — are reproducible; the original unseeded
    # draw made this test flaky in principle.
    np.random.seed(0)
    sample_weight = np.random.choice(10, 7500, replace=True)
    modelrate_obj = ModelRateClassify(y_true, y_prob, sample_weight=sample_weight)
    # Each rate is an interpolation function queried at threshold 0.5.
    assert 0.6 <= modelrate_obj.tpr([0.5])[0] <= 0.7
    assert 0.30 <= modelrate_obj.fpr([0.5])[0] <= 0.37
    assert 0.82 <= modelrate_obj.ppv([0.5])[0] <= 0.92
    assert 1.75 <= modelrate_obj.forr([0.5])[0] <= 2
    assert 0.5 <= modelrate_obj.selection_rate([0.5])[0] <= 0.65
    assert 0.77 <= round(modelrate_obj.base_selection_rate, 2) <= 0.79
def test_compute_rates():
    """Verify ModelRateClassify.compute_rates output shapes and summary values.

    The expected constants were captured from a known-good run on the credit
    scoring fixture loaded at module level.
    """
    # Single call — the original invoked compute_rates twice and discarded
    # the first result, doubling the test's runtime for no benefit.
    ths, tpr, fpr, ppv, forr, base_selection_rate, selection_rate = ModelRateClassify.compute_rates(y_true, y_prob, sample_weight=None)
    # One entry per distinct threshold derived from y_prob.
    assert ths.shape == (2174,)
    assert tpr.shape == (2174,)
    assert tpr.mean() == 0.6417562335342573
    assert fpr.shape == (2174,)
    assert fpr.mean() == 0.40266439975312385
    assert ppv.shape == (2174,)
    assert ppv.mean() == 0.8627624081302739
    assert forr.shape == (2174,)
    assert forr.mean() == 9.395396004091237
    assert base_selection_rate == 0.7788
    assert selection_rate.shape == (2174,)
    assert selection_rate.mean() == 0.5888691199018706
def test_ModelRateUplift_init():
    """Smoke-test ModelRateUplift using the customer-marketing uplift fixtures.

    Builds rejection and propensity ModelContainers from pickled example
    data, wraps them in a CustomerMarketing fairness object, then checks
    that the interpolated harm / profit / empirical-lift curves evaluated
    at a zero threshold stay within known-good tolerances.
    """
    #Load Phase 1-Customer Marketing Uplift Model Data, Results and Related Functions
    # file_prop = r"C:\Users\brian.zheng\OneDrive - Accenture\Desktop\Veritas\Development\veritas_v1\pickle_files\mktg_uplift_acq_dict.pickle"
    # file_rej = r"C:\Users\brian.zheng\OneDrive - Accenture\Desktop\Veritas\Development\veritas_v1\pickle_files\mktg_uplift_rej_dict.pickle"
    file_prop = "veritastool/examples/data/mktg_uplift_acq_dict.pickle"
    file_rej = "veritastool/examples/data/mktg_uplift_rej_dict.pickle"
    # NOTE(review): these handles are never closed; a with-block would be safer.
    input_prop = open(file_prop, "rb")
    input_rej = open(file_rej, "rb")
    cm_prop = pickle.load(input_prop)
    cm_rej = pickle.load(input_rej)
    #Model Container Parameters
    #Rejection Model
    y_true_rej = cm_rej["y_test"]
    # Predictions reuse the ground-truth labels from the fixture (identical to y_true_rej).
    y_pred_rej = cm_rej["y_test"]
    y_train_rej = cm_rej["y_train"]
    # Protected variables and their privileged groups for the fairness analysis.
    p_var_rej = ['isforeign', 'isfemale']
    p_grp_rej = {'isforeign':[0], 'isfemale':[0]}
    # Drop the identifier column so only model features remain.
    x_train_rej = cm_rej["X_train"].drop(['ID'], axis = 1)
    x_test_rej = cm_rej["X_test"].drop(['ID'], axis = 1)
    model_object_rej = cm_rej['model']
    model_name_rej = "cm_rejection"
    model_type_rej = "uplift"
    y_prob_rej = cm_rej["y_prob"]
    data = {"FEATURE" :['income', 'noproducts', 'didrespond', 'age', 'isfemale',
       'isforeign'],
       "VALUE":[0.3, 0.2, 0.15, 0.1, 0.05, 0.03]}
    # NOTE(review): this frame is built in the rejection section but named
    # *_prop, while the one below (propensity section) is named *_rej. Both
    # frames hold identical data, so the swap is harmless — confirm intent.
    feature_importance_prop = pd.DataFrame(data)
    #Propensity Model
    y_true_prop = cm_prop["y_test"]
    y_pred_prop = cm_prop["y_test"]
    y_train_prop = cm_prop["y_train"]
    p_var_prop = ['isforeign', 'isfemale']
    p_grp_prop = {'isforeign':[0], 'isfemale':[0]}
    x_train_prop = cm_prop["X_train"].drop(['ID'], axis = 1)
    x_test_prop = cm_prop["X_test"].drop(['ID'], axis = 1)
    model_object_prop = cm_prop['model']
    model_name_prop = "cm_propensity"
    model_type_prop = "uplift"
    y_prob_prop = cm_prop["y_prob"]
    data = {"FEATURE" :['income', 'noproducts', 'didrespond', 'age', 'isfemale',
       'isforeign'],
       "VALUE":[0.3, 0.2, 0.15, 0.1, 0.05, 0.03]}
    feature_importance_rej = pd.DataFrame(data)
    # Business parameters for the expected-profit metric.
    PROFIT_RESPOND = 190
    COST_TREATMENT =20
    # pos/neg labels are per-model pairs — presumably treated/control
    # responders vs. non-responders (TR/CR and TN/CN); verify against the
    # ModelContainer documentation.
    container_rej = ModelContainer(y_true = y_true_rej, y_pred = y_pred_rej, y_prob = y_prob_rej, y_train= y_train_rej, p_var = p_var_rej, p_grp = p_grp_rej, x_train = x_train_rej, x_test = x_test_rej, model_object = model_object_rej, model_name = model_name_rej, model_type = model_type_rej, pos_label=[['TR'], ['CR']], neg_label=[['TN'], ['CN']], predict_op_name = "predict_proba", feature_imp = feature_importance_rej)
    # clone() keeps the rejection container's settings, overriding only the
    # propensity-specific data and model.
    container_prop = container_rej.clone(y_true = y_true_prop, y_pred = y_pred_prop, y_prob = y_prob_prop, y_train= y_train_prop,\
                    model_object = model_object_prop, pos_label=[['TR'], ['CR']], neg_label=[['TN'], ['CN']], \
                    predict_op_name = "predict_proba", feature_imp = feature_importance_prop)
    cm_uplift_obj = CustomerMarketing(model_params = [container_rej, container_prop], fair_threshold = 0.2, fair_concern = "eligible", fair_priority = "benefit", fair_impact = "significant", perf_metric_name = "expected_profit", revenue = PROFIT_RESPOND, treatment_cost =COST_TREATMENT)
    #cm_uplift_obj.tradeoff(output=False, n_threads =4)
    # Curves are fitted on the 'isforeign' feature mask; the trailing 2 is
    # presumably a thread/parallelism count — TODO confirm against the
    # ModelRateUplift signature.
    modelrateuplift_obj = ModelRateUplift([model.y_true for model in cm_uplift_obj.model_params], cm_uplift_obj.pred_outcome, cm_uplift_obj.e_lift, cm_uplift_obj.feature_mask['isforeign'], \
                    cm_uplift_obj.spl_params["treatment_cost"],\
                    cm_uplift_obj.spl_params["revenue"], cm_uplift_obj.proportion_of_interpolation_fitting, 2)
    # Interpolated curves queried at threshold 0, compared against
    # known-good values within loose tolerances.
    assert abs(modelrateuplift_obj.harm([0])[0] - 0.014796284418302946) <= 0.001
    assert abs(modelrateuplift_obj.profit([0])[0] - 73633.37972946254) <= 50
    assert abs(modelrateuplift_obj.emp_lift_tr([0])[0] - 0.50814332247557) <= 0.01
    assert abs(modelrateuplift_obj.emp_lift_cn([0])[0] - 0.3188806045090305) <= 0.01
| [
"numpy.random.choice",
"veritastool.model.model_container.ModelContainer",
"pickle.load",
"numpy.array",
"pandas.DataFrame",
"veritastool.fairness.customer_marketing.CustomerMarketing",
"sys.path.append"
] | [((280, 346), 'sys.path.append', 'sys.path.append', (['"""veritastool/examples/customer_marketing_example"""'], {}), "('veritastool/examples/customer_marketing_example')\n", (295, 346), False, 'import sys\n'), ((505, 528), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (516, 528), False, 'import pickle\n'), ((539, 561), 'numpy.array', 'np.array', (["cs['y_test']"], {}), "(cs['y_test'])\n", (547, 561), True, 'import numpy as np\n'), ((641, 681), 'numpy.random.choice', 'np.random.choice', (['(10)', '(7500)'], {'replace': '(True)'}), '(10, 7500, replace=True)\n', (657, 681), True, 'import numpy as np\n'), ((2696, 2719), 'pickle.load', 'pickle.load', (['input_prop'], {}), '(input_prop)\n', (2707, 2719), False, 'import pickle\n'), ((2733, 2755), 'pickle.load', 'pickle.load', (['input_rej'], {}), '(input_rej)\n', (2744, 2755), False, 'import pickle\n'), ((3452, 3470), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3464, 3470), True, 'import pandas as pd\n'), ((4156, 4174), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4168, 4174), True, 'import pandas as pd\n'), ((4245, 4642), 'veritastool.model.model_container.ModelContainer', 'ModelContainer', ([], {'y_true': 'y_true_rej', 'y_pred': 'y_pred_rej', 'y_prob': 'y_prob_rej', 'y_train': 'y_train_rej', 'p_var': 'p_var_rej', 'p_grp': 'p_grp_rej', 'x_train': 'x_train_rej', 'x_test': 'x_test_rej', 'model_object': 'model_object_rej', 'model_name': 'model_name_rej', 'model_type': 'model_type_rej', 'pos_label': "[['TR'], ['CR']]", 'neg_label': "[['TN'], ['CN']]", 'predict_op_name': '"""predict_proba"""', 'feature_imp': 'feature_importance_rej'}), "(y_true=y_true_rej, y_pred=y_pred_rej, y_prob=y_prob_rej,\n y_train=y_train_rej, p_var=p_var_rej, p_grp=p_grp_rej, x_train=\n x_train_rej, x_test=x_test_rej, model_object=model_object_rej,\n model_name=model_name_rej, model_type=model_type_rej, pos_label=[['TR'],\n ['CR']], neg_label=[['TN'], ['CN']], 
predict_op_name='predict_proba',\n feature_imp=feature_importance_rej)\n", (4259, 4642), False, 'from veritastool.model.model_container import ModelContainer\n'), ((5043, 5307), 'veritastool.fairness.customer_marketing.CustomerMarketing', 'CustomerMarketing', ([], {'model_params': '[container_rej, container_prop]', 'fair_threshold': '(0.2)', 'fair_concern': '"""eligible"""', 'fair_priority': '"""benefit"""', 'fair_impact': '"""significant"""', 'perf_metric_name': '"""expected_profit"""', 'revenue': 'PROFIT_RESPOND', 'treatment_cost': 'COST_TREATMENT'}), "(model_params=[container_rej, container_prop],\n fair_threshold=0.2, fair_concern='eligible', fair_priority='benefit',\n fair_impact='significant', perf_metric_name='expected_profit', revenue=\n PROFIT_RESPOND, treatment_cost=COST_TREATMENT)\n", (5060, 5307), False, 'from veritastool.fairness.customer_marketing import CustomerMarketing\n')] |
import copy
import numpy as np
import timeit
import torch
import torch.nn as nn
from torch.utils.data import BatchSampler, SubsetRandomSampler
import rl_sandbox.constants as c
from rl_sandbox.algorithms.cem.cem import CEMQ
from rl_sandbox.auxiliary_tasks.auxiliary_tasks import AuxiliaryTask
class GRAC:
    """GRAC: Self-Guided and Self-Regularized Actor-Critic.

    Paper: https://arxiv.org/abs/2009.08973

    The critic is a target-network-free double-Q network: each update
    computes a fixed bootstrapped target once, then repeatedly regresses
    both Q heads towards it (plus a self-regularization term) until the
    loss drops below ``alpha`` times its initial value.  The actor is
    trained with a Q-maximizing term plus a CEM-guided likelihood term.
    """

    def __init__(self, model, policy_opt, qs_opt, buffer, algo_params, aux_tasks=None):
        """
        Args:
            model: Actor-critic model exposing ``q_vals``, ``act_stats``
                and ``lprob``.
            policy_opt: Optimizer over ``model.policy_parameters``.
            qs_opt: Optimizer over ``model.qs_parameters``.
            buffer: Replay buffer with ``push`` and ``sample_with_next_obs``.
            algo_params: Hyperparameter dict; missing keys fall back to
                ``c.DEFAULT_GRAC_PARAMS``.
            aux_tasks: Optional auxiliary-task container.  Defaults to a
                fresh ``AuxiliaryTask`` per instance (the previous
                ``aux_tasks=AuxiliaryTask()`` default was evaluated once at
                class-definition time and silently shared across all GRAC
                instances — the classic mutable-default-argument pitfall).
        """
        self.model = model
        self.policy_opt = policy_opt
        self.qs_opt = qs_opt
        self.buffer = buffer
        self.algo_params = algo_params
        self.step = 0
        self.action_dim = algo_params[c.ACTION_DIM]

        # TODO: The reference implementation uses a scheduler for alpha.
        self._alpha = algo_params.get(
            c.ALPHA, c.DEFAULT_GRAC_PARAMS[c.ALPHA])

        # CEM (cross-entropy method) search settings.
        self._cov_noise_init = algo_params.get(
            c.COV_NOISE_INIT, c.DEFAULT_GRAC_PARAMS[c.COV_NOISE_INIT])
        self._cov_noise_end = algo_params.get(
            c.COV_NOISE_END, c.DEFAULT_GRAC_PARAMS[c.COV_NOISE_END])
        self._cov_noise_tau = algo_params.get(
            c.COV_NOISE_TAU, c.DEFAULT_GRAC_PARAMS[c.COV_NOISE_TAU])
        self._num_iters = algo_params.get(
            c.NUM_ITERS, c.DEFAULT_GRAC_PARAMS[c.NUM_ITERS])
        self._pop_size = algo_params.get(
            c.POP_SIZE, c.DEFAULT_GRAC_PARAMS[c.POP_SIZE])
        self._elite_size = algo_params.get(
            c.ELITE_SIZE, c.DEFAULT_GRAC_PARAMS[c.ELITE_SIZE])
        self._min_action = algo_params.get(
            c.MIN_ACTION, c.DEFAULT_GRAC_PARAMS[c.MIN_ACTION])
        self._max_action = algo_params.get(
            c.MAX_ACTION, c.DEFAULT_GRAC_PARAMS[c.MAX_ACTION])

        self._update_num = algo_params.get(c.UPDATE_NUM, 0)
        self.device = algo_params.get(c.DEVICE, torch.device(c.CPU))
        self._num_q_updates = algo_params.get(
            c.NUM_Q_UPDATES, c.DEFAULT_GRAC_PARAMS[c.NUM_Q_UPDATES])
        self._steps_between_update = algo_params.get(
            c.STEPS_BETWEEN_UPDATE, c.DEFAULT_GRAC_PARAMS[c.STEPS_BETWEEN_UPDATE])
        self._buffer_warmup = algo_params.get(
            c.BUFFER_WARMUP, c.DEFAULT_GRAC_PARAMS[c.BUFFER_WARMUP])
        self._reward_scaling = algo_params.get(
            c.REWARD_SCALING, c.DEFAULT_GRAC_PARAMS[c.REWARD_SCALING])
        self._gamma = algo_params.get(c.GAMMA, c.DEFAULT_GRAC_PARAMS[c.GAMMA])
        self._num_gradient_updates = algo_params.get(
            c.NUM_GRADIENT_UPDATES, c.DEFAULT_GRAC_PARAMS[c.NUM_GRADIENT_UPDATES])
        self._batch_size = algo_params.get(
            c.BATCH_SIZE, c.DEFAULT_GRAC_PARAMS[c.BATCH_SIZE])
        self._accum_num_grad = algo_params.get(
            c.ACCUM_NUM_GRAD, c.DEFAULT_GRAC_PARAMS[c.ACCUM_NUM_GRAD])
        self._num_prefetch = algo_params.get(
            c.NUM_PREFETCH, 1)

        # Construct a fresh auxiliary-task container per instance when the
        # caller did not provide one (see docstring).
        self._aux_tasks = AuxiliaryTask() if aux_tasks is None else aux_tasks

        assert self._batch_size % self._accum_num_grad == 0
        assert self._num_gradient_updates % self._num_prefetch == 0
        self._num_samples_per_accum = self._batch_size // self._accum_num_grad

        self._max_grad_norm = algo_params.get(
            c.MAX_GRAD_NORM, c.DEFAULT_GRAC_PARAMS[c.MAX_GRAD_NORM])

        self.train_preprocessing = algo_params[c.TRAIN_PREPROCESSING]

        self.cem = CEMQ(cov_noise_init=self._cov_noise_init,
                        cov_noise_end=self._cov_noise_end,
                        cov_noise_tau=self._cov_noise_tau,
                        action_dim=self.action_dim,
                        batch_size=self._num_samples_per_accum,
                        num_iters=self._num_iters,
                        pop_size=self._pop_size,
                        elite_size=self._elite_size,
                        device=self.device,
                        min_action=self._min_action,
                        max_action=self._max_action,)

    def state_dict(self):
        """Serialize model and optimizer state (plus RMS trackers when present)."""
        state_dict = {}
        state_dict[c.STATE_DICT] = self.model.state_dict()
        state_dict[c.POLICY_OPTIMIZER] = self.policy_opt.state_dict()
        state_dict[c.QS_OPTIMIZER] = self.qs_opt.state_dict()
        if hasattr(self.model, c.OBS_RMS):
            state_dict[c.OBS_RMS] = self.model.obs_rms
        if hasattr(self.model, c.VALUE_RMS):
            state_dict[c.VALUE_RMS] = self.model.value_rms
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict`; RMS entries are optional."""
        self.model.load_state_dict(state_dict[c.STATE_DICT])
        self.policy_opt.load_state_dict(state_dict[c.POLICY_OPTIMIZER])
        self.qs_opt.load_state_dict(state_dict[c.QS_OPTIMIZER])
        if hasattr(self.model, c.OBS_RMS) and c.OBS_RMS in state_dict:
            self.model.obs_rms = state_dict[c.OBS_RMS]
        if hasattr(self.model, c.VALUE_RMS) and c.VALUE_RMS in state_dict:
            self.model.value_rms = state_dict[c.VALUE_RMS]

    def construct_q_function(self, q_i):
        """Return a closure that evaluates one critic head.

        ``model.q_vals`` returns a 4-tuple whose elements 1 and 2 are the
        two Q heads (element 0 is the min-Q estimate, element 3 the hidden
        state — see ``_compute_acts_targets``), so ``q_i + 1`` selects q1
        for ``q_i == 0`` and q2 for ``q_i == 1``.
        """
        def q_function(obss, h_states, acts, lengths):
            res = self.model.q_vals(obss, h_states, acts, lengths=lengths)
            return res[q_i + 1]
        return q_function

    def _compute_qs_loss(self, obss, h_states, acts, dones, best_next_acts, target,
                         targ_q1_best, targ_q2_best, next_obss, lengths):
        """Bellman regression loss plus GRAC self-regularization terms.

        ``q1_loss``/``q2_loss`` pull Q(s, a) towards the bootstrapped
        ``target``; ``q1_reg``/``q2_reg`` keep Q(s', a'*) close to the
        values recorded when the target was computed, masked out on
        terminal transitions.  All four are summed (not averaged) over the
        batch; the caller divides by the batch size.
        """
        best_next_acts, target, targ_q1_best, targ_q2_best, dones = (
            best_next_acts.to(self.device), target.to(self.device),
            targ_q1_best.to(self.device), targ_q2_best.to(self.device),
            dones.to(self.device))
        _, q1_val, q2_val, next_h_states = self.model.q_vals(obss, h_states, acts, lengths=lengths)
        _, q1_best, q2_best, _ = self.model.q_vals(next_obss, next_h_states, best_next_acts)
        q1_loss = ((q1_val - target) ** 2).sum()
        q2_loss = ((q2_val - target) ** 2).sum()
        # NOTE: Supposedly we shouldn't be concerned about state at timestep
        # T + 1, assuming the episode ends at timestep T.
        q1_reg = (((1 - dones) * (q1_best - targ_q1_best)) ** 2).sum()
        q2_reg = (((1 - dones) * (q2_best - targ_q2_best)) ** 2).sum()
        return q1_loss, q2_loss, q1_reg, q2_reg

    def _compute_acts_targets(self, obss, h_states, acts, rews, dones, next_obss,
                              discounting, lengths):
        """Compute bootstrapped critic targets for one accumulation chunk.

        The next action is the better of (a) a (clipped) policy sample and
        (b) a CEM refinement seeded by the policy's distribution, judged
        by the min-Q output.  Everything runs without gradients and is
        returned as detached CPU tensors.

        Returns:
            (best_next_acts, target, q1_best, q2_best,
             sum of (q2 - q1) over the chunk, max of q1_best, max of q2_best)
        """
        with torch.no_grad():
            rews, dones, discounting = rews.to(self.device), dones.to(self.device), discounting.to(self.device)
            _, q1_val, q2_val, next_h_states = self.model.q_vals(obss, h_states, acts, lengths=lengths)

            # Candidate next actions sampled from the current policy...
            next_acts_pi, next_acts_pi_mean, next_acts_pi_var, _, _ = self.model.act_stats(next_obss, next_h_states)
            # NOTE: It is important to clip this action. Otherwise the
            # Q-function gets OOD data.
            next_acts_pi = torch.clamp(next_acts_pi, min=self._min_action[0], max=self._max_action[0])

            # ...and refined by CEM search on the second Q head, seeded by
            # the policy's mean/variance.
            best_next_acts = self.cem.compute_action(self.construct_q_function(q_i=1),
                                                     next_obss,
                                                     next_h_states,
                                                     next_acts_pi_mean,
                                                     next_acts_pi_var,
                                                     lengths=None)

            # Keep, per state, whichever action the min-Q output scores higher.
            min_q_targs_pi, _, _, _ = self.model.q_vals(next_obss, next_h_states, next_acts_pi)
            min_q_targs_cem, _, _, _ = self.model.q_vals(next_obss, next_h_states, best_next_acts)
            best_q_targs = torch.max(min_q_targs_pi, min_q_targs_cem)
            target = rews + (self._gamma ** discounting) * (1 - dones) * best_q_targs

            replace_idxes = (min_q_targs_pi > min_q_targs_cem).squeeze()
            best_next_acts[replace_idxes] = next_acts_pi[replace_idxes]
            _, q1_best, q2_best, _ = self.model.q_vals(next_obss, next_h_states, best_next_acts)

        return best_next_acts.cpu().detach(), target.cpu().detach(), \
            q1_best.cpu().detach(), q2_best.cpu().detach(), \
            (q2_val - q1_val).sum().cpu().detach(), \
            q1_best.max().cpu().detach(), q2_best.max().cpu().detach()

    def _compute_pi_loss(self, obss, h_states, acts, lengths):
        """Actor loss: maximize q1 of policy actions plus a CEM-guided term.

        The CEM action's advantage over the policy's value estimate,
        clipped at zero, weights the log-likelihood of the CEM action,
        pushing the policy towards actions the critic prefers.

        Returns:
            (loss to minimize, max CEM log-prob, min CEM log-prob); the
            extrema are detached CPU scalars for logging.
        """
        acts_pi, acts_pi_mean, acts_pi_var, entropies, v_pi = self.model.act_stats(obss, h_states, lengths=lengths)
        _, q1_pi, _, _ = self.model.q_vals(obss, h_states, acts_pi, lengths=lengths)

        acts_cem = self.cem.compute_action(self.construct_q_function(q_i=0),
                                           obss,
                                           h_states,
                                           acts_pi_mean,
                                           acts_pi_var,
                                           lengths=lengths)

        with torch.no_grad():
            _, q1_cem, _, _ = self.model.q_vals(obss, h_states, acts_cem, lengths=lengths)
            # Advantage of the CEM action over the policy's own value
            # estimate, used only as a non-negative weight.
            score = q1_cem - v_pi
            score = torch.clamp(score.detach(), min=0.)
        acts_cem_lprob = self.model.lprob(obss, h_states, acts_cem, lengths=lengths)
        cem_loss = (score * acts_cem_lprob).sum() / self.action_dim
        q_loss = q1_pi.sum()
        pi_loss = -(q_loss + cem_loss)
        return pi_loss, acts_cem_lprob.max().detach().cpu(), acts_cem_lprob.min().detach().cpu()

    def update_qs(self, batch_start_idx, obss, h_states, acts, rews, dones, next_obss,
                  next_h_states, discounting, infos, lengths, update_info):
        """Update both Q heads on one minibatch.

        Targets are computed once (with CEM/policy action improvement),
        then the critic is regressed towards them for up to
        ``_num_q_updates`` steps, stopping early once the loss falls below
        ``alpha`` times its initial value.  Metrics are appended to
        ``update_info`` in place.
        """
        init_qs_loss = None
        best_next_acts = []
        targets = []
        q1_bests = []
        q2_bests = []
        total_qs_descrepancy = 0.
        max_q1 = -np.inf
        max_q2 = -np.inf
        for grad_i in range(self._accum_num_grad):
            opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
                              batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
            best_next_act, target, q1_best, q2_best, qs_descrepancy, q1_max, q2_max = \
                self._compute_acts_targets(obss[opt_idxes],
                                           h_states[opt_idxes],
                                           acts[opt_idxes],
                                           rews[opt_idxes],
                                           dones[opt_idxes],
                                           next_obss[opt_idxes],
                                           discounting[opt_idxes],
                                           lengths[opt_idxes])
            best_next_acts.append(best_next_act)
            targets.append(target)
            q1_bests.append(q1_best)
            q2_bests.append(q2_best)
            total_qs_descrepancy += qs_descrepancy
            max_q1 = max(q1_max, max_q1)
            max_q2 = max(q2_max, max_q2)

        best_next_acts = torch.cat(best_next_acts, dim=0)
        targets = torch.cat(targets, dim=0)
        q1_bests = torch.cat(q1_bests, dim=0)
        q2_bests = torch.cat(q2_bests, dim=0)

        q1_losses = []
        q2_losses = []
        total_update_time = 0.
        q1_regs = []
        q2_regs = []
        for update_i in range(self._num_q_updates):
            tic = timeit.default_timer()
            self.qs_opt.zero_grad()
            total_q1_loss = 0.
            total_q2_loss = 0.
            total_q1_reg = 0.
            total_q2_reg = 0.
            for grad_i in range(self._accum_num_grad):
                # FIX: recompute the minibatch slice here instead of reusing
                # the stale ``opt_idxes`` left over from the target loop, and
                # index the *concatenated* target tensors with accumulator-
                # local indices.  The previous code indexed the last
                # per-accum chunk with absolute batch indices, which is
                # wrong (and out of bounds) whenever ``accum_num_grad > 1``
                # or ``num_prefetch > 1``.
                opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
                                  batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
                acc_idxes = range(grad_i * self._num_samples_per_accum,
                                  (grad_i + 1) * self._num_samples_per_accum)
                q1_loss, q2_loss, q1_reg, q2_reg = self._compute_qs_loss(obss[opt_idxes],
                                                                         h_states[opt_idxes],
                                                                         acts[opt_idxes],
                                                                         dones[opt_idxes],
                                                                         best_next_acts[acc_idxes],
                                                                         targets[acc_idxes],
                                                                         q1_bests[acc_idxes],
                                                                         q2_bests[acc_idxes],
                                                                         next_obss[opt_idxes],
                                                                         lengths[opt_idxes])
                q1_loss /= self._batch_size
                q2_loss /= self._batch_size
                q1_reg /= self._batch_size
                q2_reg /= self._batch_size
                qs_loss = q1_loss + q2_loss + q1_reg + q2_reg
                total_q1_loss += q1_loss.detach().cpu()
                total_q2_loss += q2_loss.detach().cpu()
                total_q1_reg += q1_reg.detach().cpu()
                total_q2_reg += q2_reg.detach().cpu()
                qs_loss.backward()

            nn.utils.clip_grad_norm_(self.model.qs_parameters,
                                     self._max_grad_norm)
            self.qs_opt.step()
            total_update_time += timeit.default_timer() - tic
            q1_losses.append(total_q1_loss.numpy())
            q2_losses.append(total_q2_loss.numpy())
            q1_regs.append(total_q1_reg.numpy())
            q2_regs.append(total_q2_reg.numpy())

            if init_qs_loss is None:
                init_qs_loss = qs_loss.detach()
            # Early-stopping heuristic: stop regressing once the loss has
            # shrunk enough relative to the first update (guards against
            # overfitting the current targets).
            if qs_loss.detach() < init_qs_loss * self._alpha:
                break

        update_info[c.Q1_MAX].append(max_q1)
        update_info[c.Q2_MAX].append(max_q2)
        update_info[c.Q_UPDATE_TIME].append(total_update_time)
        update_info[c.Q1_LOSS].append(np.mean(q1_losses))
        update_info[c.Q2_LOSS].append(np.mean(q2_losses))
        update_info[c.Q1_REG].append(np.mean(q1_regs))
        update_info[c.Q2_REG].append(np.mean(q2_regs))
        # FIX: report the discrepancy accumulated over the whole batch, not
        # just the last accumulation chunk.
        update_info[c.AVG_Q_DISCREPANCY].append(total_qs_descrepancy / self._batch_size)

    def update_policy(self, batch_start_idx, obss, h_states, acts, rews, dones, next_obss,
                      next_h_states, discounting, infos, lengths, update_info):
        """Single gradient step on the actor for one minibatch.

        Gradients are accumulated over ``_accum_num_grad`` chunks before
        clipping and stepping; metrics are appended to ``update_info``.
        """
        tic = timeit.default_timer()
        self.policy_opt.zero_grad()
        total_pi_loss = 0.
        max_lprob = -np.inf
        min_lprob = np.inf
        for grad_i in range(self._accum_num_grad):
            opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
                              batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
            pi_loss, lprob_max, lprob_min = self._compute_pi_loss(obss[opt_idxes],
                                                                  h_states[opt_idxes],
                                                                  acts[opt_idxes],
                                                                  lengths[opt_idxes])
            max_lprob = max(lprob_max, max_lprob)
            min_lprob = min(lprob_min, min_lprob)
            pi_loss /= self._batch_size
            total_pi_loss += pi_loss.detach().cpu()
            pi_loss.backward()
        nn.utils.clip_grad_norm_(self.model.policy_parameters,
                                 self._max_grad_norm)
        self.policy_opt.step()

        update_info[c.LPROB_MAX].append(max_lprob)
        update_info[c.LPROB_MIN].append(min_lprob)
        update_info[c.POLICY_UPDATE_TIME].append(timeit.default_timer() - tic)
        update_info[c.PI_LOSS].append(total_pi_loss.numpy())

    def _store_to_buffer(self, curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state):
        """Push one transition into the replay buffer (``done`` wrapped as a list)."""
        self.buffer.push(curr_obs, curr_h_state, act, rew, [done], info, next_obs=next_obs, next_h_state=next_h_state)

    def update(self, curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state):
        """Record a transition and, when due, run GRAC gradient updates.

        Returns:
            (updated, update_info): ``updated`` is True when gradient
            updates were performed this call; ``update_info`` maps metric
            names to logged values (empty dict otherwise).
        """
        self._store_to_buffer(curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state)
        self.step += 1

        update_info = {}

        if hasattr(self.model, c.OBS_RMS):
            # NOTE(review): ``eval_preprocessing`` is never assigned in
            # ``__init__`` — presumably attached by a subclass or set
            # externally.  Confirm before relying on obs-RMS updates here.
            self.model.obs_rms.update(self.eval_preprocessing(torch.tensor(curr_obs)))

        # Perform the GRAC update once warm-up is done, every
        # ``_steps_between_update`` environment steps.
        if self.step >= self._buffer_warmup and self.step % self._steps_between_update == 0:
            update_info[c.PI_LOSS] = []
            update_info[c.Q1_LOSS] = []
            update_info[c.Q2_LOSS] = []
            update_info[c.Q1_REG] = []
            update_info[c.Q2_REG] = []
            update_info[c.SAMPLE_TIME] = []
            update_info[c.Q_UPDATE_TIME] = []
            update_info[c.POLICY_UPDATE_TIME] = []
            update_info[c.AVG_Q1_VAL] = []
            update_info[c.AVG_Q2_VAL] = []
            update_info[c.AVG_Q_DISCREPANCY] = []
            update_info[c.LPROB_MAX] = []
            update_info[c.LPROB_MIN] = []
            update_info[c.Q1_MAX] = []
            update_info[c.Q2_MAX] = []

            for _ in range(self._num_gradient_updates // self._num_prefetch):
                tic = timeit.default_timer()
                obss, h_states, acts, rews, dones, next_obss, next_h_states, infos, lengths = self.buffer.sample_with_next_obs(
                    self._batch_size * self._num_prefetch, next_obs, next_h_state)
                obss = self.train_preprocessing(obss)
                next_obss = self.train_preprocessing(next_obss)
                rews = rews * self._reward_scaling
                discounting = infos[c.DISCOUNTING]
                update_info[c.SAMPLE_TIME].append(timeit.default_timer() - tic)

                for batch_i in range(self._num_prefetch):
                    self._update_num += 1
                    batch_start_idx = batch_i * self._batch_size

                    # Update Q functions.  Auxiliary tasks usually train
                    # shared layers, so their gradients ride along with the
                    # critic update.
                    aux_loss, aux_update_info = self._aux_tasks.compute_loss(next_obs, next_h_state)
                    if hasattr(aux_loss, c.BACKWARD):
                        aux_loss.backward()
                    self.update_qs(batch_start_idx,
                                   obss,
                                   h_states,
                                   acts,
                                   rews,
                                   dones,
                                   next_obss,
                                   next_h_states,
                                   discounting,
                                   infos,
                                   lengths,
                                   update_info)
                    self._aux_tasks.step()
                    update_info.update(aux_update_info)

                    # Update policy
                    self.update_policy(batch_start_idx,
                                       obss,
                                       h_states,
                                       acts,
                                       rews,
                                       dones,
                                       next_obss,
                                       next_h_states,
                                       discounting,
                                       infos,
                                       lengths,
                                       update_info)

            if hasattr(self.model, c.VALUE_RMS):
                update_info[f"{c.VALUE_RMS}/{c.MEAN}"] = self.model.value_rms.mean.numpy()
                update_info[f"{c.VALUE_RMS}/{c.VARIANCE}"] = self.model.value_rms.var.numpy()
            return True, update_info
        return False, update_info
| [
"rl_sandbox.auxiliary_tasks.auxiliary_tasks.AuxiliaryTask",
"numpy.mean",
"timeit.default_timer",
"torch.nn.utils.clip_grad_norm_",
"torch.max",
"torch.tensor",
"rl_sandbox.algorithms.cem.cem.CEMQ",
"torch.no_grad",
"torch.clamp",
"torch.cat",
"torch.device"
] | [((390, 405), 'rl_sandbox.auxiliary_tasks.auxiliary_tasks.AuxiliaryTask', 'AuxiliaryTask', ([], {}), '()\n', (403, 405), False, 'from rl_sandbox.auxiliary_tasks.auxiliary_tasks import AuxiliaryTask\n'), ((3331, 3686), 'rl_sandbox.algorithms.cem.cem.CEMQ', 'CEMQ', ([], {'cov_noise_init': 'self._cov_noise_init', 'cov_noise_end': 'self._cov_noise_end', 'cov_noise_tau': 'self._cov_noise_tau', 'action_dim': 'self.action_dim', 'batch_size': 'self._num_samples_per_accum', 'num_iters': 'self._num_iters', 'pop_size': 'self._pop_size', 'elite_size': 'self._elite_size', 'device': 'self.device', 'min_action': 'self._min_action', 'max_action': 'self._max_action'}), '(cov_noise_init=self._cov_noise_init, cov_noise_end=self._cov_noise_end,\n cov_noise_tau=self._cov_noise_tau, action_dim=self.action_dim,\n batch_size=self._num_samples_per_accum, num_iters=self._num_iters,\n pop_size=self._pop_size, elite_size=self._elite_size, device=self.\n device, min_action=self._min_action, max_action=self._max_action)\n', (3335, 3686), False, 'from rl_sandbox.algorithms.cem.cem import CEMQ\n'), ((11230, 11262), 'torch.cat', 'torch.cat', (['best_next_acts'], {'dim': '(0)'}), '(best_next_acts, dim=0)\n', (11239, 11262), False, 'import torch\n'), ((11281, 11306), 'torch.cat', 'torch.cat', (['targets'], {'dim': '(0)'}), '(targets, dim=0)\n', (11290, 11306), False, 'import torch\n'), ((11326, 11352), 'torch.cat', 'torch.cat', (['q1_bests'], {'dim': '(0)'}), '(q1_bests, dim=0)\n', (11335, 11352), False, 'import torch\n'), ((11372, 11398), 'torch.cat', 'torch.cat', (['q2_bests'], {'dim': '(0)'}), '(q2_bests, dim=0)\n', (11381, 11398), False, 'import torch\n'), ((14523, 14545), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (14543, 14545), False, 'import timeit\n'), ((15397, 15472), 'torch.nn.utils.clip_grad_norm_', 'nn.utils.clip_grad_norm_', (['self.model.policy_parameters', 'self._max_grad_norm'], {}), '(self.model.policy_parameters, self._max_grad_norm)\n', (15421, 15472), 
True, 'import torch.nn as nn\n'), ((1846, 1865), 'torch.device', 'torch.device', (['c.CPU'], {}), '(c.CPU)\n', (1858, 1865), False, 'import torch\n'), ((6180, 6195), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6193, 6195), False, 'import torch\n'), ((6710, 6785), 'torch.clamp', 'torch.clamp', (['next_acts_pi'], {'min': 'self._min_action[0]', 'max': 'self._max_action[0]'}), '(next_acts_pi, min=self._min_action[0], max=self._max_action[0])\n', (6721, 6785), False, 'import torch\n'), ((7501, 7543), 'torch.max', 'torch.max', (['min_q_targs_pi', 'min_q_targs_cem'], {}), '(min_q_targs_pi, min_q_targs_cem)\n', (7510, 7543), False, 'import torch\n'), ((8721, 8736), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8734, 8736), False, 'import torch\n'), ((11590, 11612), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11610, 11612), False, 'import timeit\n'), ((13262, 13333), 'torch.nn.utils.clip_grad_norm_', 'nn.utils.clip_grad_norm_', (['self.model.qs_parameters', 'self._max_grad_norm'], {}), '(self.model.qs_parameters, self._max_grad_norm)\n', (13286, 13333), True, 'import torch.nn as nn\n'), ((14087, 14105), 'numpy.mean', 'np.mean', (['q1_losses'], {}), '(q1_losses)\n', (14094, 14105), True, 'import numpy as np\n'), ((14145, 14163), 'numpy.mean', 'np.mean', (['q2_losses'], {}), '(q2_losses)\n', (14152, 14163), True, 'import numpy as np\n'), ((14202, 14218), 'numpy.mean', 'np.mean', (['q1_regs'], {}), '(q1_regs)\n', (14209, 14218), True, 'import numpy as np\n'), ((14257, 14273), 'numpy.mean', 'np.mean', (['q2_regs'], {}), '(q2_regs)\n', (14264, 14273), True, 'import numpy as np\n'), ((13434, 13456), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (13454, 13456), False, 'import timeit\n'), ((15688, 15710), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15708, 15710), False, 'import timeit\n'), ((17235, 17257), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (17255, 17257), False, 'import 
timeit\n'), ((16349, 16371), 'torch.tensor', 'torch.tensor', (['curr_obs'], {}), '(curr_obs)\n', (16361, 16371), False, 'import torch\n'), ((17756, 17778), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (17776, 17778), False, 'import timeit\n')] |
"""
Example: basic integration
Basic example using the vegas_wrapper helper
"""
from vegasflow.configflow import DTYPE
import time
import numpy as np
import tensorflow as tf
from vegasflow.vflow import vegas_wrapper
from vegasflow.plain import plain_wrapper
# MC integration setup
dim = 4  # number of integration dimensions
ncalls = np.int32(1e5)  # base number of MC samples per iteration (scaled up in __main__)
n_iter = 5  # number of integrator iterations
@tf.function
def symgauss(xarr, **kwargs):
    """Symmetric-Gaussian test integrand (plus a constant offset in the exponent)."""
    ndim = xarr.shape[-1]
    width = tf.constant(0.1, dtype=DTYPE)
    hundred_n = tf.cast(100 * ndim, dtype=DTYPE)
    # Normalization prefactor of the ndim-dimensional Gaussian.
    prefactor = tf.pow(1.0 / width / np.sqrt(np.pi), ndim)
    shifted = (xarr - 0.5) / width
    exponent = tf.reduce_sum(tf.square(shifted), axis=1)
    exponent += tf.reduce_sum(tf.range(hundred_n + 1))
    exponent -= (hundred_n + 1) * hundred_n / 2.0
    return prefactor * tf.exp(-exponent)
if __name__ == "__main__":
    # Run the VEGAS integrator on the symmetric Gaussian and report timing.
    print(f"VEGAS MC, ncalls={ncalls}:")
    t_begin = time.time()
    ncalls = 10 * ncalls
    result = vegas_wrapper(symgauss, dim, n_iter, ncalls)
    t_end = time.time()
    print(f"Vegas took: time (s): {t_end-t_begin}")
    # Plain-MC comparison (disabled):
    # start = time.time()
    # r = plain_wrapper(symgauss, dim, n_iter, ncalls)
    # end = time.time()
    # print(f"Plain took: time (s): {end-start}")
| [
"numpy.sqrt",
"vegasflow.vflow.vegas_wrapper",
"numpy.int32",
"tensorflow.range",
"tensorflow.constant",
"tensorflow.square",
"tensorflow.cast",
"time.time",
"tensorflow.exp"
] | [((311, 329), 'numpy.int32', 'np.int32', (['(100000.0)'], {}), '(100000.0)\n', (319, 329), True, 'import numpy as np\n'), ((449, 478), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'dtype': 'DTYPE'}), '(0.1, dtype=DTYPE)\n', (460, 478), True, 'import tensorflow as tf\n'), ((490, 523), 'tensorflow.cast', 'tf.cast', (['(100 * n_dim)'], {'dtype': 'DTYPE'}), '(100 * n_dim, dtype=DTYPE)\n', (497, 523), True, 'import tensorflow as tf\n'), ((888, 899), 'time.time', 'time.time', ([], {}), '()\n', (897, 899), False, 'import time\n'), ((931, 975), 'vegasflow.vflow.vegas_wrapper', 'vegas_wrapper', (['symgauss', 'dim', 'n_iter', 'ncalls'], {}), '(symgauss, dim, n_iter, ncalls)\n', (944, 975), False, 'from vegasflow.vflow import vegas_wrapper\n'), ((986, 997), 'time.time', 'time.time', ([], {}), '()\n', (995, 997), False, 'import time\n'), ((600, 618), 'tensorflow.range', 'tf.range', (['(n100 + 1)'], {}), '(n100 + 1)\n', (608, 618), True, 'import tensorflow as tf\n'), ((646, 679), 'tensorflow.square', 'tf.square', (['((xarr - 1.0 / 2.0) / a)'], {}), '((xarr - 1.0 / 2.0) / a)\n', (655, 679), True, 'import tensorflow as tf\n'), ((743, 756), 'tensorflow.exp', 'tf.exp', (['(-coef)'], {}), '(-coef)\n', (749, 756), True, 'import tensorflow as tf\n'), ((552, 566), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (559, 566), True, 'import numpy as np\n')] |
import os
import numpy as np
from interface.camera_calibration import ModelImage
# Absolute directory of this module (used below to locate the court image file).
dir_path = os.path.dirname(os.path.realpath(__file__))
class RobertSquashCourtImage(ModelImage):
    """Calibration model for the left-camera view of Robert's squash court.

    Holds the camera intrinsic matrix and the pixel coordinates of the
    court's reference lines in ``squash_robert_left.jpg``.
    """

    def __init__(self):
        # Camera intrinsic matrix (values as calibrated, including the
        # negative focal-length convention of the source calibration).
        self.K = np.matrix([[-524.79644775, 0., 293.28320922],
                        [0., -523.37878418, 226.37976338],
                        [0., 0., 1.]])
        # Currently not used
        self.distortion_coeff = None
        self.lines = None
        self.lines_img_ga = None
        self.img_name = os.path.join(dir_path, "squash_robert_left.jpg")

    def set_lines(self):
        """Populate ``self.lines`` mapping line name -> (endpoint, endpoint).

        Endpoints are homogeneous pixel coordinates (x, y, 1).  Returns
        the resulting dict.
        """
        # Points where the walls meet the floor.
        front_floor_left = np.array([162, 309, 1])
        front_floor_right = np.array([473, 294.5, 1])
        back_floor_left = np.array([108, 500, 1])
        back_floor_right = np.array([640, 434, 1])
        # Upper endpoints of the side out-lines at the back of the image.
        back_top_left = np.array([0, 273, 1])
        back_top_right = np.array([640, 136, 1])
        # Horizontal front-wall markings (top, middle/service, bottom/tin).
        front_top_left = np.array([145, 66, 1])
        front_top_right = np.array([492, 66, 1])
        front_mid_left = np.array([155.5, 220, 1])
        front_mid_right = np.array([480, 210, 1])
        front_bot_left = np.array([161, 286, 1])
        front_bot_right = np.array([475.5, 273, 1])

        # Assemble every reference line from its two endpoints; insertion
        # order mirrors the original court / floor / vertical grouping.
        self.lines = {
            "tint_line": (front_bot_left, front_bot_right),
            "service_line": (front_mid_left, front_mid_right),
            "front_out_line": (front_top_left, front_top_right),
            "side_out_line_left": (front_top_left, back_top_left),
            "side_out_line_right": (front_top_right, back_top_right),
            "floor_line_front": (front_floor_left, front_floor_right),
            "floor_line_right": (front_floor_right, back_floor_right),
            "floor_line_left": (front_floor_left, back_floor_left),
            "vertical_line_left": (front_floor_left, front_top_left),
            "vertical_line_right": (front_floor_right, front_top_right),
        }
        return self.lines
"os.path.realpath",
"numpy.array",
"numpy.matrix",
"os.path.join"
] | [((111, 137), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (127, 137), False, 'import os\n'), ((223, 328), 'numpy.matrix', 'np.matrix', (['[[-524.79644775, 0.0, 293.28320922], [0.0, -523.37878418, 226.37976338], [\n 0.0, 0.0, 1.0]]'], {}), '([[-524.79644775, 0.0, 293.28320922], [0.0, -523.37878418, \n 226.37976338], [0.0, 0.0, 1.0]])\n', (232, 328), True, 'import numpy as np\n'), ((574, 622), 'os.path.join', 'os.path.join', (['dir_path', '"""squash_robert_left.jpg"""'], {}), "(dir_path, 'squash_robert_left.jpg')\n", (586, 622), False, 'import os\n'), ((674, 697), 'numpy.array', 'np.array', (['[162, 309, 1]'], {}), '([162, 309, 1])\n', (682, 697), True, 'import numpy as np\n'), ((724, 749), 'numpy.array', 'np.array', (['[473, 294.5, 1]'], {}), '([473, 294.5, 1])\n', (732, 749), True, 'import numpy as np\n'), ((798, 821), 'numpy.array', 'np.array', (['[108, 500, 1]'], {}), '([108, 500, 1])\n', (806, 821), True, 'import numpy as np\n'), ((854, 877), 'numpy.array', 'np.array', (['[640, 434, 1]'], {}), '([640, 434, 1])\n', (862, 877), True, 'import numpy as np\n'), ((918, 939), 'numpy.array', 'np.array', (['[0, 273, 1]'], {}), '([0, 273, 1])\n', (926, 939), True, 'import numpy as np\n'), ((971, 994), 'numpy.array', 'np.array', (['[640, 136, 1]'], {}), '([640, 136, 1])\n', (979, 994), True, 'import numpy as np\n'), ((1039, 1061), 'numpy.array', 'np.array', (['[145, 66, 1]'], {}), '([145, 66, 1])\n', (1047, 1061), True, 'import numpy as np\n'), ((1098, 1120), 'numpy.array', 'np.array', (['[492, 66, 1]'], {}), '([492, 66, 1])\n', (1106, 1120), True, 'import numpy as np\n'), ((1168, 1193), 'numpy.array', 'np.array', (['[155.5, 220, 1]'], {}), '([155.5, 220, 1])\n', (1176, 1193), True, 'import numpy as np\n'), ((1233, 1256), 'numpy.array', 'np.array', (['[480, 210, 1]'], {}), '([480, 210, 1])\n', (1241, 1256), True, 'import numpy as np\n'), ((1301, 1324), 'numpy.array', 'np.array', (['[161, 286, 1]'], {}), '([161, 286, 1])\n', (1309, 1324), 
True, 'import numpy as np\n'), ((1361, 1386), 'numpy.array', 'np.array', (['[475.5, 273, 1]'], {}), '([475.5, 273, 1])\n', (1369, 1386), True, 'import numpy as np\n')] |
"""Code to generate full mock LCs."""
import numpy as np
import kali
import kali.carma
import pandas as pd
import sys
from joblib import Parallel, delayed
sys.path.insert(0, '/home/mount/lsst_cadence')
from lsstlc import * # derived LSST lightcurve sub-class
def genLC(params, save_dir):
    """Simulate a single CARMA(2,1) light curve and write it to disk.

    Args:
        params: Pandas Series carrying the CARMA coefficients 'a1', 'a2',
            'b0', 'b1' and the fractional 'noise' level.
        save_dir (str): Directory where the simulated LC is stored.

    Returns:
        The file name produced by ``lc2file``, or 0 when the supplied
        coefficients do not form a valid CARMA model.
    """
    task = kali.carma.CARMATask(2, 1)
    frac_noise = float(params['noise'])
    theta = np.array([params['a1'], params['a2'], params['b0'], params['b1']])
    # Reject coefficient sets the library deems invalid before simulating.
    if task.check(theta) is False:
        return 0
    cadence = 30.0 / 86400  # presumably 30 s expressed in days — confirm units
    task.set(cadence, theta)
    lc = task.simulate(duration=3653)
    lc.fracNoiseToSignal = frac_noise
    task.observe(lc)
    # Persist the light curve along with its characteristic timescales.
    fname = lc2file(save_dir, lc, full=True,
                    timescales=[params['a1'], params['b1'] / params['b0']])
    return fname
if __name__ == '__main__':
    # Usage: python <script> <input_csv> <lc_output_dir>
    input_df = pd.read_csv(sys.argv[1])
    out_dir = sys.argv[2]
    # Simulate all requested light curves in parallel (one job per core).
    fnames = Parallel(n_jobs=-1)(
        delayed(genLC)(input_df.loc[i], out_dir) for i in input_df.index)
    input_df['fname'] = fnames
    # Write each LC's file name back into the input csv.
    input_df.to_csv(sys.argv[1], index=False)
    # np.save('lc_log', fnames)
| [
"sys.path.insert",
"pandas.read_csv",
"joblib.Parallel",
"kali.carma.CARMATask",
"numpy.array",
"joblib.delayed"
] | [((155, 201), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/mount/lsst_cadence"""'], {}), "(0, '/home/mount/lsst_cadence')\n", (170, 201), False, 'import sys\n'), ((501, 527), 'kali.carma.CARMATask', 'kali.carma.CARMATask', (['(2)', '(1)'], {}), '(2, 1)\n', (521, 527), False, 'import kali\n'), ((575, 641), 'numpy.array', 'np.array', (["[params['a1'], params['a2'], params['b0'], params['b1']]"], {}), "([params['a1'], params['a2'], params['b0'], params['b1']])\n", (583, 641), True, 'import numpy as np\n'), ((1096, 1120), 'pandas.read_csv', 'pd.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1107, 1120), True, 'import pandas as pd\n'), ((1160, 1179), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1168, 1179), False, 'from joblib import Parallel, delayed\n'), ((1180, 1194), 'joblib.delayed', 'delayed', (['genLC'], {}), '(genLC)\n', (1187, 1194), False, 'from joblib import Parallel, delayed\n')] |
from pydantic import BaseModel
from icolos.core.workflow_steps.schrodinger.base import StepSchrodingerBase
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
from icolos.utils.enums.step_enums import StepFepPlusEnum
from typing import List
import time
import os
from icolos.core.workflow_steps.step import _LE
# FEP+-specific constants (output-file suffixes and log-file marker strings).
_SFE = StepFepPlusEnum()
class StepFEPBase(StepSchrodingerBase, BaseModel):
    """
    Base class containing common functionality for Schrodinger FEP+ workflows
    """
    def __init__(self, **data):
        super().__init__(**data)
    def _parse_output(self, tmp_dir):
        # Poll tmp_dir until the final annotated .fmp map appears, then attach
        # its raw bytes to this step's generic data under its filename.
        # pick up the final annotated map construction
        self.data.generic.clear_file_dict()
        self._logger.log(f"Reading output map.", _LE.INFO)
        data = None
        counts = 0
        # hold whilst the job data gets written to local fs
        while data is None and counts < 50000:
            try:
                path = [
                    file
                    for file in os.listdir(tmp_dir)
                    if file.endswith(_SFE.FMP_OUTPUT_FILE)
                ]
                # exactly one output map is expected; anything else retries
                assert len(path) == 1
                path = path[0]
                with open(os.path.join(tmp_dir, path), "rb") as f:
                    data = f.read()
            except AssertionError:
                self._logger.log(
                    "Output file has not yet appeared in the file system, sleeping and retrying...",
                    _LE.INFO,
                )
                time.sleep(15)
                counts += 1
        # NOTE(review): if all 50000 polls fail, this falls through with
        # data=None -- confirm upstream guarantees the file eventually appears.
        self._add_data_to_generic(path, data)
    def _extract_log_file_data(self, tmp_dir):
        """
        Parses FEP log file to extract edge and node properties
        """
        lines = None
        counts = 0
        # wait whilst job sits in the queue
        while lines is None and counts < 50000:
            try:
                log_file = [
                    file for file in os.listdir(tmp_dir) if file.endswith(_SFE.LOGFILE)
                ]
                assert len(log_file) == 1
                log_file = log_file[0]
                with open(os.path.join(tmp_dir, log_file), "r") as f:
                    lines = f.readlines()
                # locate the LAST edge/node report in the log ([-1]) and the
                # first data terminus after them ([0])
                edge_header_index = [
                    idx for idx, s in enumerate(lines) if _SFE.EDGE_HEADER_LINE in s
                ][-1]
                node_header_index = [
                    idx for idx, s in enumerate(lines) if _SFE.NODE_HEADER_LINE in s
                ][-1]
                end_of_data_index = [
                    idx for idx, s in enumerate(lines) if _SFE.DATA_TERMINUS in s
                ][0]
                # data rows start 3 lines below each header (separator rows)
                edge_data_lines = [
                    line
                    for line in lines[edge_header_index + 3 : node_header_index - 1]
                ]
                node_data_lines = [
                    line
                    for line in lines[node_header_index + 3 : end_of_data_index - 1]
                ]
                self._process_edge_lines(edge_data_lines)
                self._process_node_lines(node_data_lines)
            except AssertionError:
                self._logger.log(
                    "Log file has not yet appeared in the file system, sleeping and retrying...",
                    _LE.INFO,
                )
                time.sleep(15)
                counts += 1
    def _process_node_lines(self, data: List[str]) -> None:
        # Each node row carries a ligand index (field 1) and its dG (field 2);
        # the dG is attached as a "dG" property on the matching compound.
        for entry in data:
            fields = entry.split()
            idx = fields[1]
            dG = fields[2]
            # attach dG tags to compound objects if present
            if self.data.compounds:
                # account for running this step compoundless
                # NOTE(review): idx[0] uses only the FIRST character of the
                # index string -- looks wrong for indices >= 10; confirm.
                self.data.compounds[int(idx[0])].get_enumerations()[0].get_conformers()[
                    0
                ].get_molecule().SetProp("dG", str(dG))
                self._logger.log(
                    f"dG directly from the output file for compound {idx} is {dG} ",
                    _LE.INFO,
                )
    def _process_edge_lines(self, edge_data: List[str]) -> None:
        """
        Calibrate dG values using a reference compound and edge ddG from log file output, return dG for each compound
        """
        # caluclate the max ligand index, accounting for ligands that may have been skipped in previous steps, so can't rely on self.get_compounds()
        len_nodes = 0
        for line in edge_data:
            parts = line.split()
            # edge rows look like "... <from>:<tag> ... <to>:<tag> <ddG>+-<err> ..."
            lig_from = int(parts[1].split(":")[0])
            lig_to = int(parts[3].split(":")[0])
            for idx in [lig_from, lig_to]:
                if idx > len_nodes:
                    len_nodes = idx
        len_nodes += 1  # account for zero indexed ligands
        # dense matrices: error_matrix holds edge uncertainties, ddG_matrix the
        # signed free-energy differences (antisymmetric)
        error_matrix = np.zeros((len_nodes, len_nodes))
        ddG_matrix = np.zeros((len_nodes, len_nodes))
        for line in edge_data:
            parts = line.split()
            try:
                # parse the compound info from the log file
                lig_from = int(parts[1].split(":")[0])
                lig_to = int(parts[3].split(":")[0])
                ddG = float(parts[4].split("+-")[0])
                err = float(parts[4].split("+-")[1])
            except ValueError:
                self._logger.log(
                    f"Line: {line} from the logfile contained an unexpected datatype - cannot process this edge - skipping",
                    _LE.WARNING,
                )
                continue
            error_matrix[lig_from, lig_to] = err
            error_matrix[lig_to, lig_from] = err
            ddG_matrix[lig_from, lig_to] = ddG
            ddG_matrix[lig_to, lig_from] = -ddG
        error_matrix = csr_matrix(error_matrix)
        # compute shortest path from one ligand to the anchor
        # (edge weights are the uncertainties, so the path minimises error)
        _, predecessors = shortest_path(
            error_matrix, directed=False, return_predecessors=True, indices=0
        )
        self._construct_dg_per_compound(ddG_matrix, predecessors, error_matrix)
    def _construct_dg_per_compound(
        self, ddG: np.ndarray, predecessors: List, error_matrix: np.ndarray
    ) -> None:
        """
        Calculate the calibrated binding free energy per compound using a reference value
        Attach calcualted dG to compounds
        """
        try:
            ref_dG = self.settings.additional[_SFE.REFERENCE_DG]
        except KeyError:
            self._logger.log(
                "Expected to find a reference dG value for the lead compound, but none was found."
                "Defaulting to 0.00, you will need to apply a manual correction afterwards",
                _LE.WARNING,
            )
            ref_dG = 0.00
        # Recursive walk along the predecessor chain towards node 0,
        # accumulating ddG and error; the property is written when the
        # anchor is reached.  NOTE: `idx` is closed over from the loop below.
        def _calculate_dg(comp_num: int, dG=ref_dG, err=0):
            prev_index = predecessors[comp_num]
            dG += ddG[prev_index, comp_num]
            err += error_matrix[prev_index, comp_num]
            if prev_index != 0:
                _calculate_dg(prev_index, dG=dG, err=err)
            else:
                data = str(round(dG, 2)) + "+-" + str(round(err, 2))
                self.data.compounds[idx].get_enumerations()[0].get_conformers()[
                    0
                ].get_molecule().SetProp("map_dG", data)
                self._logger.log(
                    f"Calculated dG from spanning tree for compound {idx} is {data}",
                    _LE.INFO,
                )
        for comp in self.get_compounds():
            idx = comp.get_compound_number()
            # check whether the compound appeared in the final map
            try:
                if idx == 0:
                    # reference compound keeps the supplied reference dG
                    comp.get_enumerations()[0].get_conformers()[
                        0
                    ].get_molecule().SetProp(
                        "map_dG", str(self.settings.additional[_SFE.REFERENCE_DG])
                    )
                if idx != 0:  # skip the reference compound
                    _calculate_dg(idx)
            except IndexError:
                self._logger.log(
                    f"Compound {idx} was not found in the output map, it was likely dropped during the workflow",
                    _LE.WARNING,
                )
                continue
| [
"os.listdir",
"scipy.sparse.csgraph.shortest_path",
"os.path.join",
"time.sleep",
"icolos.utils.enums.step_enums.StepFepPlusEnum",
"numpy.zeros",
"scipy.sparse.csr_matrix"
] | [((369, 386), 'icolos.utils.enums.step_enums.StepFepPlusEnum', 'StepFepPlusEnum', ([], {}), '()\n', (384, 386), False, 'from icolos.utils.enums.step_enums import StepFepPlusEnum\n'), ((4798, 4830), 'numpy.zeros', 'np.zeros', (['(len_nodes, len_nodes)'], {}), '((len_nodes, len_nodes))\n', (4806, 4830), True, 'import numpy as np\n'), ((4852, 4884), 'numpy.zeros', 'np.zeros', (['(len_nodes, len_nodes)'], {}), '((len_nodes, len_nodes))\n', (4860, 4884), True, 'import numpy as np\n'), ((5723, 5747), 'scipy.sparse.csr_matrix', 'csr_matrix', (['error_matrix'], {}), '(error_matrix)\n', (5733, 5747), False, 'from scipy.sparse import csr_matrix\n'), ((5836, 5921), 'scipy.sparse.csgraph.shortest_path', 'shortest_path', (['error_matrix'], {'directed': '(False)', 'return_predecessors': '(True)', 'indices': '(0)'}), '(error_matrix, directed=False, return_predecessors=True, indices=0\n )\n', (5849, 5921), False, 'from scipy.sparse.csgraph import shortest_path\n'), ((1545, 1559), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (1555, 1559), False, 'import time\n'), ((3359, 3373), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (3369, 3373), False, 'import time\n'), ((1042, 1061), 'os.listdir', 'os.listdir', (['tmp_dir'], {}), '(tmp_dir)\n', (1052, 1061), False, 'import os\n'), ((1234, 1261), 'os.path.join', 'os.path.join', (['tmp_dir', 'path'], {}), '(tmp_dir, path)\n', (1246, 1261), False, 'import os\n'), ((1986, 2005), 'os.listdir', 'os.listdir', (['tmp_dir'], {}), '(tmp_dir)\n', (1996, 2005), False, 'import os\n'), ((2163, 2194), 'os.path.join', 'os.path.join', (['tmp_dir', 'log_file'], {}), '(tmp_dir, log_file)\n', (2175, 2194), False, 'import os\n')] |
from scipy.linalg import toeplitz
import numpy as np
from cooltools.lib.numutils import LazyToeplitz
# Shared fixtures: c is the first column, r the first row (scipy toeplitz
# convention); L is the lazy implementation under test, T the dense reference.
n = 100
m = 150
c = np.arange(1, n + 1)
r = np.r_[1, np.arange(-2, -m, -1)]
L = LazyToeplitz(c, r)
T = toeplitz(c, r)
def test_symmetric():
    """Square on-diagonal windows of the lazy matrix match the dense one."""
    windows = (
        slice(10, 20),
        slice(0, 150),
        slice(0, 0),
        slice(150, 150),
        slice(10, 10),
    )
    for w in windows:
        assert np.allclose(L[w, w], T[w, w])
def test_triu_no_overlap():
    """Upper-triangular windows whose row/column ranges do not overlap."""
    cases = ((10, 20, 30, 40), (10, 15, 30, 40), (10, 20, 30, 45))
    for i0, i1, j0, j1 in cases:
        si, sj = slice(i0, i1), slice(j0, j1)
        assert np.allclose(L[si, sj], T[si, sj])
def test_tril_no_overlap():
    """Lower-triangular windows whose row/column ranges do not overlap."""
    cases = ((30, 40, 10, 20), (30, 40, 10, 15), (30, 45, 10, 20))
    for i0, i1, j0, j1 in cases:
        si, sj = slice(i0, i1), slice(j0, j1)
        assert np.allclose(L[si, sj], T[si, sj])
def test_triu_with_overlap():
    """Upper-triangular windows with overlapping row/column ranges."""
    cases = ((10, 20, 15, 25), (13, 22, 15, 25), (10, 20, 18, 22))
    for i0, i1, j0, j1 in cases:
        si, sj = slice(i0, i1), slice(j0, j1)
        assert np.allclose(L[si, sj], T[si, sj])
def test_tril_with_overlap():
    """Lower-triangular windows with overlapping row/column ranges."""
    cases = ((15, 25, 10, 20), (15, 22, 10, 20), (15, 25, 10, 18))
    for i0, i1, j0, j1 in cases:
        si, sj = slice(i0, i1), slice(j0, j1)
        assert np.allclose(L[si, sj], T[si, sj])
def test_nested():
    """Windows where one index range is nested inside the other."""
    cases = ((10, 40, 20, 30), (10, 35, 20, 30), (10, 40, 20, 25), (20, 30, 10, 40))
    for i0, i1, j0, j1 in cases:
        si, sj = slice(i0, i1), slice(j0, j1)
        assert np.allclose(L[si, sj], T[si, sj])
| [
"cooltools.lib.numutils.LazyToeplitz",
"numpy.allclose",
"scipy.linalg.toeplitz",
"numpy.arange"
] | [((123, 142), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (132, 142), True, 'import numpy as np\n'), ((184, 202), 'cooltools.lib.numutils.LazyToeplitz', 'LazyToeplitz', (['c', 'r'], {}), '(c, r)\n', (196, 202), False, 'from cooltools.lib.numutils import LazyToeplitz\n'), ((207, 221), 'scipy.linalg.toeplitz', 'toeplitz', (['c', 'r'], {}), '(c, r)\n', (215, 221), False, 'from scipy.linalg import toeplitz\n'), ((156, 177), 'numpy.arange', 'np.arange', (['(-2)', '(-m)', '(-1)'], {}), '(-2, -m, -1)\n', (165, 177), True, 'import numpy as np\n'), ((399, 432), 'numpy.allclose', 'np.allclose', (['L[si, si]', 'T[si, si]'], {}), '(L[si, si], T[si, si])\n', (410, 432), True, 'import numpy as np\n'), ((625, 658), 'numpy.allclose', 'np.allclose', (['L[si, sj]', 'T[si, sj]'], {}), '(L[si, sj], T[si, sj])\n', (636, 658), True, 'import numpy as np\n'), ((851, 884), 'numpy.allclose', 'np.allclose', (['L[si, sj]', 'T[si, sj]'], {}), '(L[si, sj], T[si, sj])\n', (862, 884), True, 'import numpy as np\n'), ((1079, 1112), 'numpy.allclose', 'np.allclose', (['L[si, sj]', 'T[si, sj]'], {}), '(L[si, sj], T[si, sj])\n', (1090, 1112), True, 'import numpy as np\n'), ((1307, 1340), 'numpy.allclose', 'np.allclose', (['L[si, sj]', 'T[si, sj]'], {}), '(L[si, sj], T[si, sj])\n', (1318, 1340), True, 'import numpy as np\n'), ((1564, 1597), 'numpy.allclose', 'np.allclose', (['L[si, sj]', 'T[si, sj]'], {}), '(L[si, sj], T[si, sj])\n', (1575, 1597), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import pi
### falling_sphere.py
###
### Script to calculate the finite differences
### solution for the falling sphere in a viscous fluid
### (Stokes' law).
###
### Compared against analytical solution.
#####################
#### Parameters: ####
#####################
rho_f = 1000   # density of the fluid (kg m^-3)
rho_s = 7750   # density of the sphere (kg m^-3)
R = 0.02       # radius of the sphere (m)
eta = 100      # viscosity of the fluid (Pa s)
nt = 5         # how many timesteps to calculate (-)
z_ini = 0.0    # initial location of the sphere (m)
dt = 0.0020    # size of timestep used (s)
g = 9.81       # acceleration of gravity (m s^-2)
#####################
# Initialize an array to hold the results (sphere elevation)
# The first values is the initial elevation, rest are calculated
z = np.zeros(nt)
z[0] = z_ini
# generate array of times in seconds
# NOTE: the name `time` shadows any later use of the stdlib time module
time = np.zeros(nt)
time[0] = 0
# calculate sphere volume
V = (4/3)*pi*R**3
# calculate buoyancy
Fb = V*rho_f*g
# calculate sphere mass
m = V*rho_s
# calculate gravitational force
Fg = -m*g
# to shorten the expressions
# (Stokes drag coefficient: F_drag = A * v with A = -6*pi*eta*R)
A = -6 * pi * eta * R
# Loop over every time step, always calculating the new elevation
# based on the two previous elevation values.
# Skip the first value which is directly given by the initial condition.
for it in range(1, nt):
    if it == 1:
        # z_{-1} is replaced by z_{1} in the discretized
        # equation since we don't know value for z_{-1}
        # This is the zero velocity boundary (initial) condition
        # (z[it] on the right-hand side is still 0 here, since z was
        # zero-initialized and z[1] has not been assigned yet)
        z[it] = (Fb + Fg - z[it-1] * (-2*m/dt**2) - z[it] * (m/dt**2 + A/(2*dt))) / (m/dt**2 - A/(2*dt))
        #                                           ^^^^^
    else:
        # At timesteps it>1 we do know two previous values,
        # z[it-1] and z[it-2] so we use the normal discretized equation
        # to calculate the next value z[it+1]
        z[it] = (Fb + Fg - z[it-1] * (-2*m/dt**2) - z[it-2] * (m/dt**2 + A/(2*dt))) / (m/dt**2 - A/(2*dt))
    # Calculate the time in seconds at this timestep
    time[it] = time[it-1] + dt
### Calculate analytical solution
# See course material for the derivation.
# Calculate constants using initial conditions
# z = z_ini at t = 0
# v = dz/dt = 0 at t = 0
# Initialize arrays to hold the time values
# for the analytical solution. We calculate the analytical
# solution at 200 points for plotting.
t_analytical = np.linspace(0, (nt-1)*dt, 200)
# Coefficients of the differential equation
B1 = 6*pi*eta*R/m
B2 = (Fb+Fg)/m
# Here we can calculate all the points at once using NumPy's
# element-by-element array multiplication
z_analytical = (B2/B1)*(t_analytical + (1/B1)*np.exp(-B1*t_analytical) + (B1*z_ini/B2) - 1/B1)
# Create the plots for numerical and analytical solutions
# (numerical as dotted markers, analytical as a solid line; both in mm/ms)
fig, ax = plt.subplots()
ax.plot(time*1000, 1000*z, '.--')
ax.plot(t_analytical*1000, 1000*z_analytical, '-')
plt.xlabel("Time (ms)")
plt.ylabel("z (mm)")
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((892, 904), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (900, 904), True, 'import numpy as np\n'), ((963, 975), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (971, 975), True, 'import numpy as np\n'), ((2416, 2450), 'numpy.linspace', 'np.linspace', (['(0)', '((nt - 1) * dt)', '(200)'], {}), '(0, (nt - 1) * dt, 200)\n', (2427, 2450), True, 'import numpy as np\n'), ((2796, 2810), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2808, 2810), True, 'import matplotlib.pyplot as plt\n'), ((2897, 2920), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (ms)"""'], {}), "('Time (ms)')\n", (2907, 2920), True, 'import matplotlib.pyplot as plt\n'), ((2921, 2941), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z (mm)"""'], {}), "('z (mm)')\n", (2931, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2950, 2952), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2703), 'numpy.exp', 'np.exp', (['(-B1 * t_analytical)'], {}), '(-B1 * t_analytical)\n', (2683, 2703), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import json
import math
import numpy as np
import re
import sys
from terminaltables import AsciiTable
from termcolor import colored
from scipy.stats import ttest_ind
# Thresholds used when judging the statistical significance of a change.
p_value_significance_threshold = 0.001  # two-sample t-test cutoff
min_iterations = 10  # fewer measured runs than this -> p-value unreliable
min_runtime_ns = 59 * 1000 * 1000 * 1000  # minimum total runtime (~59 s)
min_iterations_disabling_min_runtime = 100  # enough runs to waive the runtime minimum
# Running extremes / counters of per-item latency change (percent),
# updated in the main comparison loop below.
max_lost = 0  # most negative latency change seen
max_gain = 0  # most positive latency change seen
num_lost = 0  # items with change < 0 and |change| >= confidence_threshold
num_gain = 0  # items with change > 0 and |change| >= confidence_threshold
# threshold for latency gain/loss to be considered (>=)
confidence_threshold = 5
def format_diff(diff):
    """Render a new/old ratio as a signed percent-change string (1.0 -> '+0%')."""
    change = diff - 1  # convert the ratio into a relative change
    sign = "" if change < 0.0 else "+"
    return f"{sign}{change:.0%}"
def color_diff(diff, inverse_colors=False):
    """Color a formatted diff string: green for improvement, red for regression.

    Changes below 5% (after rounding to two decimals) stay white.  With
    inverse_colors=True the green/red mapping is flipped (useful for latency,
    where an increase is bad).
    """
    text = format_diff(diff)
    improved = (text[0] == "+") != inverse_colors
    base_color = "green" if improved else "red"
    significant = round(abs(diff - 1), 2) >= 0.05
    return colored(text, base_color if significant else "white")
def geometric_mean(values):
    """Return the geometric mean of *values*.

    Uses math.prod instead of a hand-rolled product loop.  As before, an
    empty sequence raises ZeroDivisionError (division by len(values)).
    """
    return math.prod(values) ** (1 / len(values))
def calculate_and_format_p_value(old_durations, new_durations):
    """Return a table cell describing the statistical significance of a change.

    Non-numeric cells / yellow markers flag results that cannot be trusted:
    the total runtime was too short, or too few iterations were measured.
    """
    p_value = ttest_ind(old_durations, new_durations)[1]
    old_runtime = sum(old_durations)
    new_runtime = sum(new_durations)
    # The results for a query are statistically not significant if the runtime
    # is too short -- unless the item ran > min_iterations_disabling_min_runtime
    # times, which overrides the runtime requirement.
    runtime_too_short = old_runtime < min_runtime_ns or new_runtime < min_runtime_ns
    enough_runs_to_waive_runtime = (
        len(old_durations) >= min_iterations_disabling_min_runtime
        and len(new_durations) >= min_iterations_disabling_min_runtime
    )
    if runtime_too_short and not enough_runs_to_waive_runtime:
        return "(run time too short)"
    if len(old_durations) < min_iterations or len(new_durations) < min_iterations:
        # Too few measurements to decide either way; flag it and remember to
        # add an explanatory note to the table output later.
        global add_note_for_insufficient_pvalue_runs
        add_note_for_insufficient_pvalue_runs = True
        return colored("˅", "yellow", attrs=["bold"])
    if p_value < p_value_significance_threshold:
        return colored(f"{p_value:.4f}", "white")
    return colored(f"{p_value:.4f}", "yellow", attrs=["bold"])
def create_context_overview(old_config, new_config, github_format):
    """Build an ASCII table comparing the benchmark contexts of the two runs.

    Differing parameters are marked with '≠' and colored red; debug builds are
    flagged with '!'.  With github_format=True the table becomes a fake diff
    where flagged lines are prefixed with '-' so GitHub renders them red.
    """
    ignored_keys = ["GIT-HASH", "date"]
    keys_old = set(old_config["context"].keys())
    keys_new = set(new_config["context"].keys())
    shared_keys = keys_old & keys_new
    rows = [["Parameter", sys.argv[1], sys.argv[2]]]
    for key in sorted(shared_keys):
        left = old_config["context"][key]
        right = new_config["context"][key]
        color, marker = "white", " "
        if left != right and key not in ignored_keys:
            color, marker = "red", "≠"
        if key == "build_type" and "debug" in (left, right):
            # Benchmarking a non-release build always deserves a warning.
            color, marker = "red", "!"
        rows.append([colored(marker + key, color), left, right])
    # Keys present in only one of the two contexts.
    for key in sorted(keys_old - shared_keys):
        rows.append([colored("≠" + key, "red"), old_config["context"][key], "undefined"])
    for key in sorted(keys_new - shared_keys):
        rows.append([colored("≠" + key, "red"), "undefined", new_config["context"][key]])
    table = AsciiTable(rows)
    table.title = "Configuration Overview"
    rendered = str(table.table)
    if not github_format:
        return rendered
    # GitHub mode: every flagged line becomes a '-' diff line (rendered red).
    diff_lines = []
    for line in rendered.splitlines():
        prefix = "-" if ("≠" in line or "!" in line) else " "
        diff_lines.append(f"{prefix}{line}\n")
    return "".join(diff_lines)
def double_vertical_separators(lines, vertical_separators_to_duplicate):
    """Duplicate selected separator characters in every table line, in place.

    The first character of each line is taken as its separator ('|' for normal
    rows, '+' for horizontal rules).  For every index in
    vertical_separators_to_duplicate, that occurrence of the separator is
    doubled -- [0, 3] doubles the first and fourth.  Table contents must not
    contain the separator character themselves.  The list is mutated in place
    and also returned.
    """
    for row, text in enumerate(lines):
        sep = text[0]
        # Separator positions are found by searching each line individually,
        # since color escape codes shift column positions between lines.
        positions = [m.start() for m in re.finditer(re.escape(sep), text)]
        # Leading 0 is needed so the first slice starts at the line start.
        cut_points = [0] + [positions[i] for i in vertical_separators_to_duplicate]
        pieces = [text[a:b] for a, b in zip(cut_points, cut_points[1:] + [None])]
        # Re-joining on the separator inserts one extra copy at every cut.
        lines[row] = sep.join(pieces)
    return lines
# CLI: two result JSON files to compare, plus an optional --github flag.
if not len(sys.argv) in [3, 4]:
    exit("Usage: " + sys.argv[0] + " benchmark1.json benchmark2.json [--github]")
# Format the output as a diff (prepending - and +) so that Github shows colors
github_format = bool(len(sys.argv) == 4 and sys.argv[3] == "--github")
with open(sys.argv[1]) as old_file:
    old_data = json.load(old_file)
with open(sys.argv[2]) as new_file:
    new_data = json.load(new_file)
if old_data["context"]["benchmark_mode"] != new_data["context"]["benchmark_mode"]:
    exit("Benchmark runs with different modes (ordered/shuffled) are not comparable")
# NOTE(review): for TPC-DS result files (detected via the file name),
# benchmark '95' is dropped from both runs before comparing -- the reason
# is not stated here; confirm with the benchmark owners.
if 'TPCDS' in sys.argv[1]:
    for data in [old_data, new_data]:
        new_b = []
        for b in data['benchmarks']:
            if b['name'] == '95':
                continue
            new_b.append(b)
        data['benchmarks'] = new_b
# Accumulators filled by the per-benchmark comparison loop below.
diffs_throughput = []
total_runtime_old = 0
total_runtime_new = 0
# Create table header:
# $latency and $thrghpt (abbreviated to keep the column at a max width of 8 chars) will later be replaced with a title
# spanning two columns
table_data = []
table_data.append(["Item", "$latency", "", "Change", "$thrghpt", "", "Change", "p-value"])
table_data.append(["", "old", "new", "", "old", "new", "", ""])
# NOTE(review): zip() pairs items positionally, so both JSON files are assumed
# to list the same benchmarks in the same order -- confirm.
for old, new in zip(old_data["benchmarks"], new_data["benchmarks"]):
    name = old["name"]
    if old["name"] != new["name"]:
        name += " -> " + new["name"]
    # Create numpy arrays for old/new successful/unsuccessful runs from benchmark dictionary
    old_successful_durations = np.array([run["duration"] for run in old["successful_runs"]], dtype=np.float64)
    new_successful_durations = np.array([run["duration"] for run in new["successful_runs"]], dtype=np.float64)
    old_unsuccessful_durations = np.array([run["duration"] for run in old["unsuccessful_runs"]], dtype=np.float64)
    new_unsuccessful_durations = np.array([run["duration"] for run in new["unsuccessful_runs"]], dtype=np.float64)
    old_avg_successful_duration = np.mean(old_successful_durations)  # defaults to np.float64 for int input
    new_avg_successful_duration = np.mean(new_successful_durations)
    # np.mean of an empty array is nan; skip those when accumulating totals
    total_runtime_old += old_avg_successful_duration if not math.isnan(old_avg_successful_duration) else 0.0
    total_runtime_new += new_avg_successful_duration if not math.isnan(new_avg_successful_duration) else 0.0
    # Check for duration==0 to avoid div/0
    if float(old_avg_successful_duration) > 0.0:
        diff_duration = float(new_avg_successful_duration / old_avg_successful_duration)
        # track extreme latency changes and count significant gains/losses
        change = int(format_diff(diff_duration).replace("%", ""))
        max_lost = min(change, max_lost)
        max_gain = max(change, max_gain)
        if abs(change) >= confidence_threshold:
            if change < 0:
                num_lost += 1
            elif change > 0:
                num_gain += 1
    else:
        diff_duration = float("nan")
    if float(old["items_per_second"]) > 0.0:
        diff_throughput = float(new["items_per_second"]) / float(old["items_per_second"])
        diffs_throughput.append(diff_throughput)
    else:
        diff_throughput = float("nan")
    # Format the diffs (add colors and percentage output) and calculate p-value
    diff_duration_formatted = color_diff(diff_duration, True)
    diff_throughput_formatted = color_diff(diff_throughput)
    p_value_formatted = calculate_and_format_p_value(old_successful_durations, new_successful_durations)
    old_iteration_count = len(old_successful_durations) + len(old_unsuccessful_durations)
    new_iteration_count = len(new_successful_durations) + len(new_unsuccessful_durations)
    # Check if number of runs reached max_runs
    if (old_data["context"]["max_runs"] > 0 or new_data["context"]["max_runs"] > 0) and (
        old_iteration_count == old_data["context"]["max_runs"] or new_iteration_count == new_data["context"]["max_runs"]
    ):
        note = colored("˄", "yellow", attrs=["bold"])
        add_note_for_capped_runs = True
    else:
        note = " "
    # Add table row for succesful executions. We use column widths of 7 (latency) and 8 (throughput) for printing to
    # ensure that we have enough space to replace the latency/throughput marker with a column header spanning multiple
    # columns.
    table_data.append(
        [
            name,
            f"{(old_avg_successful_duration / 1e6):>7.1f}" if old_avg_successful_duration else "nan",
            f"{(new_avg_successful_duration / 1e6):>7.1f}" if new_avg_successful_duration else "nan",
            diff_duration_formatted + note if not math.isnan(diff_duration) else "",
            f'{old["items_per_second"]:>8.2f}',
            f'{new["items_per_second"]:>8.2f}',
            diff_throughput_formatted + note,
            p_value_formatted,
        ]
    )
    # If any runs failed, add a dimmed sub-row with the unsuccessful stats.
    if len(old["unsuccessful_runs"]) > 0 or len(new["unsuccessful_runs"]) > 0:
        # Ordered mode normalizes by the item's own duration, shuffled mode by
        # the total duration of the whole benchmark run.
        if old_data["context"]["benchmark_mode"] == "Ordered":
            old_unsuccessful_per_second = float(len(old_unsuccessful_durations)) / (float(old["duration"]) / 1e9)
            new_unsuccessful_per_second = float(len(new_unsuccessful_durations)) / (float(new["duration"]) / 1e9)
        else:
            old_unsuccessful_per_second = float(len(old_unsuccessful_durations)) / (
                float(old_data["summary"]["total_duration"]) / 1e9
            )
            new_unsuccessful_per_second = float(len(new_unsuccessful_durations)) / (
                float(new_data["summary"]["total_duration"]) / 1e9
            )
        old_avg_unsuccessful_duration = np.mean(old_unsuccessful_durations)
        new_avg_unsuccessful_duration = np.mean(new_unsuccessful_durations)
        if len(old_unsuccessful_durations) > 0 and len(new_unsuccessful_durations) > 0:
            diff_throughput_unsuccessful = float(new_unsuccessful_per_second / old_unsuccessful_per_second)
            diff_duration_unsuccessful = new_avg_unsuccessful_duration / old_avg_unsuccessful_duration
        else:
            diff_throughput_unsuccessful = float("nan")
            diff_duration_unsuccessful = float("nan")
        unsuccessful_info = [
            "  unsucc.:",
            f"{(old_avg_unsuccessful_duration / 1e6):>7.1f}"
            if not math.isnan(old_avg_unsuccessful_duration)
            else "nan",
            f"{(new_avg_unsuccessful_duration / 1e6):>7.1f}"
            if not math.isnan(new_avg_unsuccessful_duration)
            else "nan",
            format_diff(diff_duration_unsuccessful) + " " if not math.isnan(diff_duration_unsuccessful) else " ",
            f"{old_unsuccessful_per_second:>.2f}",
            f"{new_unsuccessful_per_second:>.2f}",
            format_diff(diff_throughput_unsuccessful) + " " if not math.isnan(diff_throughput_unsuccessful) else " ",
        ]
        unsuccessful_info_colored = [colored(text, attrs=["dark"]) for text in unsuccessful_info]
        table_data.append(unsuccessful_info_colored)
# Add a summary of all benchmark items to the final table, including (1) the change of the accumulated sum of all
# queries' average runtimes and (2) the geometric mean of the percentage changes.
table_data.append(
    [
        "Sum",
        f"{(total_runtime_old / 1e6):>7.1f}",
        f"{(total_runtime_new / 1e6):>7.1f}",
        color_diff(total_runtime_new / total_runtime_old, True) + " ",
    ]
)
table_data.append(["Geomean", "", "", "", "", "", color_diff(geometric_mean(diffs_throughput)) + " "])
table = AsciiTable(table_data)
for column_index in range(1, len(table_data[0])):  # all columns justified to right, except for item name
    table.justify_columns[column_index] = "right"
table_string = str(table.table)
table_string_reformatted = ""
lines = table_string.splitlines()
# Double the vertical line between the item names and the two major measurements.
lines = double_vertical_separators(lines, [1, 4])
# As the used terminaltables module does not support cells that span multiple columns, we do that manually for latency
# and throughput in the header. We used two place holders that are narrow enough to not grow the column any wider than
# necessary for the actual values. After manually changing the column title to span two column, we replace the place
# holder with the actual full descriptions.
for (placeholder, final) in [
    ("$thrghpt", "Throughput (iter/s)"),
    ("$latency", "Latency (ms/iter)"),
]:
    header_strings = lines[1].split("|")
    for column_id, text in enumerate(header_strings):
        if placeholder in text:
            # merge the title cell with its neighbouring unit cell
            title_column = header_strings[column_id]
            unit_column = header_strings[column_id + 1]
            previous_length = len(title_column) + len(unit_column) + 1
            new_title = f" {final} ".ljust(previous_length, " ")
            lines[1] = "|".join(header_strings[:column_id] + [new_title] + header_strings[column_id + 2 :])
# Swap second line of header with automatically added separator. Terminaltables does not support multi-line headers. So
# we have the second header line as part of the results after a separator line. We need to swap these.
lines[2], lines[3] = lines[3], lines[2]
for (line_number, line) in enumerate(lines):
    if line_number == len(table_data):
        # Add another separation between benchmark items and aggregates
        table_string_reformatted += lines[-1] + "\n"
    table_string_reformatted += line + "\n"
# In case the runs for the executed benchmark have been cut or the number of runs was insufficient for the p-value
# calcution, we add notes to the end of the table.
if add_note_for_capped_runs or add_note_for_insufficient_pvalue_runs:
    first_column_width = len(lines[1].split("|")[1])
    width_for_note = len(lines[0]) - first_column_width - 5  # 5 for seperators and spaces
    if add_note_for_capped_runs:
        note = "˄ Execution stopped due to max runs reached"
        table_string_reformatted += "|" + (" Notes ".rjust(first_column_width, " "))
        table_string_reformatted += "|| " + note.ljust(width_for_note, " ") + "|\n"
    if add_note_for_insufficient_pvalue_runs:
        note = "˅" + " Insufficient number of runs for p-value calculation"
        table_string_reformatted += "|" + (" " * first_column_width) + "|| " + note.ljust(width_for_note, " ") + "|\n"
    table_string_reformatted += lines[-1] + "\n"
table_string = table_string_reformatted
# If github_format is set, format the output in the style of a diff file where added lines (starting with +) are
# colored green, removed lines (starting with -) are red, and others (starting with an empty space) are black.
# Because terminaltables (unsurprisingly) does not support this hack, we need to post-process the result string,
# searching for the control codes that define text to be formatted as green or red.
if github_format:
    # ANSI escape prefixes emitted by termcolor for green/red text; their
    # presence in a line tells us which diff sign to prepend.
    green_control_sequence = colored("", "green")[0:5]
    red_control_sequence = colored("", "red")[0:5]
    table_string_reformatted = (
        "<details>\n"
        + "<summary>Configuration Overview - click to expand</summary>\n\n"
        + "```diff\n"
        + create_context_overview(old_data, new_data, github_format)
        + "```\n"
        + "</details>\n\n"
        + "```diff\n"
    )
    for line in table_string.splitlines():
        if green_control_sequence in line:
            table_string_reformatted += "+"
        elif red_control_sequence in line:
            table_string_reformatted += "-"
        else:
            table_string_reformatted += " "
        table_string_reformatted += line + "\n"
    table_string_reformatted += "```"
    table_string = table_string_reformatted
else:
    table_string = create_context_overview(old_data, new_data, github_format) + "\n\n" + table_string
print(table_string)
print()
# Summary statistics gathered during the comparison loop (percent changes).
print("loss --> latency now lower, gain --> latency now higher")
print(f"baseline: {round(total_runtime_old / 10**9, 1)}s")
print(f"abs. change: {round((total_runtime_new - total_runtime_old) / 10**9, 1)}s")
print(f"rel.change: {round(((total_runtime_new / total_runtime_old) - 1) * 100)}%")
print(f"max loss: {max_lost}%")
print(f"max gain: {max_gain}%")
print(f"# losses >= {confidence_threshold}%: {num_lost}")
print(f"# gains >= {confidence_threshold}%: {num_gain}")
| [
"numpy.mean",
"re.escape",
"termcolor.colored",
"numpy.array",
"terminaltables.AsciiTable",
"scipy.stats.ttest_ind",
"json.load",
"math.isnan"
] | [((13082, 13104), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (13092, 13104), False, 'from terminaltables import AsciiTable\n'), ((3995, 4018), 'terminaltables.AsciiTable', 'AsciiTable', (['table_lines'], {}), '(table_lines)\n', (4005, 4018), False, 'from terminaltables import AsciiTable\n'), ((5770, 5789), 'json.load', 'json.load', (['old_file'], {}), '(old_file)\n', (5779, 5789), False, 'import json\n'), ((5842, 5861), 'json.load', 'json.load', (['new_file'], {}), '(new_file)\n', (5851, 5861), False, 'import json\n'), ((7172, 7251), 'numpy.array', 'np.array', (["[run['duration'] for run in old['successful_runs']]"], {'dtype': 'np.float64'}), "([run['duration'] for run in old['successful_runs']], dtype=np.float64)\n", (7180, 7251), True, 'import numpy as np\n'), ((7283, 7362), 'numpy.array', 'np.array', (["[run['duration'] for run in new['successful_runs']]"], {'dtype': 'np.float64'}), "([run['duration'] for run in new['successful_runs']], dtype=np.float64)\n", (7291, 7362), True, 'import numpy as np\n'), ((7396, 7482), 'numpy.array', 'np.array', (["[run['duration'] for run in old['unsuccessful_runs']]"], {'dtype': 'np.float64'}), "([run['duration'] for run in old['unsuccessful_runs']], dtype=np.\n float64)\n", (7404, 7482), True, 'import numpy as np\n'), ((7511, 7597), 'numpy.array', 'np.array', (["[run['duration'] for run in new['unsuccessful_runs']]"], {'dtype': 'np.float64'}), "([run['duration'] for run in new['unsuccessful_runs']], dtype=np.\n float64)\n", (7519, 7597), True, 'import numpy as np\n'), ((7627, 7660), 'numpy.mean', 'np.mean', (['old_successful_durations'], {}), '(old_successful_durations)\n', (7634, 7660), True, 'import numpy as np\n'), ((7735, 7768), 'numpy.mean', 'np.mean', (['new_successful_durations'], {}), '(new_successful_durations)\n', (7742, 7768), True, 'import numpy as np\n'), ((1181, 1220), 'scipy.stats.ttest_ind', 'ttest_ind', (['old_durations', 'new_durations'], {}), '(old_durations, 
new_durations)\n', (1190, 1220), False, 'from scipy.stats import ttest_ind\n'), ((9534, 9572), 'termcolor.colored', 'colored', (['"""˄"""', '"""yellow"""'], {'attrs': "['bold']"}), "('˄', 'yellow', attrs=['bold'])\n", (9541, 9572), False, 'from termcolor import colored\n'), ((11181, 11216), 'numpy.mean', 'np.mean', (['old_unsuccessful_durations'], {}), '(old_unsuccessful_durations)\n', (11188, 11216), True, 'import numpy as np\n'), ((11257, 11292), 'numpy.mean', 'np.mean', (['new_unsuccessful_durations'], {}), '(new_unsuccessful_durations)\n', (11264, 11292), True, 'import numpy as np\n'), ((16451, 16471), 'termcolor.colored', 'colored', (['""""""', '"""green"""'], {}), "('', 'green')\n", (16458, 16471), False, 'from termcolor import colored\n'), ((16504, 16522), 'termcolor.colored', 'colored', (['""""""', '"""red"""'], {}), "('', 'red')\n", (16511, 16522), False, 'from termcolor import colored\n'), ((2400, 2438), 'termcolor.colored', 'colored', (['"""˅"""', '"""yellow"""'], {'attrs': "['bold']"}), "('˅', 'yellow', attrs=['bold'])\n", (2407, 2438), False, 'from termcolor import colored\n'), ((7830, 7869), 'math.isnan', 'math.isnan', (['old_avg_successful_duration'], {}), '(old_avg_successful_duration)\n', (7840, 7869), False, 'import math\n'), ((7939, 7978), 'math.isnan', 'math.isnan', (['new_avg_successful_duration'], {}), '(new_avg_successful_duration)\n', (7949, 7978), False, 'import math\n'), ((12448, 12477), 'termcolor.colored', 'colored', (['text'], {'attrs': "['dark']"}), "(text, attrs=['dark'])\n", (12455, 12477), False, 'from termcolor import colored\n'), ((2495, 2529), 'termcolor.colored', 'colored', (['f"""{p_value:.4f}"""', '"""white"""'], {}), "(f'{p_value:.4f}', 'white')\n", (2502, 2529), False, 'from termcolor import colored\n'), ((2563, 2614), 'termcolor.colored', 'colored', (['f"""{p_value:.4f}"""', '"""yellow"""'], {'attrs': "['bold']"}), "(f'{p_value:.4f}', 'yellow', attrs=['bold'])\n", (2570, 2614), False, 'from termcolor import colored\n'), 
((3508, 3536), 'termcolor.colored', 'colored', (['(marker + key)', 'color'], {}), '(marker + key, color)\n', (3515, 3536), False, 'from termcolor import colored\n'), ((3751, 3776), 'termcolor.colored', 'colored', (["('≠' + key)", '"""red"""'], {}), "('≠' + key, 'red')\n", (3758, 3776), False, 'from termcolor import colored\n'), ((3934, 3959), 'termcolor.colored', 'colored', (["('≠' + key)", '"""red"""'], {}), "('≠' + key, 'red')\n", (3941, 3959), False, 'from termcolor import colored\n'), ((5122, 5151), 're.escape', 're.escape', (['vertical_separator'], {}), '(vertical_separator)\n', (5131, 5151), False, 'import re\n'), ((10199, 10224), 'math.isnan', 'math.isnan', (['diff_duration'], {}), '(diff_duration)\n', (10209, 10224), False, 'import math\n'), ((11854, 11895), 'math.isnan', 'math.isnan', (['old_avg_unsuccessful_duration'], {}), '(old_avg_unsuccessful_duration)\n', (11864, 11895), False, 'import math\n'), ((12000, 12041), 'math.isnan', 'math.isnan', (['new_avg_unsuccessful_duration'], {}), '(new_avg_unsuccessful_duration)\n', (12010, 12041), False, 'import math\n'), ((12131, 12169), 'math.isnan', 'math.isnan', (['diff_duration_unsuccessful'], {}), '(diff_duration_unsuccessful)\n', (12141, 12169), False, 'import math\n'), ((12349, 12389), 'math.isnan', 'math.isnan', (['diff_throughput_unsuccessful'], {}), '(diff_throughput_unsuccessful)\n', (12359, 12389), False, 'import math\n')] |
import numpy as np
np.random.seed(9453)  # fixed seed so the augmented dataset is reproducible run-to-run
from time_series_augmentation_toolkit import DN
import pickle
# Load the labelled OHLC samples produced upstream.
with open('labeldata.pkl','rb') as f:
    data = pickle.load(f)
output = []
for d in data:
    # NOTE(review): d.copy() is a shallow copy, so the nested 'ohlc_data' dict is
    # shared with the original sample and the assignments below mutate `data` too.
    # Harmless here because `data` is not reused afterwards — confirm if that changes.
    this_output = d.copy()
    # Draw one noise level (sigma) per OHLC channel; mid-range sigmas most likely.
    r = np.random.choice([3,5,10,15,20], size=4, p=[0.1,0.5,0.3,0.05,0.05])
    # DN presumably adds noise to each series (TODO confirm against the toolkit);
    # np.rint rounds the noisy values back to integer prices.
    this_output['ohlc_data']['open'] = np.rint(DN(np.array(this_output['ohlc_data']['open']), sigma=r[0])).tolist()
    this_output['ohlc_data']['high'] = np.rint(DN(np.array(this_output['ohlc_data']['high']), sigma=r[1])).tolist()
    this_output['ohlc_data']['low'] = np.rint(DN(np.array(this_output['ohlc_data']['low']), sigma=r[2])).tolist()
    this_output['ohlc_data']['close'] = np.rint(DN(np.array(this_output['ohlc_data']['close']), sigma=r[3])).tolist()
    # Independent per-channel noise can invert high/low; swap back so high >= low.
    for i in range(this_output['index'][1] - this_output['index'][0] + 1):
        if this_output['ohlc_data']['high'][i] < this_output['ohlc_data']['low'][i]:
            this_output['ohlc_data']['high'][i], this_output['ohlc_data']['low'][i] = \
                this_output['ohlc_data']['low'][i], this_output['ohlc_data']['high'][i]
    output.append(this_output)
# Persist the augmented dataset alongside the original.
with open('labeldata_aug.pkl','wb') as f:
    pickle.dump(output, f)
"pickle.dump",
"numpy.random.choice",
"pickle.load",
"numpy.array",
"numpy.random.seed"
] | [((19, 39), 'numpy.random.seed', 'np.random.seed', (['(9453)'], {}), '(9453)\n', (33, 39), True, 'import numpy as np\n'), ((152, 166), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (163, 166), False, 'import pickle\n'), ((243, 318), 'numpy.random.choice', 'np.random.choice', (['[3, 5, 10, 15, 20]'], {'size': '(4)', 'p': '[0.1, 0.5, 0.3, 0.05, 0.05]'}), '([3, 5, 10, 15, 20], size=4, p=[0.1, 0.5, 0.3, 0.05, 0.05])\n', (259, 318), True, 'import numpy as np\n'), ((1207, 1229), 'pickle.dump', 'pickle.dump', (['output', 'f'], {}), '(output, f)\n', (1218, 1229), False, 'import pickle\n'), ((361, 403), 'numpy.array', 'np.array', (["this_output['ohlc_data']['open']"], {}), "(this_output['ohlc_data']['open'])\n", (369, 403), True, 'import numpy as np\n'), ((477, 519), 'numpy.array', 'np.array', (["this_output['ohlc_data']['high']"], {}), "(this_output['ohlc_data']['high'])\n", (485, 519), True, 'import numpy as np\n'), ((592, 633), 'numpy.array', 'np.array', (["this_output['ohlc_data']['low']"], {}), "(this_output['ohlc_data']['low'])\n", (600, 633), True, 'import numpy as np\n'), ((708, 751), 'numpy.array', 'np.array', (["this_output['ohlc_data']['close']"], {}), "(this_output['ohlc_data']['close'])\n", (716, 751), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class CenterTargetLayer(nn.Module):
    """RoI target assignment for the RCNN head based on center distance.

    Unlike the usual IoU-based samplers, RoI/GT matching here uses the
    center distance returned by ``iou3d_nms_utils.boxes_dist_torch``:
    a *smaller* distance means a *better* match, so every threshold
    comparison below is inverted relative to the IoU convention.
    """

    def __init__(self, roi_sampler_cfg):
        super().__init__()
        self.roi_sampler_cfg = roi_sampler_cfg

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            targets_dict:
                rois: (B, M, 7 + C)
                gt_of_rois: (B, M, 7 + C)
                gt_dist_of_rois: (B, M)
                roi_scores: (B, M)
                roi_labels: (B, M)
                reg_valid_mask: (B, M)
                rcnn_cls_labels: (B, M)
        """
        batch_rois, batch_gt_of_rois, batch_roi_dist, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
            batch_dict=batch_dict
        )
        # regression valid mask: only RoIs close enough to their matched GT get
        # box-regression supervision
        reg_valid_mask = (batch_roi_dist <= self.roi_sampler_cfg.REG_FG_DIST).long()

        # classification label: 1 for clear foreground, 0 for clear background,
        # and a linear soft label in between
        assert self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_dist'
        dist_bg_thresh = self.roi_sampler_cfg.CLS_BG_DIST
        dist_fg_thresh = self.roi_sampler_cfg.CLS_FG_DIST
        fg_mask = batch_roi_dist <= dist_fg_thresh
        bg_mask = batch_roi_dist > dist_bg_thresh
        interval_mask = (fg_mask == 0) & (bg_mask == 0)

        batch_cls_labels = (fg_mask > 0).float()
        # inverse interpolation w.r.t. IoU-based samplers: the label grows as
        # the distance shrinks
        batch_cls_labels[interval_mask] = (dist_bg_thresh - batch_roi_dist[interval_mask]) / (dist_bg_thresh - dist_fg_thresh)
        targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_dist_of_rois': batch_roi_dist,
                        'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
                        'reg_valid_mask': reg_valid_mask,
                        'rcnn_cls_labels': batch_cls_labels}

        return targets_dict

    def sample_rois_for_rcnn(self, batch_dict):
        """
        Samples ROI_PER_IMAGE RoIs per batch element and matches each to its
        nearest GT box (by center distance).

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_rois, batch_gt_of_rois, batch_roi_dist, batch_roi_scores, batch_roi_labels
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        roi_scores = batch_dict['roi_scores']
        roi_labels = batch_dict['roi_labels']
        gt_boxes = batch_dict['gt_boxes']

        code_size = rois.shape[-1]
        batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
        batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
        batch_roi_dist = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)

        for index in range(batch_size):
            cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
                rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
            # gt_boxes is zero-padded at the end; trim the padding rows
            k = cur_gt.__len__() - 1
            while k > 0 and cur_gt[k].sum() == 0:
                k -= 1
            cur_gt = cur_gt[:k + 1]
            cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt

            roi_flag = (cur_roi[:, 3:6].sum(dim = 1) != 0)  # wlh != 0 valid roi
            cur_roi = cur_roi[roi_flag]
            cur_roi_scores = cur_roi_scores[roi_flag]
            cur_roi_labels = cur_roi_labels[roi_flag]

            if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
                min_dist, gt_assignment = self.get_min_dist_with_same_class(
                    rois=cur_roi[:, 0:7], roi_labels=cur_roi_labels,
                    gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
                )
            else:
                cdist = iou3d_nms_utils.boxes_dist_torch(cur_roi[:, 0:7], cur_gt[:, 0:7])  # (M, N)
                min_dist, gt_assignment = torch.min(cdist, dim=1)

            sampled_inds = self.subsample_rois(min_dist = min_dist)

            batch_rois[index] = cur_roi[sampled_inds]
            batch_roi_labels[index] = cur_roi_labels[sampled_inds]
            batch_roi_dist[index] = min_dist[sampled_inds]
            batch_roi_scores[index] = cur_roi_scores[sampled_inds]
            batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]

        return batch_rois, batch_gt_of_rois, batch_roi_dist, batch_roi_scores, batch_roi_labels

    def subsample_rois(self, min_dist):
        """Select ROI_PER_IMAGE RoI indices balancing fg / hard-bg / easy-bg.

        Args:
            min_dist: (M,) distance of each RoI to its nearest GT box.
        Returns:
            sampled_inds: (ROI_PER_IMAGE,) long tensor of selected RoI indices
                          (foreground indices first).
        """
        # sample fg, easy_bg, hard_bg
        fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
        fg_dist = max(self.roi_sampler_cfg.REG_FG_DIST, self.roi_sampler_cfg.CLS_FG_DIST)
        bg_dist = self.roi_sampler_cfg.CLS_BG_DIST_LO

        fg_inds = (min_dist <= fg_dist).nonzero().view(-1)
        easy_bg_inds = (min_dist > bg_dist).nonzero().view(-1)
        hard_bg_inds = ((min_dist > fg_dist) & (min_dist <= bg_dist)).nonzero().view(-1)

        fg_num_rois = fg_inds.numel()
        bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()

        if fg_num_rois > 0 and bg_num_rois > 0:
            # sampling fg
            fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)

            rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(min_dist).long()
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]

            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
            bg_inds = self.sample_bg_inds(
                hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
            )
        elif fg_num_rois > 0 and bg_num_rois == 0:
            # sampling fg with replacement to fill the whole batch
            rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
            rand_num = torch.from_numpy(rand_num).type_as(min_dist).long()
            fg_inds = fg_inds[rand_num]
            # BUGFIX: use an empty LongTensor instead of a python list so that
            # torch.cat below does not raise TypeError on this branch.
            bg_inds = fg_inds[0:0]
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
            bg_inds = self.sample_bg_inds(
                hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
            )
        else:
            print('min distance:(min=%f, max=%f)' % (min_dist.min().item(), min_dist.max().item()))
            print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
            raise NotImplementedError

        sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
        return sampled_inds

    @staticmethod
    def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
        """Sample background indices, mixing hard and easy bg by hard_bg_ratio.

        Sampling is with replacement (torch.randint), so the pools may be
        smaller than the number of samples requested.
        """
        if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
            hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
            easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num

            # sampling hard bg
            rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
            hard_bg_inds = hard_bg_inds[rand_idx]

            # sampling easy bg
            rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
            easy_bg_inds = easy_bg_inds[rand_idx]

            bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
        elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
            hard_bg_rois_num = bg_rois_per_this_image
            # sampling hard bg
            rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
            bg_inds = hard_bg_inds[rand_idx]
        elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
            easy_bg_rois_num = bg_rois_per_this_image
            # sampling easy bg
            rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
            bg_inds = easy_bg_inds[rand_idx]
        else:
            raise NotImplementedError

        return bg_inds

    @staticmethod
    def get_min_dist_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
        """Match each RoI to its nearest GT box of the *same* class.

        Args:
            rois: (N, 7)
            roi_labels: (N)
            gt_boxes: (N, )
            gt_labels:

        Returns:
            min_dist: (N,) distance to the nearest same-class GT box.
            gt_assignment: (N,) index (into gt_boxes) of that GT box.
        """
        # NOTE(review): min_dist is zero-initialized, so a RoI whose class has
        # no GT box keeps distance 0 and would be treated as a perfect match
        # downstream (distance semantics: smaller = better). Confirm whether
        # such RoIs should instead default to a large distance.
        min_dist = rois.new_zeros(rois.shape[0])
        gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])

        for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
            roi_mask = (roi_labels == k)
            gt_mask = (gt_labels == k)
            if roi_mask.sum() > 0 and gt_mask.sum() > 0:
                cur_roi = rois[roi_mask]
                cur_gt = gt_boxes[gt_mask]
                original_gt_assignment = gt_mask.nonzero().view(-1)

                cdist = iou3d_nms_utils.boxes_dist_torch(cur_roi, cur_gt)  # (M, N)
                cur_min_dist, cur_gt_assignment = torch.min(cdist, dim=1)
                min_dist[roi_mask] = cur_min_dist
                gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]

        return min_dist, gt_assignment
| [
"numpy.random.rand",
"torch.from_numpy",
"torch.min",
"torch.cat",
"numpy.round",
"numpy.random.permutation"
] | [((6891, 6927), 'torch.cat', 'torch.cat', (['(fg_inds, bg_inds)'], {'dim': '(0)'}), '((fg_inds, bg_inds), dim=0)\n', (6900, 6927), False, 'import torch\n'), ((4881, 4957), 'numpy.round', 'np.round', (['(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)'], {}), '(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)\n', (4889, 4957), True, 'import numpy as np\n'), ((7699, 7745), 'torch.cat', 'torch.cat', (['[hard_bg_inds, easy_bg_inds]'], {'dim': '(0)'}), '([hard_bg_inds, easy_bg_inds], dim=0)\n', (7708, 7745), False, 'import torch\n'), ((4258, 4281), 'torch.min', 'torch.min', (['cdist'], {'dim': '(1)'}), '(cdist, dim=1)\n', (4267, 4281), False, 'import torch\n'), ((9301, 9324), 'torch.min', 'torch.min', (['cdist'], {'dim': '(1)'}), '(cdist, dim=1)\n', (9310, 9324), False, 'import torch\n'), ((6129, 6179), 'numpy.random.rand', 'np.random.rand', (['self.roi_sampler_cfg.ROI_PER_IMAGE'], {}), '(self.roi_sampler_cfg.ROI_PER_IMAGE)\n', (6143, 6179), True, 'import numpy as np\n'), ((5609, 5643), 'numpy.random.permutation', 'np.random.permutation', (['fg_num_rois'], {}), '(fg_num_rois)\n', (5630, 5643), True, 'import numpy as np\n'), ((6218, 6244), 'torch.from_numpy', 'torch.from_numpy', (['rand_num'], {}), '(rand_num)\n', (6234, 6244), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
from astropy.table import Table, vstack
from collections import OrderedDict
## Import some helper functions, you can see their definitions by uncomenting the bash shell command
from desispec.workflow.exptable import default_obstypes_for_exptable
from desispec.workflow.utils import define_variable_from_environment, pathjoin
from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword
from desiutil.log import get_logger
###############################################
##### Processing Table Column Definitions #####
###############################################
## To eventually being turned into a full-fledged data model. For now a brief description.
# EXPID, int, the exposure ID's assosciate with the job. Always a np.array, even if a single exposure.
# OBSTYPE, string, the obstype as defined by ICS.
# TILEID, int, the TILEID of the tile the exposure observed.
# NIGHT, int, the night of the observation.
# BADAMPS, string, comma list of "{camera}{petal}{amp}", i.e. "[brz][0-9][ABCD]". Example: 'b7D,z8A'
# in the csv this is saved as a semicolon separated list
# LASTSTEP, string, the last step the pipeline should run through for the given exposure. Inclusive of last step.
# EXPFLAG, np.ndarray, set of flags that describe the exposure.
# PROCCAMWORD, string, The result of difference_camword(CAMWORD,BADCAMWWORD) from those exposure table entries.
# This summarizes the cameras that should be processed for the given exposure/job
# CALIBRATOR, int, A 0 signifies that the job is not assosciated with a calibration exposure. 1 means that it is.
# INTID, int, an internally generated ID for a single job within a production. Only unique within a production and
#       not necessarily the same between different production runs (e.g. between a daily
#       run and a large batch reprocessing run).
# OBSDESC, string, describes the observation in more detail than obstype. Currently only used for DITHER on dither tiles.
# JOBDESC, string, describes the job that the row defines. For a single science exposure that could be 'prestdstar' or
# 'poststdstar'. For joint science that would be 'stdstarfit'. For individual arcs it is 'arc', for
# joint arcs it is 'psfnight'. For individual flats it is 'flat', for joint fits it is 'psfnightly'.
# LATEST_QID, int, the most recent Slurm ID assigned to the submitted job.
# SUBMIT_DATE, int, the 'unix time' of the job submission in seconds (int(time.time())).
# STATUS, string, the most recent Slurm status of the job. See docstring of desispec.workflow.queue.get_resubmission_states
# for a list and description.
# SCRIPTNAME, string, the name of the script submitted to Slurm. Due to astropy table constraints, this is truncated
# to a maximum of 40 characters.
# INT_DEP_IDS, np.array, internal ID's of all jobs that are dependencies for the current row. I.e. inputs to the current job.
# LATEST_DEP_QID, np.array, the most recent Slurm ID's for the dependencies jobs uniquely identified by internal ID's
# in INT_DEP_IDS
# ALL_QIDS, np.array, a list of all Slurm ID's assosciated with submissions of this job. Useful if multiple submissions
# were made because of node failures or any other issues that were later resolved (or not resolved).
##################################################
def get_processing_table_column_defs(return_default_values=False, overlap_only=False, unique_only=False):
    """
    Return the column names, datatypes, and (optionally) default row values for a
    DESI processing table.

    Args:
        return_default_values, bool. True if you want the default values returned.
        overlap_only, bool. Only return the columns that are common to both processing and exposure tables.
        unique_only, bool. Only return columns that are not found in an exposure table.

    Returns:
        colnames, list. List of column names for an processing table.
        coldtypes, list. List of column datatypes for the names in colnames.
        coldeflts, list. Optionally returned if return_default_values is True. List of default values for the
                         corresponding colnames.
    """
    def _empty_int_array():
        ## Default value for array-valued columns: a zero-length int array
        return np.ndarray(shape=0).astype(int)

    ## Columns shared with the exposure table
    overlap_names = ['EXPID', 'OBSTYPE', 'TILEID', 'NIGHT',
                     'BADAMPS', 'LASTSTEP', 'EXPFLAG']
    overlap_types = [np.ndarray, 'S10', int, int,
                     'S30', 'S30', np.ndarray]
    overlap_defaults = [_empty_int_array(), 'unknown', -99, 20000101,
                        '', 'all', np.array([], dtype=str)]

    ## Columns specific to the processing table
    unique_names = ['PROCCAMWORD', 'CALIBRATOR', 'INTID', 'OBSDESC', 'JOBDESC', 'LATEST_QID',
                    'SUBMIT_DATE', 'STATUS', 'SCRIPTNAME',
                    'INT_DEP_IDS', 'LATEST_DEP_QID', 'ALL_QIDS']
    unique_types = ['S40', np.int8, int, 'S16', 'S12', int,
                    int, 'S10', 'S40',
                    np.ndarray, np.ndarray, np.ndarray]
    unique_defaults = ['a0123456789', 0, -99, '', 'unknown', -99,
                       -99, 'U', '',
                       _empty_int_array(), _empty_int_array(), _empty_int_array()]

    ## Select the requested subset of columns
    if overlap_only:
        names, types, defaults = overlap_names, overlap_types, overlap_defaults
    elif unique_only:
        names, types, defaults = unique_names, unique_types, unique_defaults
    else:
        names = overlap_names + unique_names
        types = overlap_types + unique_types
        defaults = overlap_defaults + unique_defaults

    if return_default_values:
        return names, types, defaults
    return names, types
def default_exptypes_for_proctable():
    """
    Return the default exposure OBSTYPE values that the workflow recognizes
    and saves in a processing table.

    Returns:
        list. A list of default obstypes to be included in a processing table.
    """
    ## Science-like and calibration types processed by default (case insensitive)
    return [
        'arc',
        'dark',
        'flat',
        'science',
        'twilight',
        'sci',
        'dither',
    ]
def get_processing_table_name(specprod=None, prodmod=None, extension='csv'):
    """
    Return the default processing table filename for the given production.

    Args:
        specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
        prodmod, str. Additional str that can be added to the production table name to further differentiate it.
                      Used in daily workflow to add the night to the name and make it unique from other nightly tables.
        extension, str. The extension (and therefore data format) without a leading period of the saved table.
                        Default is 'csv'.

    Returns:
        str. The processing table name given the input night and extension.
    """
    if specprod is None:
        specprod = define_variable_from_environment(
            env_name='SPECPROD',
            var_descr="Use SPECPROD for unique processing table directories")

    ## Optional suffix distinguishing e.g. nightly tables from each other
    if prodmod is not None:
        modifier = f'-{prodmod}'
    elif 'SPECPROD_MOD' in os.environ:
        modifier = '-' + os.environ['SPECPROD_MOD']
    else:
        modifier = ''

    return f'processing_table_{specprod}{modifier}.{extension}'
def get_processing_table_path(specprod=None):
    """
    Return the default directory for a processing table. If specprod is not
    given, the environment variable 'SPECPROD' must exist.

    Args:
        specprod, str or None. The name of the production. If None, it will be taken from the environment variable.

    Returns:
        str. The full path to the directory where the processing table should be written (or is already written). This
             does not including the filename.
    """
    if specprod is None:
        specprod = define_variable_from_environment(
            env_name='SPECPROD',
            var_descr="Use SPECPROD for unique processing table directories")
    redux_dir = define_variable_from_environment(
        env_name='DESI_SPECTRO_REDUX',
        var_descr="The specprod path")
    return pathjoin(redux_dir, specprod, 'processing_tables')
def get_processing_table_pathname(specprod=None, prodmod=None, extension='csv'):  # base_path,specprod
    """
    Return the default full pathname (directory + filename) of a processing table.

    Args:
        specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
        prodmod, str. Additional str that can be added to the production table name to further differentiate it.
                      Used in daily workflow to add the night to the name and make it unique from other nightly tables.
        extension, str. The extension (and therefore data format) without a leading period of the saved table.
                        Default is 'csv'.

    Returns:
        str. The full pathname where the processing table should be written (or is already written). This
             includes the filename.
    """
    if specprod is None:
        specprod = define_variable_from_environment(
            env_name='SPECPROD',
            var_descr="Use SPECPROD for unique processing table directories")
    directory = get_processing_table_path(specprod)
    filename = get_processing_table_name(specprod, prodmod, extension)
    return pathjoin(directory, filename)
def instantiate_processing_table(colnames=None, coldtypes=None, rows=None):
    """
    Create an empty processing table with proper column names and datatypes. If rows is given, it inserts the rows
    into the table, otherwise it returns a table with no rows.

    Args:
        colnames, list. List of column names for a procesing table.
        coldtypes, list. List of column datatypes for the names in colnames.
        rows, list or np.array of Table.Rows or dicts. An iterable set of Table.Row's or dicts with keys/colnames and value
                                                       pairs that match the default column names and data types of the
                                                       default exposure table.

    Returns:
        processing_table, Table. An astropy Table with the column names and data types for a DESI workflow processing
                                 table. If the input rows was not None, it contains those rows, otherwise it has no rows.
    """
    ## Fall back to the canonical processing-table schema
    if colnames is None or coldtypes is None:
        colnames, coldtypes = get_processing_table_column_defs()
    table = Table(names=colnames, dtype=coldtypes)
    if rows is None:
        return table
    for row in rows:
        table.add_row(row)
    return table
def exptable_to_proctable(input_exptable, obstypes=None):
    """
    Converts an exposure table to a processing table and an unprocessed table. The columns unique to a processing table
    are filled with default values. If comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
    stored in the processing table.

    Args:
        input_exptable, Table. An exposure table. Each row will be converted to a row of an processing table. If
                               comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
                               stored in the processing table.
        obstypes, list or np.array. Optional. A list of exposure OBSTYPE's that should be processed (and therefore
                                    added to the processing table).

    Returns:
        processing_table, Table. The output processing table. Each row corresponds with an exposure that should be
                                 processed.
        unprocessed_table, Table. The output unprocessed table. Each row is an exposure that should not be processed.
    """
    log = get_logger()
    # Work on a copy so the caller's exposure table is never mutated.
    exptable = input_exptable.copy()

    if obstypes is None:
        obstypes = default_obstypes_for_exptable()

    ## Define the column names for the exposure table and their respective datatypes
    colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)

    # NOTE(review): the block below (applying header corrections requested via
    # COMMENTS/HEADERERR) is currently disabled, so the docstring's claim about
    # adjusting commented values does not hold while it stays commented out.
    # for col in ['COMMENTS']: #'HEADERERR',
    #     if col in exptable.colnames:
    #         for ii, arr in enumerate(exptable[col]):
    #             for item in arr:
    #                 clean_item = item.strip(' \t')
    #                 if len(clean_item) > 6:
    #                     keyval = None
    #                     for symb in [':', '=']:
    #                         if symb in clean_item:
    #                             keyval = [val.strip(' ') for val in clean_item.split(symb)]
    #                             break
    #                     if keyval is not None and len(keyval) == 2 and keyval[0].upper() in exptable.colnames:
    #                         key, newval = keyval[0].upper(), keyval[1]
    #                         expid, oldval = exptable['EXPID'][ii], exptable[key][ii]
    #                         log.info(
    #                             f'Found a requested correction to ExpID {expid}: Changing {key} val from {oldval} to {newval}')
    #                         exptable[key][ii] = newval

    # Keep only exposures with no flags raised and a processable OBSTYPE.
    good_exps = (exptable['EXPFLAG'] == 0)
    good_types = np.array([val in obstypes for val in exptable['OBSTYPE']]).astype(bool)
    good = (good_exps & good_types)
    good_table = exptable[good]
    unprocessed_table = exptable[~good]

    ## Remove columns that aren't relevant to processing, they will be added back in the production tables for
    ## end user viewing
    for col in ['REQRA', 'REQDEC', 'TARGTRA', 'TARGTDEC', 'HEADERERR', 'COMMENTS', 'BADEXP']:
        if col in exptable.colnames:
            good_table.remove_column(col)

    # Convert each surviving exposure row into a processing row.
    if len(good_table) > 0:
        rows = []
        for erow in good_table:
            prow = erow_to_prow(erow)#, colnames, coldtypes, coldefaults)
            rows.append(prow)
        processing_table = Table(names=colnames, dtype=coldtypes, rows=rows)
    else:
        processing_table = Table(names=colnames, dtype=coldtypes)

    return processing_table, unprocessed_table
def erow_to_prow(erow):#, colnames=None, coldtypes=None, coldefaults=None, joinsymb='|'):
    """
    Converts an exposure table row to a processing table row. The columns unique to a processing table
    are filled with default values. If comments are made in COMMENTS or HEADERERR, those are ignored.

    Args:
        erow, Table.Row or dict. An exposure table row. The row will be converted to a row of an processing table.
                                 If comments are made in COMMENTS or HEADERERR, those are ignored.

    Returns:
        prow, dict. The output processing table row.
    """
    log = get_logger()
    erow = table_row_to_dict(erow)
    row_names = list(erow.keys())

    ## Target processing-table schema; cast to object arrays so the boolean
    ## mask lookup (colnames == nam) below works.
    colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
    colnames, coldtypes, coldefaults = np.array(colnames,dtype=object), \
                                       np.array(coldtypes,dtype=object), \
                                       np.array(coldefaults,dtype=object)
    prow = dict()
    for nam, typ, defval in zip(colnames, coldtypes, coldefaults):
        if nam == 'PROCCAMWORD':
            # Cameras to process = exposure's cameras minus the known-bad ones.
            if 'BADCAMWORD' in row_names:
                badcamword = erow['BADCAMWORD']
            else:
                badcamword = ''
            prow[nam] = difference_camwords(erow['CAMWORD'],badcamword)
        elif nam == 'OBSDESC':
            if nam in colnames:
                prow[nam] = coldefaults[colnames == nam][0]
            else:
                prow[nam] = ''
            # Tag special observation programs (e.g. dither tiles) from PROGRAM.
            for word in ['dither', 'acquisition', 'focus', 'test']:
                if 'PROGRAM' in row_names and word in erow['PROGRAM'].lower():
                    prow[nam] = word
        elif nam == 'EXPID':
            # Processing rows always store EXPID as an array (joint jobs hold several).
            prow[nam] = np.array([erow[nam]])
        elif nam in row_names:
            prow[nam] = erow[nam]
        else:
            prow[nam] = defval

    ## For obstypes that aren't science, BADAMPS loses it's relevance. For processing,
    ## convert those into bad cameras in BADCAMWORD, so the cameras aren't processed.
    ## Otherwise we'll have nightly calibrations with only half the fibers useful.
    if prow['OBSTYPE'] != 'science' and prow['BADAMPS'] != '':
        badcams = []
        for (camera, petal, amplifier) in parse_badamps(prow['BADAMPS']):
            badcams.append(f'{camera}{petal}')
        newbadcamword = create_camword(badcams)
        # Fixed typo in the log message: "nonsscience" -> "non-science".
        log.info("For non-science exposure: {}, converting BADAMPS={} to bad cameras={}.".format( erow['EXPID'],
                                                                                                 prow['BADAMPS'],
                                                                                                 newbadcamword ) )
        prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],newbadcamword)
        prow['BADAMPS'] = ''

    return prow
def table_row_to_dict(table_row):
    """
    Helper function to convert a table row to a dictionary, which is much easier to work with for some applications

    Args:
        table_row, Table.Row or dict. The row of an astropy table that you want to convert into a dictionary where
                                      each key is a column name and the values are the column entry.

    Returns:
        out, dict. Dictionary where each key is a column name and the values are the column entry.
    """
    ## Exact type checks (not isinstance) preserved: subclasses are rejected.
    row_type = type(table_row)
    if row_type is Table.Row:
        return {coln: table_row[coln] for coln in table_row.colnames}
    if row_type in (dict, OrderedDict):
        return table_row
    log = get_logger()
    typ = row_type
    log.error(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
    raise TypeError(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
"astropy.table.Table",
"desispec.io.util.create_camword",
"desiutil.log.get_logger",
"numpy.array",
"numpy.ndarray",
"desispec.workflow.utils.pathjoin",
"desispec.workflow.exptable.default_obstypes_for_exptable",
"desispec.workflow.utils.define_variable_from_environment",
"desispec.io.util.differenc... | [((8941, 9040), 'desispec.workflow.utils.define_variable_from_environment', 'define_variable_from_environment', ([], {'env_name': '"""DESI_SPECTRO_REDUX"""', 'var_descr': '"""The specprod path"""'}), "(env_name='DESI_SPECTRO_REDUX', var_descr=\n 'The specprod path')\n", (8973, 9040), False, 'from desispec.workflow.utils import define_variable_from_environment, pathjoin\n'), ((9097, 9145), 'desispec.workflow.utils.pathjoin', 'pathjoin', (['basedir', 'specprod', '"""processing_tables"""'], {}), "(basedir, specprod, 'processing_tables')\n", (9105, 9145), False, 'from desispec.workflow.utils import define_variable_from_environment, pathjoin\n'), ((10363, 10389), 'desispec.workflow.utils.pathjoin', 'pathjoin', (['path', 'table_name'], {}), '(path, table_name)\n', (10371, 10389), False, 'from desispec.workflow.utils import define_variable_from_environment, pathjoin\n'), ((11617, 11655), 'astropy.table.Table', 'Table', ([], {'names': 'colnames', 'dtype': 'coldtypes'}), '(names=colnames, dtype=coldtypes)\n', (11622, 11655), False, 'from astropy.table import Table, vstack\n'), ((12920, 12932), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (12930, 12932), False, 'from desiutil.log import get_logger\n'), ((15832, 15844), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (15842, 15844), False, 'from desiutil.log import get_logger\n'), ((5086, 5109), 'numpy.array', 'np.array', (['[]'], {'dtype': 'str'}), '([], dtype=str)\n', (5094, 5109), True, 'import numpy as np\n'), ((7740, 7864), 'desispec.workflow.utils.define_variable_from_environment', 'define_variable_from_environment', ([], {'env_name': '"""SPECPROD"""', 'var_descr': '"""Use SPECPROD for unique processing table directories"""'}), "(env_name='SPECPROD', var_descr=\n 'Use SPECPROD for unique processing table directories')\n", (7772, 7864), False, 'from desispec.workflow.utils import define_variable_from_environment, pathjoin\n'), ((8754, 8878), 
'desispec.workflow.utils.define_variable_from_environment', 'define_variable_from_environment', ([], {'env_name': '"""SPECPROD"""', 'var_descr': '"""Use SPECPROD for unique processing table directories"""'}), "(env_name='SPECPROD', var_descr=\n 'Use SPECPROD for unique processing table directories')\n", (8786, 8878), False, 'from desispec.workflow.utils import define_variable_from_environment, pathjoin\n'), ((10059, 10183), 'desispec.workflow.utils.define_variable_from_environment', 'define_variable_from_environment', ([], {'env_name': '"""SPECPROD"""', 'var_descr': '"""Use SPECPROD for unique processing table directories"""'}), "(env_name='SPECPROD', var_descr=\n 'Use SPECPROD for unique processing table directories')\n", (10091, 10183), False, 'from desispec.workflow.utils import define_variable_from_environment, pathjoin\n'), ((13015, 13046), 'desispec.workflow.exptable.default_obstypes_for_exptable', 'default_obstypes_for_exptable', ([], {}), '()\n', (13044, 13046), False, 'from desispec.workflow.exptable import default_obstypes_for_exptable\n'), ((15044, 15093), 'astropy.table.Table', 'Table', ([], {'names': 'colnames', 'dtype': 'coldtypes', 'rows': 'rows'}), '(names=colnames, dtype=coldtypes, rows=rows)\n', (15049, 15093), False, 'from astropy.table import Table, vstack\n'), ((15131, 15169), 'astropy.table.Table', 'Table', ([], {'names': 'colnames', 'dtype': 'coldtypes'}), '(names=colnames, dtype=coldtypes)\n', (15136, 15169), False, 'from astropy.table import Table, vstack\n'), ((16165, 16197), 'numpy.array', 'np.array', (['colnames'], {'dtype': 'object'}), '(colnames, dtype=object)\n', (16173, 16197), True, 'import numpy as np\n'), ((16239, 16272), 'numpy.array', 'np.array', (['coldtypes'], {'dtype': 'object'}), '(coldtypes, dtype=object)\n', (16247, 16272), True, 'import numpy as np\n'), ((16314, 16349), 'numpy.array', 'np.array', (['coldefaults'], {'dtype': 'object'}), '(coldefaults, dtype=object)\n', (16322, 16349), True, 'import numpy as np\n'), 
((17604, 17634), 'desispec.io.util.parse_badamps', 'parse_badamps', (["prow['BADAMPS']"], {}), "(prow['BADAMPS'])\n", (17617, 17634), False, 'from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword\n'), ((17707, 17730), 'desispec.io.util.create_camword', 'create_camword', (['badcams'], {}), '(badcams)\n', (17721, 17730), False, 'from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword\n'), ((18108, 18163), 'desispec.io.util.difference_camwords', 'difference_camwords', (["prow['PROCCAMWORD']", 'newbadcamword'], {}), "(prow['PROCCAMWORD'], newbadcamword)\n", (18127, 18163), False, 'from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword\n'), ((14345, 14405), 'numpy.array', 'np.array', (["[(val in obstypes) for val in exptable['OBSTYPE']]"], {}), "([(val in obstypes) for val in exptable['OBSTYPE']])\n", (14353, 14405), True, 'import numpy as np\n'), ((16632, 16680), 'desispec.io.util.difference_camwords', 'difference_camwords', (["erow['CAMWORD']", 'badcamword'], {}), "(erow['CAMWORD'], badcamword)\n", (16651, 16680), False, 'from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword\n'), ((18939, 18951), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (18949, 18951), False, 'from desiutil.log import get_logger\n'), ((4845, 4864), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)'}), '(shape=0)\n', (4855, 4864), True, 'import numpy as np\n'), ((5818, 5837), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)'}), '(shape=0)\n', (5828, 5837), True, 'import numpy as np\n'), ((5851, 5870), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)'}), '(shape=0)\n', (5861, 5870), True, 'import numpy as np\n'), ((5884, 5903), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)'}), '(shape=0)\n', (5894, 5903), True, 'import numpy as np\n'), ((17089, 17110), 'numpy.array', 'np.array', (['[erow[nam]]'], {}), 
'([erow[nam]])\n', (17097, 17110), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GraphSAGE tests
"""
from tensorflow import keras
from tensorflow.keras import initializers, regularizers
import numpy as np
import pytest
from stellargraph.mapper import GraphSAGENodeGenerator
from stellargraph.layer.graphsage import (
GraphSAGE,
MeanAggregator,
MaxPoolingAggregator,
MeanPoolingAggregator,
AttentionalAggregator,
)
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
# Mean aggregator tests
def test_mean_agg_constructor():
    """Default MeanAggregator: relu activation and no bias term."""
    mean_agg = MeanAggregator(2)

    assert mean_agg.output_dim == 2
    assert not mean_agg.has_bias

    # The Keras config must round-trip the constructor arguments.
    cfg = mean_agg.get_config()
    assert cfg["output_dim"] == 2
    assert cfg["bias"] is False
    assert cfg["act"] == "relu"
def test_mean_agg_constructor_1():
    """MeanAggregator with explicit keyword arguments and a custom activation."""
    mean_agg = MeanAggregator(output_dim=4, bias=True, act=lambda x: x + 1)

    assert mean_agg.output_dim == 4
    assert mean_agg.has_bias
    # The custom activation callable is stored and applied as-is.
    assert mean_agg.act(2) == 3
def test_mean_agg_apply():
    """Numerical check of MeanAggregator on one head node with two neighbours."""
    mean_agg = MeanAggregator(5, bias=True, act=lambda x: x, kernel_initializer="ones")

    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(1, 2, 2))
    agg_out = mean_agg([self_in, neigh_in])

    # The 5 output units are split 3 (self) + 2 (neighbours).
    assert mean_agg.weight_dims == [3, 2]

    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    self_feats = np.array([[[1, 1]]])
    neigh_feats = np.array([[[[2, 2], [3, 3]]]])
    actual = model.predict([self_feats, neigh_feats])

    # self: [1,1] . ones + bias(0) -> 2 per slot; neighbour mean [2.5,2.5] -> 5 per slot.
    expected = np.array([[[2, 2, 2, 5, 5]]])
    assert expected == pytest.approx(actual)
def test_mean_agg_apply_groups():
    """MeanAggregator applied to a head node plus two separate neighbour groups.

    Fix: removed a leftover debug ``print(actual)`` that cluttered test output.
    """
    agg = MeanAggregator(11, bias=True, act=lambda x: x, kernel_initializer="ones")
    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 2, 2))
    inp3 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2, inp3])

    # The 11 output units are split 5 (self) + 3 + 3 (one slice per neighbour group).
    assert agg.weight_dims == [5, 3, 3]

    model = keras.Model(inputs=[inp1, inp2, inp3], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])
    x3 = np.array([[[[5, 5], [4, 4]]]])
    actual = model.predict([x1, x2, x3])

    # self -> 2 per slot; group means [2.5,2.5] -> 5 and [4.5,4.5] -> 9 per slot.
    expected = np.array([[[2] * 5 + [5] * 3 + [9] * 3]])
    assert expected == pytest.approx(actual)
def test_mean_agg_zero_neighbours():
    """With an empty neighbour slot only the self features contribute."""
    agg = MeanAggregator(4, bias=False, act=lambda x: x, kernel_initializer="ones")
    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(1, 0, 2))
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg([self_in, neigh_in]))

    actual = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    # self: [1,1] . ones -> 2 replicated across all four output units.
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# MaxPooling aggregator tests
def test_maxpool_agg_constructor():
    """Default MaxPoolingAggregator: relu activations, no bias.

    Fix: compare the boolean config flag with ``is False`` rather than
    ``== False``, matching the sibling constructor tests in this file.
    """
    agg = MaxPoolingAggregator(2, bias=False)
    assert agg.output_dim == 2
    assert agg.hidden_dim == 2
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    assert agg.hidden_act.__name__ == "relu"

    # The Keras config must round-trip the constructor arguments.
    config = agg.get_config()
    assert config["output_dim"] == 2
    assert config["bias"] is False
    assert config["act"] == "relu"
def test_maxpool_agg_constructor_1():
    """MaxPoolingAggregator with keyword arguments and a custom activation."""
    pool_agg = MaxPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)

    assert pool_agg.output_dim == 4
    # hidden_dim defaults to the output dimension.
    assert pool_agg.hidden_dim == 4
    assert pool_agg.has_bias
    assert pool_agg.act(2) == 3
def test_maxpool_agg_apply_hidden_bias():
    """bias=False disables the outer bias but keeps the hidden-layer bias.

    bias_initializer="ones" initialises every retained bias term to one.
    """
    agg = MaxPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"

    self_in = keras.Input(shape=(1, 2))  # head-node features
    neigh_in = keras.Input(shape=(1, 2, 2))  # neighbour features
    agg_out = agg([self_in, neigh_in])

    # One output column from self, one from the neighbour pool.
    assert agg.weight_dims == [1, 1]

    self_feats = np.array([[[1, 1]]])
    neigh_feats = np.array([[[[2, 2], [3, 3]]]])
    # neigh_agg = max(relu(x2 . ones(2x2)) + ones(2), axis=1) = max([[5,5],[7,7]]) = [7,7]
    # from_self = x1 . ones = [2]; from_neigh = neigh_agg . ones = [14]
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    actual = model.predict([self_feats, neigh_feats])
    expected = np.array([[[2, 14]]])
    assert expected == pytest.approx(actual)
def test_maxpool_agg_apply_no_bias():
    """Default bias_initializer is "zeros", so every bias term starts at zero."""
    agg = MaxPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"

    self_in = keras.Input(shape=(1, 2))  # head-node features
    neigh_in = keras.Input(shape=(1, 2, 2))  # neighbour features
    agg_out = agg([self_in, neigh_in])
    assert agg.weight_dims == [1, 1]

    self_feats = np.array([[[1, 1]]])
    neigh_feats = np.array([[[[2, 2], [3, 3]]]])
    # neigh_agg = max(relu(x2 . ones(2x2)) + zeros(2), axis=1) = max([[4,4],[6,6]]) = [6,6]
    # from_self = x1 . ones = [2]; from_neigh = neigh_agg . ones = [12]
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    actual = model.predict([self_feats, neigh_feats])
    expected = np.array([[[2, 12]]])
    assert expected == pytest.approx(actual)
def test_maxpool_agg_zero_neighbours():
    """An empty neighbour slot leaves only the self-feature contribution."""
    agg = MaxPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(1, 0, 2))
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg([self_in, neigh_in]))

    actual = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# MeanPooling aggregator tests
def test_meanpool_agg_constructor():
    """Default MeanPoolingAggregator: relu activations, no bias."""
    pool_agg = MeanPoolingAggregator(2, bias=False)
    assert pool_agg.output_dim == 2
    assert pool_agg.hidden_dim == 2
    assert not pool_agg.has_bias
    assert pool_agg.act.__name__ == "relu"
    assert pool_agg.hidden_act.__name__ == "relu"

    # The Keras config must round-trip the constructor arguments.
    cfg = pool_agg.get_config()
    assert cfg["output_dim"] == 2
    assert cfg["bias"] is False
    assert cfg["act"] == "relu"
def test_meanpool_agg_constructor_1():
    """MeanPoolingAggregator with keyword arguments and a custom activation."""
    pool_agg = MeanPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)

    assert pool_agg.output_dim == 4
    # hidden_dim defaults to the output dimension.
    assert pool_agg.hidden_dim == 4
    assert pool_agg.has_bias
    assert pool_agg.act(2) == 3
def test_meanpool_agg_apply_hidden_bias():
    """bias=False disables the outer bias but keeps the hidden-layer bias.

    bias_initializer="ones" initialises every retained bias term to one.
    """
    agg = MeanPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"

    self_in = keras.Input(shape=(1, 2))  # head-node features
    neigh_in = keras.Input(shape=(1, 2, 2))  # neighbour features
    agg_out = agg([self_in, neigh_in])
    assert agg.weight_dims == [1, 1]

    self_feats = np.array([[[1, 1]]])
    neigh_feats = np.array([[[[2, 2], [3, 3]]]])
    # neigh_agg = mean(relu(x2 . ones(2x2) + ones(2)), axis=1) = mean([[5,5],[7,7]]) = [6,6]
    # from_self = x1 . ones = [2]; from_neigh = neigh_agg . ones(2x1) = [12]
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    actual = model.predict([self_feats, neigh_feats])
    expected = np.array([[[2, 12]]])
    assert expected == pytest.approx(actual)
def test_meanpool_agg_apply_no_bias():
    """Default bias_initializer is "zeros", so every bias term starts at zero."""
    agg = MeanPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"

    self_in = keras.Input(shape=(1, 2))  # head-node features
    neigh_in = keras.Input(shape=(1, 2, 2))  # neighbour features
    agg_out = agg([self_in, neigh_in])
    assert agg.weight_dims == [1, 1]

    self_feats = np.array([[[1, 1]]])
    neigh_feats = np.array([[[[2, 2], [3, 3]]]])
    # neigh_agg = mean(relu(x2 . ones(2x2) + zeros(2)), axis=1) = mean([[4,4],[6,6]]) = [5,5]
    # from_self = x1 . ones = [2]; from_neigh = neigh_agg . ones = [10]
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    actual = model.predict([self_feats, neigh_feats])
    expected = np.array([[[2, 10]]])
    assert expected == pytest.approx(actual)
def test_meanpool_agg_zero_neighbours():
    """An empty neighbour slot leaves only the self-feature contribution."""
    agg = MeanPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(1, 0, 2))
    agg_out = agg([self_in, neigh_in])

    # With a zero-sized neighbour input the layer switches to an MLP on the
    # self features, so the first weight group carries the full output size.
    assert agg.weight_dims == [4, 0]

    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    actual = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
# Attentional aggregator tests
def test_attn_agg_constructor():
    """Default AttentionalAggregator: relu activation, no bias."""
    attn_agg = AttentionalAggregator(2, bias=False)
    assert attn_agg.output_dim == 2
    assert not attn_agg.has_bias
    assert attn_agg.act.__name__ == "relu"

    # The Keras config must round-trip the constructor arguments.
    cfg = attn_agg.get_config()
    assert cfg["output_dim"] == 2
    assert cfg["bias"] is False
    assert cfg["act"] == "relu"
def test_attn_agg_constructor_1():
    """AttentionalAggregator with keyword arguments and a custom activation."""
    attn_agg = AttentionalAggregator(output_dim=4, bias=True, act=lambda x: x + 1)

    assert attn_agg.output_dim == 4
    assert attn_agg.has_bias
    assert attn_agg.act(2) == 3
def test_attn_agg_apply():
    """Numerical check of the attention aggregator with linear activations."""
    agg = AttentionalAggregator(2, bias=False, act="linear", kernel_initializer="ones")
    # Replace the attention activation so the attention weights are easy to derive.
    agg.attn_act = keras.activations.get("linear")

    self_in = keras.Input(shape=(1, 2))  # head-node features
    neigh_in = keras.Input(shape=(1, 2, 2))  # neighbour features
    agg_out = agg([self_in, neigh_in])

    # The current AttentionalAggregator implementation reserves no output
    # dimensions for the head-node features themselves.
    assert agg.weight_dims == [0, 2]

    self_feats = np.array([[[1, 1]]])
    neigh_feats = np.array([[[[2, 2], [3, 3]]]])
    # hs = relu(x1 . ones(2x2)) = [2,2]
    # hn = relu(x2 . ones(2x2)) = [[2,2], [4,4], [6,6]]
    # attn_u = ones(2) . hs + ones(2) . hn = [8, 12, 16]
    # attn = softmax(attn_u) ~ [3.35e-4, 1.83e-2, 9.81e-1]
    # hout = attn . hn ~ [5.963, 5.963]
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg_out)
    actual = model.predict([self_feats, neigh_feats])
    expected = np.array([[[5.963, 5.963]]])
    assert expected == pytest.approx(actual, rel=1e-4)
def test_attn_agg_zero_neighbours():
    """An empty neighbour slot leaves only the self-feature contribution."""
    agg = AttentionalAggregator(4, bias=False, act="linear", kernel_initializer="ones")
    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(1, 0, 2))
    model = keras.Model(inputs=[self_in, neigh_in], outputs=agg([self_in, neigh_in]))

    actual = model.predict([np.array([[[1, 1]]]), np.zeros((1, 1, 0, 2))])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
def test_graphsage_constructor():
    """Check GraphSAGE construction from explicit arguments and from a generator,
    and that invalid arguments are rejected with the expected exceptions."""
    gs = GraphSAGE(
        layer_sizes=[4], n_samples=[2], input_dim=2, normalize="l2", multiplicity=1
    )
    # dims prepends the input dimension to the per-layer output sizes.
    assert gs.dims == [2, 4]
    assert gs.n_samples == [2]
    assert gs.max_hops == 1
    assert gs.bias
    assert len(gs._aggs) == 1

    # Check incorrect normalization flag
    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            normalize=lambda x: x,
            multiplicity=1,
        )

    with pytest.raises(ValueError):
        GraphSAGE(
            layer_sizes=[4],
            n_samples=[2],
            input_dim=2,
            normalize="unknown",
            multiplicity=1,
        )

    # Check requirement for generator or n_samples
    with pytest.raises(KeyError):
        GraphSAGE(layer_sizes=[4])

    # Construction from generator
    G = example_graph(feature_size=3)
    gen = GraphSAGENodeGenerator(G, batch_size=2, num_samples=[2, 2])
    gs = GraphSAGE(layer_sizes=[4, 8], generator=gen, bias=True)

    # The GraphSAGE should no longer accept a Sequence
    t_gen = gen.flow([1, 2])
    with pytest.raises(TypeError):
        gs = GraphSAGE(layer_sizes=[4, 8], generator=t_gen, bias=True)

    # The raising call above never rebinds `gs`, so these assertions still
    # check the generator-built model from the earlier construction.
    assert gs.dims == [3, 4, 8]
    assert gs.n_samples == [2, 2]
    assert gs.max_hops == 2
    assert gs.bias
    assert len(gs._aggs) == 2
def test_graphsage_constructor_passing_aggregator():
    """A custom aggregator class can be supplied; non-classes are rejected."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        multiplicity=1,
        aggregator=MeanAggregator,
    )
    assert (gs.dims, gs.n_samples, gs.max_hops) == ([2, 4], [2], 1)
    assert gs.bias
    assert len(gs._aggs) == 1

    # Anything that is not an aggregator class must raise.
    with pytest.raises(TypeError):
        GraphSAGE(
            layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1, aggregator=1
        )
def test_graphsage_constructor_1():
    """A three-layer constructor records all sizes, samples and the hop count."""
    gs = GraphSAGE(
        layer_sizes=[4, 6, 8],
        n_samples=[2, 4, 6],
        input_dim=2,
        multiplicity=1,
        bias=True,
        dropout=0.5,
    )
    # dims prepends the input dimension to the per-layer output sizes.
    assert gs.dims == [2, 4, 6, 8]
    assert gs.n_samples == [2, 4, 6]
    assert gs.max_hops == 3
    assert gs.bias
    # One aggregator per layer.
    assert len(gs._aggs) == 3
def test_graphsage_apply():
    """Smoke test: a one-layer GraphSAGE can be wired into a Keras model."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
        kernel_initializer="ones",
    )
    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(2, 2))
    # Building the model is the assertion: it fails if the layer wiring is broken.
    model = keras.Model(inputs=[self_in, neigh_in], outputs=gs([self_in, neigh_in]))
def test_graphsage_apply_1():
    """Numerical check of a three-layer GraphSAGE with all-ones kernels."""
    gs = GraphSAGE(
        layer_sizes=[2, 2, 2],
        n_samples=[2, 2, 2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
        kernel_initializer="ones",
    )
    # One input slot per hop: 1, 2, 4 and 8 sampled nodes with 2 features each.
    inputs = [keras.Input(shape=(size, 2)) for size in [1, 2, 4, 8]]
    model = keras.Model(inputs=inputs, outputs=gs(inputs))

    batch = [
        np.array([[[1, 1]]]),
        np.array([[[2, 2], [2, 2]]]),
        np.array([[[3, 3], [3, 3], [3, 3], [3, 3]]]),
        np.array([[[4, 4], [4, 4], [4, 4], [4, 4], [5, 5], [5, 5], [5, 5], [5, 5]]]),
    ]
    expected = np.array([[16, 25]])
    assert expected == pytest.approx(model.predict(batch))

    # The node model returned by build() must agree with the hand-wired model.
    xinp, xout = gs.build()
    node_model = keras.Model(inputs=xinp, outputs=xout)
    assert pytest.approx(expected) == node_model.predict(batch)
def test_graphsage_serialize():
    """A GraphSAGE model survives a JSON round-trip with custom objects."""
    gs = GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize=None,
    )
    self_in = keras.Input(shape=(1, 2))
    neigh_in = keras.Input(shape=(2, 2))
    model = keras.Model(inputs=[self_in, neigh_in], outputs=gs([self_in, neigh_in]))

    # Serialise the architecture, then restore it with every weight set to one.
    model_json = model.to_json()
    all_ones = [np.ones_like(w) for w in model.get_weights()]
    restored = keras.models.model_from_json(
        model_json, custom_objects={"MeanAggregator": MeanAggregator}
    )
    restored.set_weights(all_ones)

    # With all-ones weights: self [1,1] -> [2, 2]; neighbour mean [2.5, 2.5] -> [5, 5].
    head = np.array([[[1, 1]]])
    neighbours = np.array([[[2, 2], [3, 3]]])
    expected = np.array([[2, 2, 5, 5]])
    assert expected == pytest.approx(restored.predict([head, neighbours]))
def test_graphsage_zero_neighbours():
    """GraphSAGE with zero samples at every hop uses only the head features."""
    gs = GraphSAGE(
        layer_sizes=[2, 2],
        n_samples=[0, 0],
        bias=False,
        input_dim=2,
        multiplicity=1,
        normalize="none",
        kernel_initializer="ones",
    )
    inputs = [keras.Input(shape=(size, 2)) for size in [1, 0, 0]]
    model = keras.Model(inputs=inputs, outputs=gs(inputs))

    batch = [np.array([[[1.5, 1]]]), np.zeros((1, 0, 2)), np.zeros((1, 0, 2))]
    actual = model.predict(batch)
    # Two layers of all-ones kernels: 1.5 + 1 = 2.5 per unit, then 2 * 2.5 = 5.
    expected = np.array([[5, 5]])
    assert actual == pytest.approx(expected)
def test_graphsage_passing_activations():
    """Defaults are relu for hidden layers and linear for the final layer."""
    gs = GraphSAGE(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)
    assert gs.activations == ["linear"]

    gs = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    assert gs.activations == ["relu", "linear"]

    gs = GraphSAGE(
        layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2, multiplicity=1
    )
    assert gs.activations == ["relu", "relu", "linear"]

    # Too-short lists and unknown activation names must be rejected.
    for bad_activations in (["relu"], ["relu"] * 2, ["fred", "wilma", "barney"]):
        with pytest.raises(ValueError):
            GraphSAGE(
                layer_sizes=[4, 4, 4],
                n_samples=[2, 2, 2],
                input_dim=2,
                multiplicity=1,
                activations=bad_activations,
            )

    # An explicit, valid activation list is stored unchanged.
    gs = GraphSAGE(
        layer_sizes=[4, 4, 4],
        n_samples=[2, 2, 2],
        input_dim=2,
        multiplicity=1,
        activations=["linear"] * 3,
    )
    assert gs.activations == ["linear"] * 3
def test_graphsage_passing_regularisers():
    """Initializers/regularizers accept names or objects; unknown names fail."""
    base_args = dict(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)

    # An unrecognised initializer name is rejected.
    with pytest.raises(ValueError):
        GraphSAGE(**base_args, kernel_initializer="fred")

    # A known initializer name and an initializer instance are both accepted.
    GraphSAGE(**base_args, kernel_initializer="ones")
    GraphSAGE(**base_args, kernel_initializer=initializers.ones())

    # A regularizer instance is accepted ...
    GraphSAGE(**base_args, kernel_regularizer=regularizers.l2(0.01))

    # ... but an unrecognised regularizer name is rejected.
    with pytest.raises(ValueError):
        GraphSAGE(**base_args, kernel_regularizer="wilma")
| [
"stellargraph.layer.graphsage.AttentionalAggregator",
"pytest.approx",
"numpy.ones_like",
"stellargraph.layer.graphsage.MaxPoolingAggregator",
"tensorflow.keras.activations.get",
"tensorflow.keras.models.model_from_json",
"tensorflow.keras.initializers.ones",
"stellargraph.layer.graphsage.MeanAggregat... | [((1173, 1190), 'stellargraph.layer.graphsage.MeanAggregator', 'MeanAggregator', (['(2)'], {}), '(2)\n', (1187, 1190), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((1454, 1514), 'stellargraph.layer.graphsage.MeanAggregator', 'MeanAggregator', ([], {'output_dim': '(4)', 'bias': '(True)', 'act': '(lambda x: x + 1)'}), '(output_dim=4, bias=True, act=lambda x: x + 1)\n', (1468, 1514), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((1636, 1708), 'stellargraph.layer.graphsage.MeanAggregator', 'MeanAggregator', (['(5)'], {'bias': '(True)', 'act': '(lambda x: x)', 'kernel_initializer': '"""ones"""'}), "(5, bias=True, act=lambda x: x, kernel_initializer='ones')\n", (1650, 1708), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((1720, 1745), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (1731, 1745), False, 'from tensorflow import keras\n'), ((1757, 1785), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (1768, 1785), False, 'from tensorflow import keras\n'), ((1865, 1910), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (1876, 1910), False, 'from tensorflow import keras\n'), ((1920, 1940), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (1928, 1940), True, 'import numpy as np\n'), ((1950, 1980), 'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (1958, 1980), True, 'import numpy as np\n'), ((2033, 2062), 'numpy.array', 'np.array', (['[[[2, 2, 2, 5, 5]]]'], {}), '([[[2, 2, 2, 5, 5]]])\n', (2041, 2062), 
True, 'import numpy as np\n'), ((2154, 2227), 'stellargraph.layer.graphsage.MeanAggregator', 'MeanAggregator', (['(11)'], {'bias': '(True)', 'act': '(lambda x: x)', 'kernel_initializer': '"""ones"""'}), "(11, bias=True, act=lambda x: x, kernel_initializer='ones')\n", (2168, 2227), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((2239, 2264), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (2250, 2264), False, 'from tensorflow import keras\n'), ((2276, 2304), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (2287, 2304), False, 'from tensorflow import keras\n'), ((2316, 2344), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (2327, 2344), False, 'from tensorflow import keras\n'), ((2433, 2484), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2, inp3]', 'outputs': 'out'}), '(inputs=[inp1, inp2, inp3], outputs=out)\n', (2444, 2484), False, 'from tensorflow import keras\n'), ((2494, 2514), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (2502, 2514), True, 'import numpy as np\n'), ((2524, 2554), 'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (2532, 2554), True, 'import numpy as np\n'), ((2564, 2594), 'numpy.array', 'np.array', (['[[[[5, 5], [4, 4]]]]'], {}), '([[[[5, 5], [4, 4]]]])\n', (2572, 2594), True, 'import numpy as np\n'), ((2671, 2712), 'numpy.array', 'np.array', (['[[[2] * 5 + [5] * 3 + [9] * 3]]'], {}), '([[[2] * 5 + [5] * 3 + [9] * 3]])\n', (2679, 2712), True, 'import numpy as np\n'), ((2807, 2880), 'stellargraph.layer.graphsage.MeanAggregator', 'MeanAggregator', (['(4)'], {'bias': '(False)', 'act': '(lambda x: x)', 'kernel_initializer': '"""ones"""'}), "(4, bias=False, act=lambda x: x, kernel_initializer='ones')\n", (2821, 2880), False, 'from 
stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((2893, 2918), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (2904, 2918), False, 'from tensorflow import keras\n'), ((2930, 2958), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 0, 2)'}), '(shape=(1, 0, 2))\n', (2941, 2958), False, 'from tensorflow import keras\n'), ((3000, 3045), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (3011, 3045), False, 'from tensorflow import keras\n'), ((3056, 3076), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (3064, 3076), True, 'import numpy as np\n'), ((3086, 3108), 'numpy.zeros', 'np.zeros', (['(1, 1, 0, 2)'], {}), '((1, 1, 0, 2))\n', (3094, 3108), True, 'import numpy as np\n'), ((3162, 3188), 'numpy.array', 'np.array', (['[[[2, 2, 2, 2]]]'], {}), '([[[2, 2, 2, 2]]])\n', (3170, 3188), True, 'import numpy as np\n'), ((3312, 3347), 'stellargraph.layer.graphsage.MaxPoolingAggregator', 'MaxPoolingAggregator', (['(2)'], {'bias': '(False)'}), '(2, bias=False)\n', (3332, 3347), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((3728, 3794), 'stellargraph.layer.graphsage.MaxPoolingAggregator', 'MaxPoolingAggregator', ([], {'output_dim': '(4)', 'bias': '(True)', 'act': '(lambda x: x + 1)'}), '(output_dim=4, bias=True, act=lambda x: x + 1)\n', (3748, 3794), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((4107, 4212), 'stellargraph.layer.graphsage.MaxPoolingAggregator', 'MaxPoolingAggregator', (['(2)'], {'bias': '(False)', 'act': '"""linear"""', 'kernel_initializer': '"""ones"""', 'bias_initializer': '"""ones"""'}), "(2, bias=False, act='linear', 
kernel_initializer='ones',\n bias_initializer='ones')\n", (4127, 4212), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((4401, 4426), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (4412, 4426), False, 'from tensorflow import keras\n'), ((4463, 4491), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (4474, 4491), False, 'from tensorflow import keras\n'), ((4614, 4634), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (4622, 4634), True, 'import numpy as np\n'), ((4644, 4674), 'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (4652, 4674), True, 'import numpy as np\n'), ((4893, 4938), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (4904, 4938), False, 'from tensorflow import keras\n'), ((4991, 5012), 'numpy.array', 'np.array', (['[[[2, 14]]]'], {}), '([[[2, 14]]])\n', (4999, 5012), True, 'import numpy as np\n'), ((5198, 5262), 'stellargraph.layer.graphsage.MaxPoolingAggregator', 'MaxPoolingAggregator', (['(2)'], {'act': '"""linear"""', 'kernel_initializer': '"""ones"""'}), "(2, act='linear', kernel_initializer='ones')\n", (5218, 5262), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((5442, 5467), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (5453, 5467), False, 'from tensorflow import keras\n'), ((5504, 5532), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (5515, 5532), False, 'from tensorflow import keras\n'), ((5655, 5675), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (5663, 5675), True, 'import numpy as np\n'), ((5685, 5715), 
'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (5693, 5715), True, 'import numpy as np\n'), ((5935, 5980), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (5946, 5980), False, 'from tensorflow import keras\n'), ((6033, 6054), 'numpy.array', 'np.array', (['[[[2, 12]]]'], {}), '([[[2, 12]]])\n', (6041, 6054), True, 'import numpy as np\n'), ((6153, 6229), 'stellargraph.layer.graphsage.MaxPoolingAggregator', 'MaxPoolingAggregator', (['(4)'], {'bias': '(False)', 'act': '"""linear"""', 'kernel_initializer': '"""ones"""'}), "(4, bias=False, act='linear', kernel_initializer='ones')\n", (6173, 6229), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((6242, 6267), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (6253, 6267), False, 'from tensorflow import keras\n'), ((6279, 6307), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 0, 2)'}), '(shape=(1, 0, 2))\n', (6290, 6307), False, 'from tensorflow import keras\n'), ((6349, 6394), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (6360, 6394), False, 'from tensorflow import keras\n'), ((6405, 6425), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (6413, 6425), True, 'import numpy as np\n'), ((6435, 6457), 'numpy.zeros', 'np.zeros', (['(1, 1, 0, 2)'], {}), '((1, 1, 0, 2))\n', (6443, 6457), True, 'import numpy as np\n'), ((6511, 6537), 'numpy.array', 'np.array', (['[[[2, 2, 2, 2]]]'], {}), '([[[2, 2, 2, 2]]])\n', (6519, 6537), True, 'import numpy as np\n'), ((6663, 6699), 'stellargraph.layer.graphsage.MeanPoolingAggregator', 'MeanPoolingAggregator', (['(2)'], {'bias': '(False)'}), '(2, bias=False)\n', (6684, 6699), False, 'from stellargraph.layer.graphsage 
import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((7081, 7148), 'stellargraph.layer.graphsage.MeanPoolingAggregator', 'MeanPoolingAggregator', ([], {'output_dim': '(4)', 'bias': '(True)', 'act': '(lambda x: x + 1)'}), '(output_dim=4, bias=True, act=lambda x: x + 1)\n', (7102, 7148), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((7462, 7569), 'stellargraph.layer.graphsage.MeanPoolingAggregator', 'MeanPoolingAggregator', (['(2)'], {'bias': '(False)', 'act': '"""linear"""', 'kernel_initializer': '"""ones"""', 'bias_initializer': '"""ones"""'}), "(2, bias=False, act='linear', kernel_initializer=\n 'ones', bias_initializer='ones')\n", (7483, 7569), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((7757, 7782), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (7768, 7782), False, 'from tensorflow import keras\n'), ((7819, 7847), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (7830, 7847), False, 'from tensorflow import keras\n'), ((7971, 7991), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (7979, 7991), True, 'import numpy as np\n'), ((8001, 8031), 'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (8009, 8031), True, 'import numpy as np\n'), ((8265, 8310), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (8276, 8310), False, 'from tensorflow import keras\n'), ((8363, 8384), 'numpy.array', 'np.array', (['[[[2, 12]]]'], {}), '([[[2, 12]]])\n', (8371, 8384), True, 'import numpy as np\n'), ((8571, 8636), 'stellargraph.layer.graphsage.MeanPoolingAggregator', 'MeanPoolingAggregator', (['(2)'], 
{'act': '"""linear"""', 'kernel_initializer': '"""ones"""'}), "(2, act='linear', kernel_initializer='ones')\n", (8592, 8636), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((8816, 8841), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (8827, 8841), False, 'from tensorflow import keras\n'), ((8878, 8906), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (8889, 8906), False, 'from tensorflow import keras\n'), ((9030, 9050), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (9038, 9050), True, 'import numpy as np\n'), ((9060, 9090), 'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (9068, 9090), True, 'import numpy as np\n'), ((9320, 9365), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (9331, 9365), False, 'from tensorflow import keras\n'), ((9418, 9439), 'numpy.array', 'np.array', (['[[[2, 10]]]'], {}), '([[[2, 10]]])\n', (9426, 9439), True, 'import numpy as np\n'), ((9539, 9616), 'stellargraph.layer.graphsage.MeanPoolingAggregator', 'MeanPoolingAggregator', (['(4)'], {'bias': '(False)', 'act': '"""linear"""', 'kernel_initializer': '"""ones"""'}), "(4, bias=False, act='linear', kernel_initializer='ones')\n", (9560, 9616), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((9629, 9654), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (9640, 9654), False, 'from tensorflow import keras\n'), ((9666, 9694), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 0, 2)'}), '(shape=(1, 0, 2))\n', (9677, 9694), False, 'from tensorflow import keras\n'), ((9913, 9958), 'tensorflow.keras.Model', 'keras.Model', ([], 
{'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (9924, 9958), False, 'from tensorflow import keras\n'), ((9968, 9988), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (9976, 9988), True, 'import numpy as np\n'), ((9998, 10020), 'numpy.zeros', 'np.zeros', (['(1, 1, 0, 2)'], {}), '((1, 1, 0, 2))\n', (10006, 10020), True, 'import numpy as np\n'), ((10074, 10100), 'numpy.array', 'np.array', (['[[[2, 2, 2, 2]]]'], {}), '([[[2, 2, 2, 2]]])\n', (10082, 10100), True, 'import numpy as np\n'), ((10222, 10258), 'stellargraph.layer.graphsage.AttentionalAggregator', 'AttentionalAggregator', (['(2)'], {'bias': '(False)'}), '(2, bias=False)\n', (10243, 10258), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((10605, 10672), 'stellargraph.layer.graphsage.AttentionalAggregator', 'AttentionalAggregator', ([], {'output_dim': '(4)', 'bias': '(True)', 'act': '(lambda x: x + 1)'}), '(output_dim=4, bias=True, act=lambda x: x + 1)\n', (10626, 10672), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((10794, 10871), 'stellargraph.layer.graphsage.AttentionalAggregator', 'AttentionalAggregator', (['(2)'], {'bias': '(False)', 'act': '"""linear"""', 'kernel_initializer': '"""ones"""'}), "(2, bias=False, act='linear', kernel_initializer='ones')\n", (10815, 10871), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((10891, 10922), 'tensorflow.keras.activations.get', 'keras.activations.get', (['"""linear"""'], {}), "('linear')\n", (10912, 10922), False, 'from tensorflow import keras\n'), ((10955, 10980), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (10966, 10980), False, 'from tensorflow import keras\n'), 
((11017, 11045), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2, 2)'}), '(shape=(1, 2, 2))\n', (11028, 11045), False, 'from tensorflow import keras\n'), ((11297, 11317), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (11305, 11317), True, 'import numpy as np\n'), ((11327, 11357), 'numpy.array', 'np.array', (['[[[[2, 2], [3, 3]]]]'], {}), '([[[[2, 2], [3, 3]]]])\n', (11335, 11357), True, 'import numpy as np\n'), ((11640, 11685), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (11651, 11685), False, 'from tensorflow import keras\n'), ((11739, 11767), 'numpy.array', 'np.array', (['[[[5.963, 5.963]]]'], {}), '([[[5.963, 5.963]]])\n', (11747, 11767), True, 'import numpy as np\n'), ((11873, 11950), 'stellargraph.layer.graphsage.AttentionalAggregator', 'AttentionalAggregator', (['(4)'], {'bias': '(False)', 'act': '"""linear"""', 'kernel_initializer': '"""ones"""'}), "(4, bias=False, act='linear', kernel_initializer='ones')\n", (11894, 11950), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((11963, 11988), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (11974, 11988), False, 'from tensorflow import keras\n'), ((12000, 12028), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 0, 2)'}), '(shape=(1, 0, 2))\n', (12011, 12028), False, 'from tensorflow import keras\n'), ((12070, 12115), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (12081, 12115), False, 'from tensorflow import keras\n'), ((12126, 12146), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (12134, 12146), True, 'import numpy as np\n'), ((12156, 12178), 'numpy.zeros', 'np.zeros', (['(1, 1, 0, 2)'], {}), '((1, 1, 0, 2))\n', (12164, 12178), True, 
'import numpy as np\n'), ((12232, 12258), 'numpy.array', 'np.array', (['[[[2, 2, 2, 2]]]'], {}), '([[[2, 2, 2, 2]]])\n', (12240, 12258), True, 'import numpy as np\n'), ((12349, 12439), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'normalize': '"""l2"""', 'multiplicity': '(1)'}), "(layer_sizes=[4], n_samples=[2], input_dim=2, normalize='l2',\n multiplicity=1)\n", (12358, 12439), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((13250, 13309), 'stellargraph.mapper.GraphSAGENodeGenerator', 'GraphSAGENodeGenerator', (['G'], {'batch_size': '(2)', 'num_samples': '[2, 2]'}), '(G, batch_size=2, num_samples=[2, 2])\n', (13272, 13309), False, 'from stellargraph.mapper import GraphSAGENodeGenerator\n'), ((13319, 13374), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 8]', 'generator': 'gen', 'bias': '(True)'}), '(layer_sizes=[4, 8], generator=gen, bias=True)\n', (13328, 13374), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((13774, 13875), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'aggregator': 'MeanAggregator'}), '(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1,\n aggregator=MeanAggregator)\n', (13783, 13875), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((14254, 14364), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 6, 8]', 'n_samples': '[2, 4, 6]', 'input_dim': '(2)', 'multiplicity': '(1)', 'bias': '(True)', 'dropout': '(0.5)'}), '(layer_sizes=[4, 6, 8], n_samples=[2, 4, 6], input_dim=2,\n multiplicity=1, bias=True, 
dropout=0.5)\n', (14263, 14364), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((14604, 14733), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'bias': '(False)', 'input_dim': '(2)', 'multiplicity': '(1)', 'normalize': 'None', 'kernel_initializer': '"""ones"""'}), "(layer_sizes=[4], n_samples=[2], bias=False, input_dim=2,\n multiplicity=1, normalize=None, kernel_initializer='ones')\n", (14613, 14733), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((14805, 14830), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (14816, 14830), False, 'from tensorflow import keras\n'), ((14842, 14867), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(2, 2)'}), '(shape=(2, 2))\n', (14853, 14867), False, 'from tensorflow import keras\n'), ((14907, 14952), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (14918, 14952), False, 'from tensorflow import keras\n'), ((14994, 15136), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[2, 2, 2]', 'n_samples': '[2, 2, 2]', 'bias': '(False)', 'input_dim': '(2)', 'multiplicity': '(1)', 'normalize': 'None', 'kernel_initializer': '"""ones"""'}), "(layer_sizes=[2, 2, 2], n_samples=[2, 2, 2], bias=False, input_dim\n =2, multiplicity=1, normalize=None, kernel_initializer='ones')\n", (15003, 15136), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((15286, 15322), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inp', 'outputs': 'out'}), '(inputs=inp, outputs=out)\n', (15297, 15322), False, 'from tensorflow import keras\n'), ((15563, 
15583), 'numpy.array', 'np.array', (['[[16, 25]]'], {}), '([[16, 25]])\n', (15571, 15583), True, 'import numpy as np\n'), ((15728, 15766), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'xinp', 'outputs': 'xout'}), '(inputs=xinp, outputs=xout)\n', (15739, 15766), False, 'from tensorflow import keras\n'), ((15866, 15968), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'bias': '(False)', 'input_dim': '(2)', 'multiplicity': '(1)', 'normalize': 'None'}), '(layer_sizes=[4], n_samples=[2], bias=False, input_dim=2,\n multiplicity=1, normalize=None)\n', (15875, 15968), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((16032, 16057), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (16043, 16057), False, 'from tensorflow import keras\n'), ((16069, 16094), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(2, 2)'}), '(shape=(2, 2))\n', (16080, 16094), False, 'from tensorflow import keras\n'), ((16134, 16179), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[inp1, inp2]', 'outputs': 'out'}), '(inputs=[inp1, inp2], outputs=out)\n', (16145, 16179), False, 'from tensorflow import keras\n'), ((16387, 16482), 'tensorflow.keras.models.model_from_json', 'keras.models.model_from_json', (['model_json'], {'custom_objects': "{'MeanAggregator': MeanAggregator}"}), "(model_json, custom_objects={'MeanAggregator':\n MeanAggregator})\n", (16415, 16482), False, 'from tensorflow import keras\n'), ((16565, 16585), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (16573, 16585), True, 'import numpy as np\n'), ((16595, 16623), 'numpy.array', 'np.array', (['[[[2, 2], [3, 3]]]'], {}), '([[[2, 2], [3, 3]]])\n', (16603, 16623), True, 'import numpy as np\n'), ((16639, 16663), 'numpy.array', 'np.array', (['[[2, 2, 5, 5]]'], {}), '([[2, 2, 5, 5]])\n', (16647, 16663), 
True, 'import numpy as np\n'), ((16797, 16934), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[2, 2]', 'n_samples': '[0, 0]', 'bias': '(False)', 'input_dim': '(2)', 'multiplicity': '(1)', 'normalize': '"""none"""', 'kernel_initializer': '"""ones"""'}), "(layer_sizes=[2, 2], n_samples=[0, 0], bias=False, input_dim=2,\n multiplicity=1, normalize='none', kernel_initializer='ones')\n", (16806, 16934), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((17082, 17118), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inp', 'outputs': 'out'}), '(inputs=inp, outputs=out)\n', (17093, 17118), False, 'from tensorflow import keras\n'), ((17241, 17259), 'numpy.array', 'np.array', (['[[5, 5]]'], {}), '([[5, 5]])\n', (17249, 17259), True, 'import numpy as np\n'), ((17358, 17428), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'multiplicity': '(1)'}), '(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1)\n', (17367, 17428), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((17479, 17555), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 4]', 'n_samples': '[2, 2]', 'input_dim': '(2)', 'multiplicity': '(1)'}), '(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)\n', (17488, 17555), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((17614, 17700), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 4, 4]', 'n_samples': '[2, 2, 2]', 'input_dim': '(2)', 'multiplicity': '(1)'}), '(layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2,\n multiplicity=1)\n', (17623, 17700), False, 'from 
stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((18463, 18577), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 4, 4]', 'n_samples': '[2, 2, 2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'activations': "(['linear'] * 3)"}), "(layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2,\n multiplicity=1, activations=['linear'] * 3)\n", (18472, 18577), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((18928, 19029), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'kernel_initializer': '"""ones"""'}), "(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1,\n kernel_initializer='ones')\n", (18937, 19029), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((2086, 2107), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (2099, 2107), False, 'import pytest\n'), ((2736, 2757), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (2749, 2757), False, 'import pytest\n'), ((3212, 3233), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (3225, 3233), False, 'import pytest\n'), ((5037, 5058), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (5050, 5058), False, 'import pytest\n'), ((6079, 6100), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (6092, 6100), False, 'import pytest\n'), ((6561, 6582), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (6574, 6582), False, 'import pytest\n'), ((8409, 8430), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (8422, 8430), False, 'import pytest\n'), ((9464, 9485), 'pytest.approx', 'pytest.approx', (['actual'], 
{}), '(actual)\n', (9477, 9485), False, 'import pytest\n'), ((10124, 10145), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (10137, 10145), False, 'import pytest\n'), ((11792, 11825), 'pytest.approx', 'pytest.approx', (['actual'], {'rel': '(0.0001)'}), '(actual, rel=0.0001)\n', (11805, 11825), False, 'import pytest\n'), ((12282, 12303), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (12295, 12303), False, 'import pytest\n'), ((12638, 12663), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12651, 12663), False, 'import pytest\n'), ((12673, 12770), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'normalize': '(lambda x: x)', 'multiplicity': '(1)'}), '(layer_sizes=[4], n_samples=[2], input_dim=2, normalize=lambda x:\n x, multiplicity=1)\n', (12682, 12770), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((12848, 12873), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12861, 12873), False, 'import pytest\n'), ((12883, 12978), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'normalize': '"""unknown"""', 'multiplicity': '(1)'}), "(layer_sizes=[4], n_samples=[2], input_dim=2, normalize='unknown',\n multiplicity=1)\n", (12892, 12978), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((13107, 13130), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (13120, 13130), False, 'import pytest\n'), ((13140, 13166), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]'}), '(layer_sizes=[4])\n', (13149, 13166), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, 
MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((13469, 13493), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (13482, 13493), False, 'import pytest\n'), ((13508, 13565), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 8]', 'generator': 't_gen', 'bias': '(True)'}), '(layer_sizes=[4, 8], generator=t_gen, bias=True)\n', (13517, 13565), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((14066, 14090), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (14079, 14090), False, 'import pytest\n'), ((14100, 14188), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'aggregator': '(1)'}), '(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1,\n aggregator=1)\n', (14109, 14188), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((15207, 15232), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(i, 2)'}), '(shape=(i, 2))\n', (15218, 15232), False, 'from tensorflow import keras\n'), ((15342, 15362), 'numpy.array', 'np.array', (['[[[1, 1]]]'], {}), '([[[1, 1]]])\n', (15350, 15362), True, 'import numpy as np\n'), ((15372, 15400), 'numpy.array', 'np.array', (['[[[2, 2], [2, 2]]]'], {}), '([[[2, 2], [2, 2]]])\n', (15380, 15400), True, 'import numpy as np\n'), ((15410, 15454), 'numpy.array', 'np.array', (['[[[3, 3], [3, 3], [3, 3], [3, 3]]]'], {}), '([[[3, 3], [3, 3], [3, 3], [3, 3]]])\n', (15418, 15454), True, 'import numpy as np\n'), ((15464, 15540), 'numpy.array', 'np.array', (['[[[4, 4], [4, 4], [4, 4], [4, 4], [5, 5], [5, 5], [5, 5], [5, 5]]]'], {}), '([[[4, 4], [4, 4], [4, 4], [4, 4], [5, 5], [5, 5], [5, 5], [5, 5]]])\n', (15472, 15540), True, 'import numpy as np\n'), 
((15638, 15659), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (15651, 15659), False, 'import pytest\n'), ((15778, 15801), 'pytest.approx', 'pytest.approx', (['expected'], {}), '(expected)\n', (15791, 15801), False, 'import pytest\n'), ((16282, 16297), 'numpy.ones_like', 'np.ones_like', (['w'], {}), '(w)\n', (16294, 16297), True, 'import numpy as np\n'), ((16726, 16747), 'pytest.approx', 'pytest.approx', (['actual'], {}), '(actual)\n', (16739, 16747), False, 'import pytest\n'), ((17006, 17031), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(i, 2)'}), '(shape=(i, 2))\n', (17017, 17031), False, 'from tensorflow import keras\n'), ((17129, 17151), 'numpy.array', 'np.array', (['[[[1.5, 1]]]'], {}), '([[[1.5, 1]]])\n', (17137, 17151), True, 'import numpy as np\n'), ((17153, 17172), 'numpy.zeros', 'np.zeros', (['(1, 0, 2)'], {}), '((1, 0, 2))\n', (17161, 17172), True, 'import numpy as np\n'), ((17174, 17193), 'numpy.zeros', 'np.zeros', (['(1, 0, 2)'], {}), '((1, 0, 2))\n', (17182, 17193), True, 'import numpy as np\n'), ((17281, 17304), 'pytest.approx', 'pytest.approx', (['expected'], {}), '(expected)\n', (17294, 17304), False, 'import pytest\n'), ((17777, 17802), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17790, 17802), False, 'import pytest\n'), ((17812, 17920), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 4, 4]', 'n_samples': '[2, 2, 2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'activations': "['relu']"}), "(layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2,\n multiplicity=1, activations=['relu'])\n", (17821, 17920), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((17998, 18023), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18011, 18023), False, 'import pytest\n'), ((18033, 18145), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', 
([], {'layer_sizes': '[4, 4, 4]', 'n_samples': '[2, 2, 2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'activations': "(['relu'] * 2)"}), "(layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2,\n multiplicity=1, activations=['relu'] * 2)\n", (18042, 18145), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((18223, 18248), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18236, 18248), False, 'import pytest\n'), ((18258, 18385), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4, 4, 4]', 'n_samples': '[2, 2, 2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'activations': "['fred', 'wilma', 'barney']"}), "(layer_sizes=[4, 4, 4], n_samples=[2, 2, 2], input_dim=2,\n multiplicity=1, activations=['fred', 'wilma', 'barney'])\n", (18267, 18385), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((18719, 18744), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18732, 18744), False, 'import pytest\n'), ((18754, 18855), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'kernel_initializer': '"""fred"""'}), "(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1,\n kernel_initializer='fred')\n", (18763, 18855), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((19411, 19436), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19424, 19436), False, 'import pytest\n'), ((19446, 19548), 'stellargraph.layer.graphsage.GraphSAGE', 'GraphSAGE', ([], {'layer_sizes': '[4]', 'n_samples': '[2]', 'input_dim': '(2)', 'multiplicity': '(1)', 'kernel_regularizer': '"""wilma"""'}), 
"(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1,\n kernel_regularizer='wilma')\n", (19455, 19548), False, 'from stellargraph.layer.graphsage import GraphSAGE, MeanAggregator, MaxPoolingAggregator, MeanPoolingAggregator, AttentionalAggregator\n'), ((19209, 19228), 'tensorflow.keras.initializers.ones', 'initializers.ones', ([], {}), '()\n', (19226, 19228), False, 'from tensorflow.keras import initializers, regularizers\n'), ((19372, 19393), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (19387, 19393), False, 'from tensorflow.keras import initializers, regularizers\n')] |
#######################################################################
# Copyright (C) #
# 2018 <NAME> (<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
#
# This is a reproduction of the plot shown in Example 13.1
# in Chapter 13, "Policy Gradient Methods". Book draft May 27, 2018.
#
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def f(p):
    """Closed-form true value of the first state of the short corridor.

    Args:
        p (float or ndarray): probability of choosing the action 'right'.

    Returns:
        The true value of the first state.  The expression comes from
        solving the small linear system of Bellman equations by hand,
        using the known dynamics of the corridor.
    """
    return (2.0 * p - 4.0) / (p * (1.0 - p))
epsilon = 0.05

fig, ax = plt.subplots(1, 1)

# Draw the true first-state value across the whole range of policies.
p = np.linspace(0.01, 0.99, 100)
y = f(p)
ax.plot(p, y, color='red')

# Locate the maximum numerically (a derivative-based derivation works too).
imax = np.argmax(y)
pmax, ymax = p[imax], y[imax]
ax.plot(pmax, ymax, marker="*", color='green',
        label=f"optimal point: f({pmax:.2f}) = {ymax:.2f}")

# Mark the values achieved by the two epsilon-greedy policies.
ax.plot(epsilon, f(epsilon), marker="o", color='magenta',
        label="epsilon-greedy left")
ax.plot(1 - epsilon, f(1 - epsilon), marker="o", color='blue',
        label="epsilon-greedy right")

ax.set_xlabel("Probability of the action 'right'")
ax.set_ylabel("Value of the first state")
ax.set_title("Short corridor with switched actions")
ax.set_ylim(ymin=-105.0, ymax=5)
ax.legend()
fig.tight_layout()
plt.show()
| [
"matplotlib.use",
"numpy.argmax",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((577, 600), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (591, 600), False, 'import matplotlib\n'), ((1001, 1019), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1013, 1019), True, 'import matplotlib.pyplot as plt\n'), ((1041, 1069), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.99)', '(100)'], {}), '(0.01, 0.99, 100)\n', (1052, 1069), True, 'import numpy as np\n'), ((1191, 1203), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (1200, 1203), True, 'import numpy as np\n'), ((1782, 1792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1790, 1792), True, 'import matplotlib.pyplot as plt\n')] |
import torch
import numpy as np
from ialgebra.utils.utils_data import preprocess_fn
from ialgebra.utils.utils_interpreter import resize_postfn, generate_map
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Interpreter(object):
    """Base class for batched saliency-map interpreters.

    Holds a pretrained model plus shared pre/post-processing helpers and
    drives interpretation over mini-batches.  Subclasses implement the
    per-batch logic in :meth:`interpret_per_batch`.
    """

    def __init__(self, pretrained_model=None, dataset=None, target_layer=None):
        """
        :param pretrained_model: model to interpret; must expose ``.features``.
        :param dataset: name of the dataset the model was trained on.
        :param target_layer: layer whose activations/gradients are inspected.
        """
        super(Interpreter, self).__init__()
        self.pretrained_model = pretrained_model
        self.dataset = dataset
        self.features = self.pretrained_model.features
        self.preprocess_fn = preprocess_fn
        self.device = device
        self.resize_postfn = resize_postfn
        self.generate_map = generate_map
        self.target_layer = target_layer

    def __call__(self, bx, by, batch_size=None):
        return self.interpret(bx, by, batch_size=batch_size)

    def get_default_config(self):
        # Hook for subclasses: return interpreter-specific default settings.
        pass

    def interpret_per_batch(self, bxn, byn):
        # Hook for subclasses: return (saliency_map, saliency_map_over_image)
        # as numpy arrays for a single mini-batch.
        pass

    def interpret(self, bx, by, batch_size=None):
        """Interpret ``bx``/``by`` mini-batch by mini-batch.

        :param bx: input batch tensor, shape (N, C, H, W), or a single
            (C, H, W) sample which is promoted to a batch of one.
        :param by: corresponding labels.
        :param batch_size: mini-batch size; defaults to the whole batch.
        :returns: tuple of numpy arrays ``(maps, map_images)`` concatenated
            over the batch dimension.
        """
        bx = bx.unsqueeze(0) if len(bx.size()) != 4 else bx
        batch_size = len(bx) if batch_size is None else batch_size
        n_batches = (len(bx) + batch_size - 1) // batch_size
        interpreter_map = []
        interpreter_mapimg = []
        for i in range(n_batches):
            si = i * batch_size
            ei = min(len(bx), si + batch_size)
            # clone().detach() instead of torch.tensor(tensor): avoids the
            # "copy construct from a tensor" warning and keeps each slice
            # independent of the original autograd graph.  Inputs need
            # gradients for gradient-based interpreters.
            bxn = bx[si:ei].clone().detach().requires_grad_(True)
            byn = by[si:ei].clone().detach()
            gradmap, gradimg = self.interpret_per_batch(bxn, byn)
            interpreter_map.append(gradmap)
            interpreter_mapimg.append(gradimg)
        interpreter_map = np.concatenate(interpreter_map, axis=0)
        interpreter_mapimg = np.concatenate(interpreter_mapimg, axis=0)
        return interpreter_map, interpreter_mapimg
| [
"torch.tensor",
"torch.cuda.is_available",
"numpy.concatenate"
] | [((177, 202), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (200, 202), False, 'import torch\n'), ((1758, 1797), 'numpy.concatenate', 'np.concatenate', (['interpreter_map'], {'axis': '(0)'}), '(interpreter_map, axis=0)\n', (1772, 1797), True, 'import numpy as np\n'), ((1827, 1869), 'numpy.concatenate', 'np.concatenate', (['interpreter_mapimg'], {'axis': '(0)'}), '(interpreter_mapimg, axis=0)\n', (1841, 1869), True, 'import numpy as np\n'), ((1518, 1555), 'torch.tensor', 'torch.tensor', (['bxn'], {'requires_grad': '(True)'}), '(bxn, requires_grad=True)\n', (1530, 1555), False, 'import torch\n'), ((1557, 1574), 'torch.tensor', 'torch.tensor', (['byn'], {}), '(byn)\n', (1569, 1574), False, 'import torch\n')] |
#!/usr/bin/env python
import os
from copy import deepcopy
import numpy as np
from numpy.testing import assert_array_equal
import gippy as gp
import unittest
import gippy.test as gpt
class GeoImageTests(unittest.TestCase):
prefix = 'test-'
    def setUp(self):
        """ Configure global gippy options before each test: verbose output
        and a small chunk size so chunked-processing code paths are
        exercised even on these small test images. """
        gp.Options.set_verbose(1)
        gp.Options.set_chunksize(4.0)
def test0_open(self):
""" Open existing image """
geoimg = gpt.get_test_image()
self.assertEqual(geoimg.xsize(), 627)
self.assertEqual(geoimg.ysize(), 603)
def test1_create(self):
""" Create single band image """
fout = 'test.tif'
geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000)
self.assertTrue(geoimg.xsize() == 1000)
self.assertTrue(geoimg.ysize() == 1000)
self.assertTrue(os.path.exists(fout))
# test resolution
res = geoimg.resolution()
self.assertEqual(res.x(), 1.0/geoimg.xsize())
self.assertEqual(res.y(), -1.0/geoimg.ysize())
os.remove(fout)
def test_read(self):
""" Read multiband image """
geoimg = gpt.get_test_image()
arr = geoimg.read()
self.assertEqual(geoimg.nbands(), arr.shape[0])
# make sure x, y dimensions are same when reading single bands
self.assertEqual(arr.shape[1:3], geoimg[0].read().shape)
def test_read_random_pixels(self):
""" Read random pixels """
geoimg = gpt.get_test_image()
arr = geoimg.read_random_pixels(1000)
def test_uint16_read(self):
""" read uint16 makes uint16 array """
fout = 'test.tif'
geoimg = gp.GeoImage.create(fout, dtype='uint16')
self.assertTrue(os.path.exists(fout))
arr = geoimg.read()
self.assertEqual(str(arr.dtype), geoimg.type().string())
os.remove(fout)
def test_loop_through_bands(self):
""" Check that GeoImage is iterable """
geoimg = gpt.get_test_image()
for band in geoimg:
self.assertEqual(band.xsize(), geoimg.xsize())
def test_select(self):
""" Selection of bands from GeoImage """
img1 = gpt.get_test_image()
img2 = img1.select(['red', 'green', 'blue'])
self.assertTrue(np.array_equal(img1['red'].read(), img2[0].read()))
self.assertTrue(np.array_equal(img1['green'].read(), img2[1].read()))
self.assertTrue(np.array_equal(img1['blue'].read(), img2[2].read()))
def test_persistent_metadata(self):
""" Writing metadata and check for persistence after reopening """
fout = 'test-meta.tif'
geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, nb=3)
geoimg.set_bandnames(['red', 'green', 'blue'])
geoimg.set_nodata(7)
self.assertEqual(geoimg.bandnames()[0], 'red')
geoimg = None
# reopen
geoimg = gp.GeoImage(fout)
self.assertEqual(geoimg[0].nodata(), 7)
self.assertEqual(list(geoimg.bandnames()), ['red', 'green', 'blue'])
geoimg = None
os.remove(fout)
def test_create_image_with_gain(self):
""" Create int image with floating point gain """
fout = 'test-gain.tif'
geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, dtype='int16')
geoimg.set_gain(0.0001)
arr = np.zeros((1000,1000)) + 0.0001
arr[0:500,:] = 0.0002
geoimg[0].write(deepcopy(arr))
arrout = geoimg[0].read()
np.testing.assert_array_equal(arr, arrout)
os.remove(fout)
def test_create_multiband(self):
""" Create an RGB image """
fout = 'test_3band.tif'
geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, nb=3)
geoimg.set_bandnames(['green', 'red', 'blue'])
# test selection of bands
geoimg2 = geoimg.select(["red"])
self.assertTrue(geoimg2.nbands() == 1)
self.assertTrue(geoimg["red"].description() == "red")
geoimg = None
geoimg2 = None
os.remove(fout)
def test_create_temp_file(self):
""" Create a temp file that is deleted when last reference gone """
fout = self.prefix + '_temp.tif'
geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, nb=5, temp=True)
self.assertTrue(os.path.exists(fout))
# keep a band
band = geoimg[1]
geoimg = None
# band still references file
self.assertTrue(os.path.exists(fout))
band = None
# file should now have been deleted
self.assertFalse(os.path.exists(fout))
def test_create_autoname_temp(self):
""" Create temp file with auto-generated filename """
geoimg = gp.GeoImage.create(xsz=1000, ysz=1000, nb=3)
fout = geoimg.filename()
self.assertTrue(os.path.exists(fout))
geoimg = None
self.assertFalse(os.path.exists(fout))
def test_autoscale(self):
""" Auto scale each band in image """
geoimg = gpt.get_test_image()
for band in geoimg:
self.assertTrue(band.min() != 1.0)
self.assertTrue(band.max() != 255.0)
geoimg2 = geoimg.autoscale(minout=1.0, maxout=255.0)
for band in geoimg2:
self.assertTrue(band.min() == 1)
self.assertTrue(band.max() == 255)
def test_overviews(self):
""" Add overviews to an image """
fout = 'test-overviews.tif'
geoimg = gp.GeoImage.create(filename=fout, xsz=1000, ysz=1000, nb=2)
fout = geoimg.filename()
# add overviews
geoimg.add_overviews()
# clear overviews
geoimg.add_overviews(levels=[])
self.assertFalse(os.path.exists(fout + '.ovr'))
geoimg = None
geoimg = gp.GeoImage(fout, False)
geoimg.add_overviews()
self.assertTrue(os.path.exists(fout + '.ovr'))
os.remove(fout)
os.remove(fout + '.ovr')
def test_save(self):
""" Save image as new image with different datatype """
fout = 'test-byte.tif'
geoimg = gpt.get_test_image().autoscale(1.0, 255.0).save(fout, 'uint8')
geoimg = None
geoimg = gp.GeoImage(fout)
self.assertEqual(geoimg.type().string(), 'uint8')
self.assertEqual(geoimg[0].min(), 1.0)
self.assertEqual(geoimg[0].max(), 255.0)
os.remove(fout)
def test_save_with_gain(self):
""" Save image with a gain, which should copy through """
geoimg = gpt.get_test_image().select([2])
geoimg.set_gain(0.0001)
fout = 'test-savegain.tif'
imgout = geoimg.save(fout)
assert_array_equal(imgout.read(), geoimg.read())
os.remove(fout)
def test_warp(self):
""" Warping image into another (blank) image """
bbox = np.array([0.0, 0.0, 1.0, 1.0])
# default image in EPSG:4326 that spans 1 degree
geoimg = gp.GeoImage.create(xsz=1000, ysz=1000, nb=3, proj='EPSG:4326', bbox=bbox)
# 3857, set resolution to 100 meters
imgout = geoimg.warp(proj='EPSG:3857', xres=100.0, yres=100.0)
self.assertTrue(os.path.exists(imgout.filename()))
self.assertEqual(imgout.xsize(), 1114)
self.assertEqual(imgout.ysize(), 1114)
self.assertAlmostEqual(np.ceil(imgout.resolution().x()), 100.0)
def test_real_warp(self):
""" Warp real image to another projection """
geoimg = gpt.get_test_image()
fout = 'test-realwarp.tif'
imgout = geoimg.warp(fout, proj='EPSG:4326', xres=0.0003, yres=0.0003)
self.assertEqual(imgout.xsize(), 653)
self.assertEqual(imgout.ysize(), 547)
os.remove(fout)
def test_warp_into(self):
""" Warp real image into an existing image """
geoimg = gpt.get_test_image().select([1])
ext = geoimg.extent()
bbox = np.array([ext.x0(), ext.y0(), ext.width(), ext.height()])
imgout = gp.GeoImage.create('', geoimg.xsize(), geoimg.ysize(), 1, geoimg.srs(),
bbox, geoimg.type().string());
geoimg.warp_into(imgout)
self.assertEqual(imgout.read().sum(), geoimg.read().sum())
| [
"os.path.exists",
"gippy.test.get_test_image",
"gippy.Options.set_chunksize",
"gippy.GeoImage",
"numpy.array",
"numpy.zeros",
"gippy.GeoImage.create",
"gippy.Options.set_verbose",
"copy.deepcopy",
"numpy.testing.assert_array_equal",
"os.remove"
] | [((311, 336), 'gippy.Options.set_verbose', 'gp.Options.set_verbose', (['(1)'], {}), '(1)\n', (333, 336), True, 'import gippy as gp\n'), ((345, 374), 'gippy.Options.set_chunksize', 'gp.Options.set_chunksize', (['(4.0)'], {}), '(4.0)\n', (369, 374), True, 'import gippy as gp\n'), ((455, 475), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (473, 475), True, 'import gippy.test as gpt\n'), ((681, 725), 'gippy.GeoImage.create', 'gp.GeoImage.create', (['fout'], {'xsz': '(1000)', 'ysz': '(1000)'}), '(fout, xsz=1000, ysz=1000)\n', (699, 725), True, 'import gippy as gp\n'), ((1045, 1060), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (1054, 1060), False, 'import os\n'), ((1141, 1161), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (1159, 1161), True, 'import gippy.test as gpt\n'), ((1474, 1494), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (1492, 1494), True, 'import gippy.test as gpt\n'), ((1664, 1704), 'gippy.GeoImage.create', 'gp.GeoImage.create', (['fout'], {'dtype': '"""uint16"""'}), "(fout, dtype='uint16')\n", (1682, 1704), True, 'import gippy as gp\n'), ((1852, 1867), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (1861, 1867), False, 'import os\n'), ((1973, 1993), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (1991, 1993), True, 'import gippy.test as gpt\n'), ((2173, 2193), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (2191, 2193), True, 'import gippy.test as gpt\n'), ((2642, 2692), 'gippy.GeoImage.create', 'gp.GeoImage.create', (['fout'], {'xsz': '(1000)', 'ysz': '(1000)', 'nb': '(3)'}), '(fout, xsz=1000, ysz=1000, nb=3)\n', (2660, 2692), True, 'import gippy as gp\n'), ((2888, 2905), 'gippy.GeoImage', 'gp.GeoImage', (['fout'], {}), '(fout)\n', (2899, 2905), True, 'import gippy as gp\n'), ((3061, 3076), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (3070, 3076), False, 'import os\n'), ((3227, 3286), 'gippy.GeoImage.create', 
'gp.GeoImage.create', (['fout'], {'xsz': '(1000)', 'ysz': '(1000)', 'dtype': '"""int16"""'}), "(fout, xsz=1000, ysz=1000, dtype='int16')\n", (3245, 3286), True, 'import gippy as gp\n'), ((3475, 3517), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['arr', 'arrout'], {}), '(arr, arrout)\n', (3504, 3517), True, 'import numpy as np\n'), ((3526, 3541), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (3535, 3541), False, 'import os\n'), ((3665, 3715), 'gippy.GeoImage.create', 'gp.GeoImage.create', (['fout'], {'xsz': '(1000)', 'ysz': '(1000)', 'nb': '(3)'}), '(fout, xsz=1000, ysz=1000, nb=3)\n', (3683, 3715), True, 'import gippy as gp\n'), ((4008, 4023), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (4017, 4023), False, 'import os\n'), ((4196, 4257), 'gippy.GeoImage.create', 'gp.GeoImage.create', (['fout'], {'xsz': '(1000)', 'ysz': '(1000)', 'nb': '(5)', 'temp': '(True)'}), '(fout, xsz=1000, ysz=1000, nb=5, temp=True)\n', (4214, 4257), True, 'import gippy as gp\n'), ((4688, 4732), 'gippy.GeoImage.create', 'gp.GeoImage.create', ([], {'xsz': '(1000)', 'ysz': '(1000)', 'nb': '(3)'}), '(xsz=1000, ysz=1000, nb=3)\n', (4706, 4732), True, 'import gippy as gp\n'), ((4975, 4995), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (4993, 4995), True, 'import gippy.test as gpt\n'), ((5428, 5487), 'gippy.GeoImage.create', 'gp.GeoImage.create', ([], {'filename': 'fout', 'xsz': '(1000)', 'ysz': '(1000)', 'nb': '(2)'}), '(filename=fout, xsz=1000, ysz=1000, nb=2)\n', (5446, 5487), True, 'import gippy as gp\n'), ((5738, 5762), 'gippy.GeoImage', 'gp.GeoImage', (['fout', '(False)'], {}), '(fout, False)\n', (5749, 5762), True, 'import gippy as gp\n'), ((5857, 5872), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (5866, 5872), False, 'import os\n'), ((5881, 5905), 'os.remove', 'os.remove', (["(fout + '.ovr')"], {}), "(fout + '.ovr')\n", (5890, 5905), False, 'import os\n'), ((6146, 6163), 'gippy.GeoImage', 'gp.GeoImage', (['fout'], 
{}), '(fout)\n', (6157, 6163), True, 'import gippy as gp\n'), ((6326, 6341), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (6335, 6341), False, 'import os\n'), ((6661, 6676), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (6670, 6676), False, 'import os\n'), ((6775, 6805), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (6783, 6805), True, 'import numpy as np\n'), ((6880, 6953), 'gippy.GeoImage.create', 'gp.GeoImage.create', ([], {'xsz': '(1000)', 'ysz': '(1000)', 'nb': '(3)', 'proj': '"""EPSG:4326"""', 'bbox': 'bbox'}), "(xsz=1000, ysz=1000, nb=3, proj='EPSG:4326', bbox=bbox)\n", (6898, 6953), True, 'import gippy as gp\n'), ((7397, 7417), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (7415, 7417), True, 'import gippy.test as gpt\n'), ((7632, 7647), 'os.remove', 'os.remove', (['fout'], {}), '(fout)\n', (7641, 7647), False, 'import os\n'), ((846, 866), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (860, 866), False, 'import os\n'), ((1729, 1749), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (1743, 1749), False, 'import os\n'), ((3333, 3355), 'numpy.zeros', 'np.zeros', (['(1000, 1000)'], {}), '((1000, 1000))\n', (3341, 3355), True, 'import numpy as np\n'), ((3418, 3431), 'copy.deepcopy', 'deepcopy', (['arr'], {}), '(arr)\n', (3426, 3431), False, 'from copy import deepcopy\n'), ((4282, 4302), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (4296, 4302), False, 'import os\n'), ((4434, 4454), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (4448, 4454), False, 'import os\n'), ((4545, 4565), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (4559, 4565), False, 'import os\n'), ((4790, 4810), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (4804, 4810), False, 'import os\n'), ((4859, 4879), 'os.path.exists', 'os.path.exists', (['fout'], {}), '(fout)\n', (4873, 4879), False, 'import os\n'), ((5668, 
5697), 'os.path.exists', 'os.path.exists', (["(fout + '.ovr')"], {}), "(fout + '.ovr')\n", (5682, 5697), False, 'import os\n'), ((5818, 5847), 'os.path.exists', 'os.path.exists', (["(fout + '.ovr')"], {}), "(fout + '.ovr')\n", (5832, 5847), False, 'import os\n'), ((6461, 6481), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (6479, 6481), True, 'import gippy.test as gpt\n'), ((7751, 7771), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (7769, 7771), True, 'import gippy.test as gpt\n'), ((6044, 6064), 'gippy.test.get_test_image', 'gpt.get_test_image', ([], {}), '()\n', (6062, 6064), True, 'import gippy.test as gpt\n')] |
import random
import re
import sys
sys.path.append("../../")
import pandas as pd
import numpy as np
from demo import *
# just utility so we don't clobber original dataframe
def cp(d):
    """Return a copy of the given dataframe so callers don't clobber it.

    Bug fix: the original ignored its parameter and copied the module-level
    ``df`` global. Copying the argument keeps the sole existing call site,
    ``cp(df)``, behaving identically while making the helper reusable.
    """
    return d.copy()
def code(db_node):
    """Return the source-code string for a fragment-db node (uses global db)."""
    return db.get_code(db_node)
def run(db_node):
    """Execute the extracted function for ``db_node`` on a fresh copy of df."""
    executable = db.get_executable(db_node)
    return executable(cp(df))
# Module-level state; populated by init() before any task generation.
db = None
ALL_FUNCS = None
ALL_CODE_FRAGMENTS = None
df = None
def init():
    """Populate the module globals: fragment db, extracted functions,
    their code strings, and a 1000-row sample of the loan dataset.

    ``start`` comes from the star-import of ``demo``; paths are relative
    to this script's expected working directory.
    """
    global db
    global ALL_FUNCS
    global ALL_CODE_FRAGMENTS
    global df
    db = start("../../sample_db.pkl")
    ALL_FUNCS = db.extracted_functions()
    ALL_CODE_FRAGMENTS = [code(p) for p in ALL_FUNCS]
    df = pd.read_csv("../../demo-data/loan.csv", nrows=1000)
def survey_task(
    db, query, n, max_loc=None, random_state=None, rename_funcs=True
):
    """Print and return up to ``n`` code fragments for a survey condition.

    With ``query=None`` samples random fragments from ALL_CODE_FRAGMENTS
    (optionally filtered to at most ``max_loc`` lines); otherwise takes the
    top-n results of ``db.query(query)``. Function names matching
    ``cleaning_func_<k>`` are renamed to ``f<index>`` when ``rename_funcs``.

    Bug fix: ``n`` was clamped to the pool size *before* the ``max_loc``
    filter shrank the pool, so ``np.random.choice(..., replace=False)``
    could raise ValueError. The clamp now happens after filtering.
    """
    if random_state is not None:
        np.random.seed(random_state)
        random.seed(random_state)
    if query is None:
        # random querying -- effectively the control condition
        all_funcs = ALL_CODE_FRAGMENTS
        if max_loc is not None:
            all_funcs = [c for c in all_funcs if len(c.split("\n")) <= max_loc]
        # clamp after filtering so we never request more than available
        n = min(len(all_funcs), n)
        query_results = np.random.choice(
            all_funcs,
            size=n,
            replace=False,
        )
    else:
        query_results = db.query(query)[:n]
    code_fragments = []
    for ix, prog in enumerate(query_results):
        if not isinstance(prog, str):
            # db nodes need their source extracted first
            prog = code(prog)
        if rename_funcs:
            prog = re.sub(r'cleaning_func_[0-9]+', 'f{}'.format(ix), prog)
        print("# Fragment {}".format(ix))
        print(prog)
        print("\n")
        code_fragments.append(prog)
    return code_fragments
class Task(object):
    """One survey task: a titled description plus a fragment-db query."""

    def __init__(self, title, description, query):
        self.title = title
        self.description = description
        self.query = query

    def generate(self, db, n, random_state):
        """Print the treatment (query-driven) and control (random) fragments."""
        for header in ("# Task {}".format(self.title),
                       "# {}".format(self.description),
                       "# Transfer fragments (treatment)"):
            print(header)
        survey_task(db, self.query, n, random_state=random_state)
        print("\n")
        print("# Random fragments (control)")
        # control draws random fragments capped at 20 lines each
        survey_task(db, None, n, max_loc=20, random_state=random_state)
# Predefined survey tasks: each pairs a natural-language description with a
# query mixing dataframe column names and pandas API objects.
task1 = Task(
    title="1",
    description="Identify non-current loans based on loan_status",
    query=["loan_status"],
)
task2 = Task(
    title="2",
    description=
    "Round the interest rate column (`int_rate`) to nearest integer",
    query=["int_rate", pd.DataFrame.astype],
)
task3 = Task(
    title="3",
    description="Compute the issue month and year associated with each loan",
    query=["issue_month", pd.to_datetime],
)
task4 = Task(
    title="4",
    description=
    "Fill in missing values in the months since last delinquency column (`mths_since_last_delinq`)",
    query=["mths_since_last_delinq", pd.Series.fillna],
)
task5 = Task(
    title="5",
    description="Drop columns with many missing values",
    query=[pd.DataFrame.dropna],
)
def main():
    """Initialize the demo state, then emit every survey task.

    Each task gets its own deterministic seed (base + index) so the
    printed output is reproducible run to run.
    """
    init()
    base_seed = 42
    all_tasks = [task1, task2, task3, task4, task5]
    for offset, current in enumerate(all_tasks):
        current.generate(db, 5, base_seed + offset)
if __name__ == "__main__":
    try:
        main()
    except Exception as err:
        # Interactive survey tooling: drop into the post-mortem debugger on
        # any failure rather than exiting with a traceback.
        import pdb
        pdb.post_mortem()
| [
"pandas.read_csv",
"numpy.random.choice",
"pdb.post_mortem",
"random.seed",
"numpy.random.seed",
"sys.path.append"
] | [((35, 60), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (50, 60), False, 'import sys\n'), ((661, 712), 'pandas.read_csv', 'pd.read_csv', (['"""../../demo-data/loan.csv"""'], {'nrows': '(1000)'}), "('../../demo-data/loan.csv', nrows=1000)\n", (672, 712), True, 'import pandas as pd\n'), ((845, 873), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (859, 873), True, 'import numpy as np\n'), ((882, 907), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (893, 907), False, 'import random\n'), ((1181, 1231), 'numpy.random.choice', 'np.random.choice', (['all_funcs'], {'size': 'n', 'replace': '(False)'}), '(all_funcs, size=n, replace=False)\n', (1197, 1231), True, 'import numpy as np\n'), ((3314, 3331), 'pdb.post_mortem', 'pdb.post_mortem', ([], {}), '()\n', (3329, 3331), False, 'import pdb\n')] |
import torch, mmcv
import torch.nn as nn
from mmcv.cnn import normal_init, kaiming_init
from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx
from mmdet.ops import ConvModule, Scale
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob
from mmdet.ops import DeformConv, CropSplit, CropSplitGt
import torch.nn.functional as F
import pycocotools.mask as mask_util
import numpy as np
INF = 1e8
def center_size(boxes):
    """Convert corner-form boxes (x1, y1, x2, y2) to (cx, cy, w, h)."""
    mins = boxes[:, :2]
    maxs = boxes[:, 2:]
    centers = (maxs + mins) / 2   # cx, cy
    sizes = maxs - mins           # w, h
    return torch.cat((centers, sizes), 1)
class FeatureAlign(nn.Module):
    """Deformable alignment of classification features.

    Predicts per-position sampling offsets from the 4-channel box
    regression map and applies a deformable convolution with them.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 deformable_groups=4,
                 flag_norm=True):
        super(FeatureAlign, self).__init__()
        # 2 offsets (dx, dy) per kernel location, per deformable group.
        self.conv_offset = nn.Conv2d(
            4, deformable_groups * kernel_size * kernel_size * 2, 1, bias=False)
        self.conv_adaption = DeformConv(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2,
            deformable_groups=deformable_groups)
        self.relu = nn.ReLU(inplace=True)
        self.norm = nn.GroupNorm(32, in_channels)
        self.flag_norm = flag_norm

    def init_weights(self, bias_value=0):
        # bias_value is unused; kept for interface compatibility.
        torch.nn.init.normal_(self.conv_offset.weight, std=0.0)
        torch.nn.init.normal_(self.conv_adaption.weight, std=0.01)

    def forward(self, x, shape):
        # Offsets come from the detached box prediction, so no gradient
        # flows back into the regression branch through this path.
        offset = self.conv_offset(shape.detach())
        aligned = self.conv_adaption(x, offset)
        if self.flag_norm:
            aligned = self.norm(aligned)
        return self.relu(aligned)
def crop_split(masks00, masks01, masks10, masks11, boxes, masksG=None):
    """Crop each quadrant mask to its sub-box and sum into one mask.

    Each (h, w, n) quadrant mask is zeroed outside its quarter of the
    per-instance box (split at the box midpoint), then the four crops are
    added. If ``masksG`` is given, it is additionally cropped to the full
    box and returned as a second value.
    """
    h, w, n = masks00.size()
    # Broadcast column/row index grids over all n instances.
    rows = torch.arange(w, device=masks00.device, dtype=boxes.dtype).view(1, -1, 1).expand(h, w, n)
    cols = torch.arange(h, device=masks00.device, dtype=boxes.dtype).view(-1, 1, 1).expand(h, w, n)

    x1, x2 = boxes[:, 0], boxes[:, 2]
    y1, y2 = boxes[:, 1], boxes[:, 3]
    # Midpoints are taken from the raw coordinates, then everything is
    # clamped into the image (matches the original computation order).
    xc = (x1 + x2) / 2
    yc = (y1 + y2) / 2
    x1 = torch.clamp(x1, min=0, max=w - 1)
    y1 = torch.clamp(y1, min=0, max=h - 1)
    x2 = torch.clamp(x2, min=0, max=w - 1)
    y2 = torch.clamp(y2, min=0, max=h - 1)
    xc = torch.clamp(xc, min=0, max=w - 1)
    yc = torch.clamp(yc, min=0, max=h - 1)

    def _crop(mask, left, top, right, bottom):
        # Keep pixels with left <= x < right and top <= y < bottom.
        keep = ((rows >= left.view(1, 1, -1)) & (rows < right.view(1, 1, -1)) &
                (cols >= top.view(1, 1, -1)) & (cols < bottom.view(1, 1, -1)))
        return mask * keep.float().detach()

    masks = (_crop(masks00, x1, y1, xc, yc) +
             _crop(masks01, xc, y1, x2, yc) +
             _crop(masks10, x1, yc, xc, y2) +
             _crop(masks11, xc, yc, x2, y2))

    if masksG is None:
        return masks
    # Ground-truth masks are cropped to the whole box (mask not detached,
    # matching the original behaviour).
    whole = ((rows >= x1.view(1, 1, -1)) & (rows < x2.view(1, 1, -1)) &
             (cols >= y1.view(1, 1, -1)) & (cols < y2.view(1, 1, -1))).float()
    return masks, masksG * whole
@HEADS.register_module
class SipMaskHead(nn.Module):
    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 center_sampling=False,
                 center_sample_radius=1.5,
                 ssd_flag=False,
                 rescoring_flag=False,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 loss_centerness=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):#
        """SipMask head: FCOS-style detection plus per-instance mask
        coefficients.

        Args:
            num_classes: number of classes *including* background; the
                classification branch outputs ``num_classes - 1`` channels.
            regress_ranges: per-FPN-level target ranges for box regression.
            rescoring_flag: if True, also builds a small mask-IoU rescoring
                network and the associated MSE loss.
        """
        super(SipMaskHead, self).__init__()
        self.num_classes = num_classes
        # Sigmoid-based classification: background has no channel.
        self.cls_out_channels = num_classes - 1
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.regress_ranges = regress_ranges
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_centerness = build_loss(loss_centerness)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.fpn_strides = [8, 16, 32, 64, 128]
        # NOTE(review): reuses the loss_bbox config for a separate loss
        # instance; loss_center is not used in the visible code.
        self.loss_center = build_loss(loss_bbox)
        self.ssd_flag = ssd_flag
        self.rescoring_flag = rescoring_flag
        if self.rescoring_flag:
            self.loss_iou = build_loss(dict(type='MSELoss', loss_weight=1.0, reduction='sum'))
        self._init_layers()
    def _init_layers(self):
        """Build classification/regression towers, FCOS heads, the mask
        coefficient branch, the mask feature laterals, and (optionally)
        the mask-IoU rescoring network."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # Classification tower has one conv fewer: the last stage is the
        # deformable FeatureAlign module added below.
        for i in range(self.stacked_convs-1):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        self.fcos_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        # One learnable scalar per FPN level for the regression outputs.
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
        # nc: number of mask prototype channels (coefficients per quadrant).
        self.nc = 32
        ###########instance##############
        self.feat_align = FeatureAlign(self.feat_channels, self.feat_channels, 3, flag_norm=self.norm_cfg is not None)
        # 4 quadrant coefficient sets of nc each.
        self.sip_cof = nn.Conv2d(self.feat_channels, self.nc*4, 3, padding=1)
        # 768 = 3 FPN levels x 256 channels concatenated in forward().
        self.sip_mask_lat = nn.Conv2d(512, self.nc, 3, padding=1)
        self.sip_mask_lat0 = nn.Conv2d(768, 512, 1, padding=0)
        if self.rescoring_flag:
            self.convs_scoring = []
            channels = [1, 16, 16, 16, 32, 64, 128]
            for i in range(6):
                in_channels = channels[i]
                out_channels = channels[i + 1]
                # NOTE(review): both branches yield 2 -- stride is always 2.
                stride = 2 if i == 0 else 2
                padding = 0
                self.convs_scoring.append(
                    ConvModule(
                        in_channels, out_channels,
                        3,
                        stride=stride,
                        padding=padding,
                        bias=True))
            self.convs_scoring = nn.Sequential(*self.convs_scoring)
            self.mask_scoring = nn.Conv2d(128, self.num_classes-1, 1)
            for m in self.convs_scoring:
                kaiming_init(m.conv)
            normal_init(self.mask_scoring, std=0.001)
        self.relu = nn.ReLU(inplace=True)
        # CUDA ops that crop predicted/gt masks per quadrant (split factor 2).
        self.crop_cuda = CropSplit(2)
        self.crop_gt_cuda = CropSplitGt(2)
        self.init_weights()
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.fcos_centerness, std=0.01)
normal_init(self.sip_cof, std=0.001)
normal_init(self.sip_mask_lat, std=0.01)
normal_init(self.sip_mask_lat0, std=0.01)
self.feat_align.init_weights()
    def forward(self, feats):
        """Run the head on all FPN levels.

        Returns:
            tuple: per-level lists of (cls_scores, bbox_preds, centernesses,
            cof_preds) plus a single mask-feature tensor built from the
            first three levels.
        """
        # return multi_apply(self.forward_single, feats, self.scales)
        cls_scores = []
        bbox_preds = []
        centernesses = []
        cof_preds = []
        feat_masks = []
        count = 0
        for x, scale, stride in zip(feats,self.scales, self.strides):
            cls_feat = x
            reg_feat = x
            for cls_layer in self.cls_convs:
                cls_feat = cls_layer(cls_feat)
            for reg_layer in self.reg_convs:
                reg_feat = reg_layer(reg_feat)
            # scale the bbox_pred of different level
            # float to avoid overflow when enabling FP16
            bbox_pred = scale(self.fcos_reg(reg_feat))
            # Align classification features with the predicted box shape.
            cls_feat = self.feat_align(cls_feat, bbox_pred)
            cls_score = self.fcos_cls(cls_feat)
            centerness = self.fcos_centerness(reg_feat)
            centernesses.append(centerness)
            cls_scores.append(cls_score)
            bbox_preds.append(bbox_pred.float()*stride)
            ########COFFECIENTS###############
            cof_pred = self.sip_cof(cls_feat)
            cof_preds.append(cof_pred)
            ############contextual#######################
            # Only the first three levels feed the mask features; levels 1-2
            # are upsampled to level-0 resolution before concatenation.
            if count < 3:
                if count == 0:
                    feat_masks.append(reg_feat)
                else:
                    feat_up = F.interpolate(reg_feat, scale_factor=(2 ** count), mode='bilinear', align_corners=False)
                    feat_masks.append(feat_up)
            count = count + 1
        # ################contextual enhanced##################
        feat_masks = torch.cat(feat_masks, dim=1)
        feat_masks = self.relu(self.sip_mask_lat(self.relu(self.sip_mask_lat0(feat_masks))))
        # Upsample prototypes 4x to half the input resolution.
        feat_masks = F.interpolate(feat_masks, scale_factor=4, mode='bilinear', align_corners=False)
        return cls_scores, bbox_preds, centernesses, cof_preds, feat_masks
    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
    def loss(self,
             cls_scores,
             bbox_preds,
             centernesses,
             cof_preds,
             feat_masks,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None,
             gt_masks_list=None):
        """Compute classification, box, centerness, mask, and (optionally)
        mask-IoU losses.

        Returns a dict of scalar losses; ``loss_iou`` is present only when
        ``self.rescoring_flag`` is set.
        """
        assert len(cls_scores) == len(bbox_preds) == len(centernesses)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points, all_level_strides = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device)
        labels, bbox_targets, label_list, bbox_targets_list, gt_inds = self.fcos_target(all_level_points,
                                                                                        gt_bboxes, gt_labels)
        #decode detection and groundtruth
        det_bboxes = []
        det_targets = []
        num_levels = len(bbox_preds)
        for img_id in range(len(img_metas)):
            bbox_pred_list = [
                bbox_preds[i][img_id].permute(1, 2, 0).reshape(-1, 4).detach() for i in range(num_levels)
            ]
            bbox_target_list = bbox_targets_list[img_id]
            bboxes = []
            targets = []
            for i in range(len(bbox_pred_list)):
                bbox_pred = bbox_pred_list[i]
                bbox_target = bbox_target_list[i]
                points = all_level_points[i]
                bboxes.append(distance2bbox(points, bbox_pred))
                targets.append(distance2bbox(points, bbox_target))
            bboxes = torch.cat(bboxes, dim=0)
            targets = torch.cat(targets, dim=0)
            det_bboxes.append(bboxes)
            det_targets.append(targets)
        gt_masks = []
        for i in range(len(gt_labels)):
            gt_label = gt_labels[i]
            gt_masks.append(torch.from_numpy(np.array(gt_masks_list[i][:gt_label.shape[0]], dtype=np.float32)).to(gt_label.device))
        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and centerness
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_centerness = [
            centerness.permute(0, 2, 3, 1).reshape(-1)
            for centerness in centernesses
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_centerness = torch.cat(flatten_centerness)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])
        flatten_strides = torch.cat(
            [strides.view(-1,1).repeat(num_imgs, 1) for strides in all_level_strides])
        # label 0 is background; positives are all non-zero labels.
        pos_inds = flatten_labels.nonzero().reshape(-1)
        num_pos = len(pos_inds)
        loss_cls = self.loss_cls(
            flatten_cls_scores, flatten_labels,
            avg_factor=num_pos + num_imgs)  # avoid num_pos is 0
        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_centerness = flatten_centerness[pos_inds]
        if num_pos > 0:
            pos_bbox_targets = flatten_bbox_targets[pos_inds]
            pos_centerness_targets = self.centerness_target(pos_bbox_targets)
            pos_points = flatten_points[pos_inds]
            pos_strides = flatten_strides[pos_inds]
            # Distances are normalised by the level stride before decoding.
            pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds/pos_strides)
            pos_decoded_target_preds = distance2bbox(pos_points,
                                                     pos_bbox_targets/pos_strides)
            # centerness weighted iou loss
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds,
                weight=pos_centerness_targets,
                avg_factor=pos_centerness_targets.sum())
            loss_centerness = self.loss_centerness(pos_centerness,
                                                   pos_centerness_targets)
        else:
            # No positives: zero-valued losses that keep the graph connected.
            loss_bbox = pos_bbox_preds.sum()
            loss_centerness = pos_centerness.sum()
        ##########mask loss#################
        flatten_cls_scores1 = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs,-1, self.cls_out_channels)
            for cls_score in cls_scores
        ]
        flatten_cls_scores1 = torch.cat(flatten_cls_scores1,dim=1)
        flatten_cof_preds = [
            cof_pred.permute(0, 2, 3, 1).reshape(cof_pred.shape[0],-1, 32*4)
            for cof_pred in cof_preds
        ]
        loss_mask = 0
        loss_iou = 0
        num_iou = 0.1
        flatten_cof_preds = torch.cat(flatten_cof_preds,dim=1)
        for i in range(num_imgs):
            labels = torch.cat([labels_level.flatten() for labels_level in label_list[i]])
            # Detections are decoded at input scale; mask features are at
            # half resolution, hence the /2.
            bbox_dt = det_bboxes[i]/2
            bbox_dt = bbox_dt.detach()
            pos_inds = (labels > 0).nonzero().view(-1)
            cof_pred = flatten_cof_preds[i][pos_inds]
            img_mask = feat_masks[i]
            mask_h = img_mask.shape[1]
            mask_w = img_mask.shape[2]
            idx_gt = gt_inds[i]
            bbox_dt = bbox_dt[pos_inds, :4]
            # Drop degenerate detections (area <= 1 px at mask scale).
            area = (bbox_dt[:, 2] - bbox_dt[:, 0]) * (bbox_dt[:, 3] - bbox_dt[:, 1])
            bbox_dt = bbox_dt[area > 1.0, :]
            idx_gt = idx_gt[area > 1.0]
            cof_pred = cof_pred[area > 1.0]
            if bbox_dt.shape[0] == 0:
                loss_mask += area.sum()*0
                continue
            bbox_gt = gt_bboxes[i]
            cls_score = flatten_cls_scores1[i, pos_inds, labels[pos_inds] - 1].sigmoid().detach()
            cls_score = cls_score[area>1.0]
            pos_inds = pos_inds[area > 1.0]
            ious = bbox_overlaps(bbox_gt[idx_gt]/2, bbox_dt, is_aligned=True)
            # Per-sample weights: score * IoU, normalised to mean 1.
            with torch.no_grad():
                weighting = cls_score * ious
                weighting = weighting/(torch.sum(weighting)+0.0001)*len(weighting)
            gt_mask = F.interpolate(gt_masks[i].unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=False).squeeze(0)
            shape = np.minimum(feat_masks[i].shape, gt_mask.shape)
            gt_mask_new = gt_mask.new_zeros(gt_mask.shape[0], mask_h, mask_w)
            gt_mask_new[:gt_mask.shape[0], :shape[1], :shape[2]] = gt_mask[:gt_mask.shape[0], :shape[1], :shape[2]]
            gt_mask_new = gt_mask_new.gt(0.5).float()
            gt_mask_new = torch.index_select(gt_mask_new,0,idx_gt).permute(1, 2, 0).contiguous()
            #######spp###########################
            # Assemble per-quadrant masks from prototypes x coefficients.
            img_mask1 = img_mask.permute(1,2,0)
            pos_masks00 = torch.sigmoid(img_mask1 @ cof_pred[:, 0:32].t())
            pos_masks01 = torch.sigmoid(img_mask1 @ cof_pred[:, 32:64].t())
            pos_masks10 = torch.sigmoid(img_mask1 @ cof_pred[:, 64:96].t())
            pos_masks11 = torch.sigmoid(img_mask1 @ cof_pred[:, 96:128].t())
            pred_masks = torch.stack([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)
            pred_masks = self.crop_cuda(pred_masks, bbox_dt)
            gt_mask_crop = self.crop_gt_cuda(gt_mask_new, bbox_dt)
            # pred_masks, gt_mask_crop = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11, bbox_dt,
            #                                       gt_mask_new)
            pre_loss = F.binary_cross_entropy(pred_masks, gt_mask_crop, reduction='none')
            # Normalise the BCE by box size so large boxes don't dominate.
            pos_get_csize = center_size(bbox_dt)
            gt_box_width = pos_get_csize[:, 2]
            gt_box_height = pos_get_csize[:, 3]
            pre_loss = pre_loss.sum(dim=(0, 1)) / gt_box_width / gt_box_height / pos_get_csize.shape[0]
            loss_mask += torch.sum(pre_loss*weighting.detach())
            if self.rescoring_flag:
                pos_labels = labels[pos_inds] - 1
                input_iou = pred_masks.detach().unsqueeze(0).permute(3, 0, 1, 2)
                pred_iou = self.convs_scoring(input_iou)
                pred_iou = self.relu(self.mask_scoring(pred_iou))
                # Global max-pool to one IoU score per instance per class.
                pred_iou = F.max_pool2d(pred_iou, kernel_size=pred_iou.size()[2:]).squeeze(-1).squeeze(-1)
                pred_iou = pred_iou[range(pred_iou.size(0)), pos_labels]
                with torch.no_grad():
                    mask_pred = (pred_masks > 0.4).float()
                    mask_pred_areas = mask_pred.sum((0, 1))
                    overlap_areas = (mask_pred * gt_mask_new).sum((0, 1))
                    gt_full_areas = gt_mask_new.sum((0, 1))
                    iou_targets = overlap_areas / (mask_pred_areas + gt_full_areas - overlap_areas + 0.1)
                    # Supervise only reasonable targets on large-enough gts.
                    iou_weights = ((iou_targets > 0.1) & (iou_targets <= 1.0) & (gt_full_areas >= 10 * 10)).float()
                loss_iou += self.loss_iou(pred_iou.view(-1, 1), iou_targets.view(-1, 1), iou_weights.view(-1, 1))
                num_iou += torch.sum(iou_weights.detach())
        loss_mask = loss_mask/num_imgs
        if self.rescoring_flag:
            loss_iou = loss_iou * 10 / num_iou.detach()
            return dict(
                loss_cls=loss_cls,
                loss_bbox=loss_bbox,
                loss_centerness=loss_centerness,
                loss_mask=loss_mask,
                loss_iou=loss_iou)
        else:
            return dict(
                loss_cls=loss_cls,
                loss_bbox=loss_bbox,
                loss_centerness=loss_centerness,
                loss_mask=loss_mask)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
               cls_scores,
               bbox_preds,
               centernesses,
               cof_preds,
               feat_masks,
               img_metas,
               cfg,
               rescale=None):
    """Decode the batched head outputs into per-image detection results.

    Args:
        cls_scores (list[Tensor]): Per-level classification maps (N, C, H, W).
        bbox_preds (list[Tensor]): Per-level box-distance maps (N, 4, H, W).
        centernesses (list[Tensor]): Per-level centerness maps.
        cof_preds (list[Tensor]): Per-level mask-coefficient maps.
        feat_masks (Tensor or list): Per-image mask prototype features,
            indexable by image id.
        img_metas (list[dict]): Per-image meta with 'img_shape',
            'ori_shape' and 'scale_factor'.
        cfg: Test-time config forwarded to `get_bboxes_single`.
        rescale (bool, optional): Forwarded to `get_bboxes_single`.

    Returns:
        list: One `get_bboxes_single` result per image.
    """
    assert len(cls_scores) == len(bbox_preds)
    num_levels = len(cls_scores)
    featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
    # points are shared by every image in the batch
    mlvl_points, mlvl_strides = self.get_points(featmap_sizes,
                                                bbox_preds[0].dtype,
                                                bbox_preds[0].device)
    result_list = []
    for img_id, img_meta in enumerate(img_metas):
        # detach the per-image slice of every level
        per_img_cls = [cls_scores[lvl][img_id].detach()
                       for lvl in range(num_levels)]
        per_img_bbox = [bbox_preds[lvl][img_id].detach()
                        for lvl in range(num_levels)]
        per_img_cent = [centernesses[lvl][img_id].detach()
                        for lvl in range(num_levels)]
        per_img_cof = [cof_preds[lvl][img_id].detach()
                       for lvl in range(num_levels)]
        det_bboxes = self.get_bboxes_single(
            per_img_cls, per_img_bbox, per_img_cent, per_img_cof,
            feat_masks[img_id], mlvl_points,
            img_meta['img_shape'], img_meta['ori_shape'],
            img_meta['scale_factor'], cfg, rescale)
        result_list.append(det_bboxes)
    return result_list
def get_bboxes_single(self,
                      cls_scores,
                      bbox_preds,
                      centernesses,
                      cof_preds,
                      feat_mask,
                      mlvl_points,
                      img_shape,
                      ori_shape,
                      scale_factor,
                      cfg,
                      rescale=False):
    """Decode per-level head outputs of a single image into detections
    plus instance masks (RLE-encoded).

    Args:
        cls_scores (list[Tensor]): Per-level classification maps (C, H, W).
        bbox_preds (list[Tensor]): Per-level box-distance maps (4, H, W).
        centernesses (list[Tensor]): Per-level centerness maps.
        cof_preds (list[Tensor]): Per-level mask-coefficient maps
            (32*4, H, W) — four 32-dim coefficient groups per location.
        feat_mask (Tensor): Mask prototype features of this image.
        mlvl_points (list[Tensor]): Per-level point coordinates.
        img_shape (tuple): Resized/padded input image shape.
        ori_shape (tuple): Original image shape; masks are pasted onto it.
        scale_factor: Resize factor(s) between original and input image.
        cfg: Test config providing `nms_pre`, `score_thr`, `nms`,
            `max_per_img`.
        rescale (bool): If True, boxes are mapped back to original scale.

    Returns:
        tuple: ``(det_bboxes, det_labels, cls_segms)``, or when
        ``self.rescoring_flag`` is set,
        ``(det_bboxes, det_labels, (cls_segms, mask_scores))``.
    """
    assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
    mlvl_bboxes = []
    mlvl_scores = []
    mlvl_centerness = []
    mlvl_cofs = []
    for cls_score, bbox_pred, cof_pred, centerness, points in zip(
            cls_scores, bbox_preds, cof_preds, centernesses, mlvl_points):
        assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
        # (C, H, W) -> (H*W, C); scores and centerness are sigmoids
        scores = cls_score.permute(1, 2, 0).reshape(
            -1, self.cls_out_channels).sigmoid()
        centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
        bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
        cof_pred = cof_pred.permute(1,2,0).reshape(-1,32*4)
        # keep only the nms_pre best locations per level before NMS
        nms_pre = cfg.get('nms_pre', -1)
        if nms_pre > 0 and scores.shape[0] > nms_pre:
            max_scores, _ = (scores * centerness[:, None]).max(dim=1)
            _, topk_inds = max_scores.topk(nms_pre)
            points = points[topk_inds, :]
            bbox_pred = bbox_pred[topk_inds, :]
            cof_pred = cof_pred[topk_inds, :]
            scores = scores[topk_inds, :]
            centerness = centerness[topk_inds]
        bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
        mlvl_cofs.append(cof_pred)
        mlvl_bboxes.append(bboxes)
        mlvl_scores.append(scores)
        mlvl_centerness.append(centerness)
    mlvl_bboxes = torch.cat(mlvl_bboxes)
    mlvl_cofs = torch.cat(mlvl_cofs)
    if rescale:
        mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
    mlvl_scores = torch.cat(mlvl_scores)
    # prepend a dummy background column (mmdet multiclass-NMS convention)
    padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
    mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
    mlvl_centerness = torch.cat(mlvl_centerness)
    if self.ssd_flag is False:
        det_bboxes, det_labels, idxs_keep = multiclass_nms_idx(
            mlvl_bboxes,
            mlvl_scores,
            cfg.score_thr,
            cfg.nms,
            cfg.max_per_img,
            score_factors=mlvl_centerness)
    else:
        # fast (class-parallel) NMS path: fold centerness into the scores
        mlvl_scores = mlvl_scores*mlvl_centerness.view(-1,1)
        det_bboxes, det_labels, det_cofs = self.fast_nms(mlvl_bboxes, mlvl_scores[:, 1:].transpose(1, 0).contiguous(),
                                                         mlvl_cofs, iou_threshold=cfg.nms.iou_thr, score_thr=cfg.score_thr)
    cls_segms = [[] for _ in range(self.num_classes - 1)]
    mask_scores = [[] for _ in range(self.num_classes - 1)]
    if det_bboxes.shape[0]>0:
        # NOTE(review): `scale = 2` appears tied to the prototype feature
        # stride relative to the input image — confirm.
        scale = 2
        if self.ssd_flag is False:
            det_cofs = mlvl_cofs[idxs_keep]
        #####spp########################
        # combine the four 32-dim coefficient groups with the prototype
        # features: one sigmoid mask per kept detection per group
        img_mask1 = feat_mask.permute(1,2,0)
        pos_masks00 = torch.sigmoid(img_mask1 @ det_cofs[:, 0:32].t())
        pos_masks01 = torch.sigmoid(img_mask1 @ det_cofs[:, 32:64].t())
        pos_masks10 = torch.sigmoid(img_mask1 @ det_cofs[:, 64:96].t())
        pos_masks11 = torch.sigmoid(img_mask1 @ det_cofs[:, 96:128].t())
        pos_masks = torch.stack([pos_masks00,pos_masks01,pos_masks10,pos_masks11],dim=0)
        # crop every mask to its detection box (box mapped to mask scale)
        pos_masks = self.crop_cuda(pos_masks, det_bboxes[:,:4] * det_bboxes.new_tensor(scale_factor) / scale)
        # pos_masks = crop_split(pos_masks00, pos_masks01, pos_masks10, pos_masks11,
        #                        det_bboxes * det_bboxes.new_tensor(scale_factor) / scale)
        pos_masks = pos_masks.permute(2, 0, 1)
        # masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale/scale_factor, mode='bilinear', align_corners=False).squeeze(0)
        if self.ssd_flag:
            # scale_factor is an array here; [3:1:-1] selects its (h, w) entries
            masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale / scale_factor[3:1:-1], mode='bilinear', align_corners=False).squeeze(0)
        else:
            masks = F.interpolate(pos_masks.unsqueeze(0), scale_factor=scale / scale_factor, mode='bilinear', align_corners=False).squeeze(0)
        # binarize with the same 0.4 threshold used in the training loss
        masks.gt_(0.4)
        if self.rescoring_flag:
            # mask-IoU rescoring head: predict mask quality and multiply
            # it into the detection score
            pred_iou = pos_masks.unsqueeze(1)
            pred_iou = self.convs_scoring(pred_iou)
            pred_iou = self.relu(self.mask_scoring(pred_iou))
            pred_iou = F.max_pool2d(pred_iou, kernel_size=pred_iou.size()[2:]).squeeze(-1).squeeze(-1)
            pred_iou = pred_iou[range(pred_iou.size(0)), det_labels].squeeze()
            mask_scores = pred_iou*det_bboxes[:, -1]
            mask_scores = mask_scores.cpu().numpy()
            mask_scores = [mask_scores[det_labels.cpu().numpy() == i] for i in range(self.num_classes - 1)]
        for i in range(det_bboxes.shape[0]):
            label = det_labels[i]
            mask = masks[i].cpu().numpy()
            # paste the (possibly smaller) mask into a full-size canvas
            im_mask = np.zeros((ori_shape[0], ori_shape[1]), dtype=np.uint8)
            shape = np.minimum(mask.shape, ori_shape[0:2])
            im_mask[:shape[0],:shape[1]] = mask[:shape[0],:shape[1]]
            rle = mask_util.encode(
                np.array(im_mask[:, :, np.newaxis], order='F'))[0]
            cls_segms[label].append(rle)
    if self.rescoring_flag:
        return det_bboxes, det_labels, (cls_segms, mask_scores)
    else:
        return det_bboxes, det_labels, cls_segms
def get_points(self, featmap_sizes, dtype, device):
    """Get points according to feature map sizes.

    Args:
        featmap_sizes (list[tuple]): Multi-level feature map sizes.
        dtype (torch.dtype): Type of points.
        device (torch.device): Device of points.

    Returns:
        tuple: (per-level point tensors, per-level stride tensors).
    """
    per_level = [
        self.get_points_single(size, stride, dtype, device)
        for size, stride in zip(featmap_sizes, self.strides)
    ]
    mlvl_points = [pts for pts, _ in per_level]
    mlvl_strides = [strds for _, strds in per_level]
    return mlvl_points, mlvl_strides
def get_points_single(self, featmap_size, stride, dtype, device):
    """Return the input-image coordinates of every cell of one level.

    Each feature-map cell maps to the point at its center:
    ``(col * stride + stride // 2, row * stride + stride // 2)``.

    Returns:
        tuple: points of shape (h*w, 2) in (x, y) order, and a (h*w,)
        tensor filled with this level's stride.
    """
    h, w = featmap_size
    xs = torch.arange(0, w * stride, stride, dtype=dtype, device=device)
    ys = torch.arange(0, h * stride, stride, dtype=dtype, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    flat_x = grid_x.reshape(-1)
    flat_y = grid_y.reshape(-1)
    # shift from cell corner to cell center
    points = torch.stack((flat_x, flat_y), dim=-1) + stride // 2
    strides = torch.full_like(points[:, 0], stride)
    return points, strides
def center_target(self, gt_bboxes_raw, gt_masks_raw, featmap_size):
    """Build stride-8 feature-map targets: a downscaled mask label and
    the gt box corners (normalized by the map size) at each covered cell.

    Args:
        gt_bboxes_raw (list[Tensor]): Per-image gt boxes in input-image
            coordinates, shape (num_gts, 4).
        gt_masks_raw (list[ndarray]): Per-image gt masks at input
            resolution, shape (num_gts, H, W).
        featmap_size (tuple): (h, w) of the stride-8 feature map.

    Returns:
        tuple:
            labels (Tensor): (num_imgs*h*w, 1) downscaled mask values.
            center_targets (Tensor): (num_imgs*h*w, 4) normalized corners.
    """
    stride = 8
    h, w = featmap_size
    # NOTE(review): x_range/y_range/x/y and mask_size below are computed
    # but never used — likely leftovers from an earlier implementation.
    x_range = torch.arange(0, w, 1, dtype=gt_bboxes_raw[0].dtype, device=gt_bboxes_raw[0].device)
    y_range = torch.arange(0, h, 1, dtype=gt_bboxes_raw[0].dtype, device=gt_bboxes_raw[0].device)
    y, x = torch.meshgrid(y_range, x_range)
    center_targets = []
    labels = []
    for n in range(len(gt_bboxes_raw)):
        # `.new(h, w, 4) + 0` allocates without zero-filling; cells that no
        # gt box covers keep uninitialized values.
        # NOTE(review): confirm downstream only reads covered cells.
        center_target = gt_bboxes_raw[n].new(featmap_size[0], featmap_size[1],4) + 0
        label = gt_bboxes_raw[n].new_zeros(featmap_size)
        gt_bboxes = gt_bboxes_raw[n]/stride
        gt_masks = gt_masks_raw[n]
        mask_size = gt_masks.shape
        # integer cell span of each downscaled gt box, clamped in-bounds
        # NOTE(review): the clamp bound uses (full-res mask size // stride);
        # verify this matches featmap_size.
        pos_left = torch.floor(gt_bboxes[:, 0]).long().clamp(0, gt_masks.shape[2]//stride - 1)
        pos_right = torch.ceil(gt_bboxes[:, 2]).long().clamp(0, gt_masks.shape[2]//stride - 1)
        pos_top = torch.floor(gt_bboxes[:, 1]).long().clamp(0, gt_masks.shape[1]//stride - 1)
        pos_down = torch.ceil(gt_bboxes[:, 3]).long().clamp(0, gt_masks.shape[1]//stride - 1)
        for px1, py1, px2, py2, gt_mask, (x1, y1, x2, y2) in \
                zip(pos_left, pos_top, pos_right, pos_down, gt_masks, gt_bboxes):
            # downscale the mask to map resolution, paste the box region
            gt_mask = mmcv.imrescale(gt_mask, scale=1. / stride)
            gt_mask = torch.Tensor(gt_mask)
            label[py1:py2 + 1, px1:px2 + 1] = gt_mask[py1:py2 + 1, px1:px2 + 1]
            # record the (downscaled) gt corners, normalized by (w, h)
            center_target[py1:py2 + 1, px1:px2 + 1, 0] = x1 / w
            center_target[py1:py2 + 1, px1:px2 + 1, 1] = y1 / h
            center_target[py1:py2 + 1, px1:px2 + 1, 2] = x2 / w
            center_target[py1:py2 + 1, px1:px2 + 1, 3] = y2 / h
        center_targets.append(center_target.reshape(-1, 4))
        labels.append(label.reshape(-1, 1))
    labels = torch.cat(labels)
    center_targets = torch.cat(center_targets)
    return labels, center_targets
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
    """Assign labels and bbox regression targets to every point of every
    level for a whole batch.

    Args:
        points (list[Tensor]): Per-level point coordinates.
        gt_bboxes_list (list[Tensor]): Per-image ground-truth boxes.
        gt_labels_list (list[Tensor]): Per-image ground-truth labels.

    Returns:
        tuple: per-level concatenated labels, per-level concatenated bbox
        targets, per-image split labels, per-image split bbox targets,
        and per-image matched gt indices.
    """
    assert len(points) == len(self.regress_ranges)
    num_levels = len(points)
    # broadcast each level's (min, max) regression range over its points
    expanded_regress_ranges = [
        lvl_points.new_tensor(lvl_range)[None].expand_as(lvl_points)
        for lvl_points, lvl_range in zip(points, self.regress_ranges)
    ]
    # flatten levels so the per-image assignment sees a single point set
    concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
    concat_points = torch.cat(points, dim=0)
    # number of points per level (needed to split results back)
    num_points = [lvl.size(0) for lvl in points]
    # per-image target assignment
    labels_list, bbox_targets_list, gt_inds = multi_apply(
        self.fcos_target_single,
        gt_bboxes_list,
        gt_labels_list,
        points=concat_points,
        regress_ranges=concat_regress_ranges,
        num_points_per_lvl=num_points)
    # split each image's flat targets back into per-level chunks
    labels_list = [img_labels.split(num_points, 0)
                   for img_labels in labels_list]
    bbox_targets_list = [img_targets.split(num_points, 0)
                         for img_targets in bbox_targets_list]
    # regroup: concatenate all images for each level
    concat_lvl_labels = [
        torch.cat([img_labels[lvl] for img_labels in labels_list])
        for lvl in range(num_levels)
    ]
    concat_lvl_bbox_targets = [
        torch.cat([img_targets[lvl] for img_targets in bbox_targets_list])
        for lvl in range(num_levels)
    ]
    return concat_lvl_labels, concat_lvl_bbox_targets, labels_list, bbox_targets_list, gt_inds
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
                       num_points_per_lvl):
    """Compute FCOS classification labels and bbox regression targets for
    all points of a single image.

    Args:
        gt_bboxes (Tensor): Ground-truth boxes (num_gts, 4), xyxy.
        gt_labels (Tensor): Ground-truth class labels (num_gts,).
        points (Tensor): All-level point coordinates (num_points, 2).
        regress_ranges (Tensor): Allowed regression range per point
            (num_points, 2).
        num_points_per_lvl (list[int]): Number of points on each level.

    Returns:
        tuple:
            labels (Tensor): (num_points,) class label per point, 0 = bg.
            bbox_targets (Tensor): (num_points, 4) l/t/r/b distances.
            gt_ind (Tensor): matched gt index for each positive point.
    """
    num_points = points.size(0)
    num_gts = gt_labels.size(0)
    if num_gts == 0:
        # BUG FIX: the original returned only two values here while the
        # normal path returns three; callers unpacking
        # (labels, bbox_targets, gt_inds) from multi_apply would fail on
        # images without annotations. Also return an empty index tensor.
        return (gt_labels.new_zeros(num_points),
                gt_bboxes.new_zeros((num_points, 4)),
                gt_labels.new_zeros(0))
    # NOTE(review): the +1 suggests an inclusive-pixel box convention —
    # confirm against the dataset's annotation format.
    areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
        gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
    # TODO: figure out why these two are different
    # areas = areas[None].expand(num_points, num_gts)
    areas = areas[None].repeat(num_points, 1)
    regress_ranges = regress_ranges[:, None, :].expand(
        num_points, num_gts, 2)
    gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
    xs, ys = points[:, 0], points[:, 1]
    xs = xs[:, None].expand(num_points, num_gts)
    ys = ys[:, None].expand(num_points, num_gts)
    # signed distances from each point to the four sides of each gt box
    left = xs - gt_bboxes[..., 0]
    right = gt_bboxes[..., 2] - xs
    top = ys - gt_bboxes[..., 1]
    bottom = gt_bboxes[..., 3] - ys
    bbox_targets = torch.stack((left, top, right, bottom), -1)
    if self.center_sampling:
        # condition1: inside a `center bbox` around the gt center
        radius = self.center_sample_radius
        center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
        center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
        center_gts = torch.zeros_like(gt_bboxes)
        stride = center_xs.new_zeros(center_xs.shape)
        # project the points on current lvl back to the `original` sizes
        lvl_begin = 0
        for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
            lvl_end = lvl_begin + num_points_lvl
            stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
            lvl_begin = lvl_end
        x_mins = center_xs - stride
        y_mins = center_ys - stride
        x_maxs = center_xs + stride
        y_maxs = center_ys + stride
        # clip the sampling box so it never exceeds the gt box
        center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
                                         x_mins, gt_bboxes[..., 0])
        center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
                                         y_mins, gt_bboxes[..., 1])
        center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
                                         gt_bboxes[..., 2], x_maxs)
        center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
                                         gt_bboxes[..., 3], y_maxs)
        cb_dist_left = xs - center_gts[..., 0]
        cb_dist_right = center_gts[..., 2] - xs
        cb_dist_top = ys - center_gts[..., 1]
        cb_dist_bottom = center_gts[..., 3] - ys
        center_bbox = torch.stack(
            (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
        inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
    else:
        # condition1: inside a gt bbox
        inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
    # condition2: limit the regression range for each location
    max_regress_distance = bbox_targets.max(-1)[0]
    inside_regress_range = (
        max_regress_distance >= regress_ranges[..., 0]) & (
            max_regress_distance <= regress_ranges[..., 1])
    # if there are still more than one objects for a location,
    # we choose the one with minimal area
    areas[inside_gt_bbox_mask == 0] = INF
    areas[inside_regress_range == 0] = INF
    min_area, min_area_inds = areas.min(dim=1)
    labels = gt_labels[min_area_inds]
    labels[min_area == INF] = 0  # unmatched points become background
    bbox_targets = bbox_targets[range(num_points), min_area_inds]
    gt_ind = min_area_inds[labels > 0]
    return labels, bbox_targets, gt_ind
def centerness_target(self, pos_bbox_targets):
    """Map positive (l, t, r, b) regression targets to centerness scores.

    centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b)))

    Only positive targets must be passed in: a negative point has a zero
    max-distance, which would yield 0/0 = nan.
    """
    lr = pos_bbox_targets[:, [0, 2]]
    tb = pos_bbox_targets[:, [1, 3]]
    lr_ratio = lr.min(dim=-1)[0] / lr.max(dim=-1)[0]
    tb_ratio = tb.min(dim=-1)[0] / tb.max(dim=-1)[0]
    return torch.sqrt(lr_ratio * tb_ratio)
def fast_nms(self, boxes, scores, masks, iou_threshold=0.5, top_k=200, score_thr=0.1):
    """Class-parallel (YOLACT-style) NMS via one matrix IoU computation.

    Args:
        boxes (Tensor): (num_dets, 4) candidate boxes.
        scores (Tensor): (num_classes, num_dets) per-class scores.
        masks (Tensor): (num_dets, K) mask coefficients.
        iou_threshold (float): suppress a box when a higher-scoring box of
            the same class overlaps it by more than this.
        top_k (int): number of candidates kept per class before NMS.
        score_thr (float): minimum score to survive.

    Returns:
        tuple: boxes with scores appended (M, 5), class ids (M,), and the
        surviving mask coefficients (M, K); M is capped at 100.
    """
    # per-class sort, keep the top_k candidates of every class
    sorted_scores, order = scores.sort(1, descending=True)
    order = order[:, :top_k].contiguous()
    sorted_scores = sorted_scores[:, :top_k]
    num_classes, num_dets = order.size()
    flat = order.view(-1)
    boxes_per_cls = boxes[flat, :].view(num_classes, num_dets, 4)
    masks_per_cls = masks[flat, :].view(num_classes, num_dets, -1)
    # pairwise IoU within each class; zero the lower triangle so each box
    # is only compared against higher-scoring ones
    iou = self.jaccard(boxes_per_cls, boxes_per_cls)
    iou.triu_(diagonal=1)
    iou_max, _ = iou.max(dim=1)
    # keep a box when no better box of its class overlaps it too much;
    # also enforce the score threshold (see YOLACT for the trade-off of
    # skipping a per-image max-detections cap here)
    keep = (iou_max <= iou_threshold) * (sorted_scores > score_thr)
    cls_ids = torch.arange(num_classes, device=boxes.device)[:, None].expand_as(keep)
    cls_ids = cls_ids[keep]
    kept_boxes = boxes_per_cls[keep]
    kept_masks = masks_per_cls[keep]
    kept_scores = sorted_scores[keep]
    # global re-sort across classes, truncate to the 100 best detections
    kept_scores, rank = kept_scores.sort(0, descending=True)
    rank = rank[:100]
    kept_scores = kept_scores[:100]
    cls_ids = cls_ids[rank]
    kept_boxes = kept_boxes[rank]
    kept_masks = kept_masks[rank]
    kept_boxes = torch.cat([kept_boxes, kept_scores[:, None]], dim=1)
    return kept_boxes, cls_ids, kept_masks
def jaccard(self, box_a, box_b, iscrowd: bool = False):
    """Compute the pairwise jaccard (IoU) overlap of two box sets.

    A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)

    With ``iscrowd=True`` the overlap is normalized by area(A) only
    (put the crowd regions in ``box_b``).

    Args:
        box_a: (tensor) boxes, shape [num_a, 4] or batched [n, num_a, 4].
        box_b: (tensor) boxes, shape [num_b, 4] or batched [n, num_b, 4].

    Return:
        (tensor) overlaps, [num_a, num_b] or [n, num_a, num_b].
    """
    batched = box_a.dim() != 2
    if not batched:
        # promote both inputs to a batch of size 1
        box_a = box_a.unsqueeze(0)
        box_b = box_b.unsqueeze(0)
    inter = self.intersect(box_a, box_b)
    width_a = box_a[:, :, 2] - box_a[:, :, 0]
    height_a = box_a[:, :, 3] - box_a[:, :, 1]
    width_b = box_b[:, :, 2] - box_b[:, :, 0]
    height_b = box_b[:, :, 3] - box_b[:, :, 1]
    area_a = (width_a * height_a).unsqueeze(2).expand_as(inter)  # [A,B]
    area_b = (width_b * height_b).unsqueeze(1).expand_as(inter)  # [A,B]
    if iscrowd:
        out = inter / area_a
    else:
        out = inter / (area_a + area_b - inter)
    if batched:
        return out
    return out.squeeze(0)
def intersect(self, box_a, box_b):
    """Compute pairwise intersection areas of two batched box sets.

    Both tensors are broadcast to [n, A, B, 2] views (no new allocation
    beyond the expand) before taking the overlap of every (a, b) pair.

    Args:
        box_a: (tensor) bounding boxes, Shape: [n, A, 4].
        box_b: (tensor) bounding boxes, Shape: [n, B, 4].

    Return:
        (tensor) intersection area, Shape: [n, A, B].
    """
    n = box_a.size(0)
    A = box_a.size(1)
    B = box_b.size(1)
    # element-wise min of the max-corners and max of the min-corners
    upper = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),
                      box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))
    lower = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),
                      box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))
    # negative extents mean no overlap -> clamp to zero area
    wh = torch.clamp(upper - lower, min=0)
    return wh[:, :, :, 0] * wh[:, :, :, 1]
"mmdet.ops.CropSplit",
"mmcv.cnn.kaiming_init",
"torch.nn.ReLU",
"torch.nn.Sequential",
"mmdet.ops.CropSplitGt",
"torch.sqrt",
"numpy.array",
"mmdet.core.bbox_overlaps",
"torch.sum",
"torch.nn.functional.interpolate",
"mmdet.ops.DeformConv",
"torch.arange",
"torch.nn.GroupNorm",
"mmcv.imre... | [((535, 613), 'torch.cat', 'torch.cat', (['((boxes[:, 2:] + boxes[:, :2]) / 2, boxes[:, 2:] - boxes[:, :2])', '(1)'], {}), '(((boxes[:, 2:] + boxes[:, :2]) / 2, boxes[:, 2:] - boxes[:, :2]), 1)\n', (544, 613), False, 'import torch, mmcv\n'), ((2503, 2536), 'torch.clamp', 'torch.clamp', (['x1'], {'min': '(0)', 'max': '(w - 1)'}), '(x1, min=0, max=w - 1)\n', (2514, 2536), False, 'import torch, mmcv\n'), ((2546, 2579), 'torch.clamp', 'torch.clamp', (['y1'], {'min': '(0)', 'max': '(h - 1)'}), '(y1, min=0, max=h - 1)\n', (2557, 2579), False, 'import torch, mmcv\n'), ((2589, 2622), 'torch.clamp', 'torch.clamp', (['x2'], {'min': '(0)', 'max': '(w - 1)'}), '(x2, min=0, max=w - 1)\n', (2600, 2622), False, 'import torch, mmcv\n'), ((2632, 2665), 'torch.clamp', 'torch.clamp', (['y2'], {'min': '(0)', 'max': '(h - 1)'}), '(y2, min=0, max=h - 1)\n', (2643, 2665), False, 'import torch, mmcv\n'), ((2675, 2708), 'torch.clamp', 'torch.clamp', (['xc'], {'min': '(0)', 'max': '(w - 1)'}), '(xc, min=0, max=w - 1)\n', (2686, 2708), False, 'import torch, mmcv\n'), ((2718, 2751), 'torch.clamp', 'torch.clamp', (['yc'], {'min': '(0)', 'max': '(h - 1)'}), '(yc, min=0, max=h - 1)\n', (2729, 2751), False, 'import torch, mmcv\n'), ((11362, 11427), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n", (11372, 11427), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((21145, 21210), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n", (21155, 21210), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((1009, 1073), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(deformable_groups * offset_channels)', '(1)'], 
{'bias': '(False)'}), '(4, deformable_groups * offset_channels, 1, bias=False)\n', (1018, 1073), True, 'import torch.nn as nn\n'), ((1214, 1350), 'mmdet.ops.DeformConv', 'DeformConv', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'padding': '((kernel_size - 1) // 2)', 'deformable_groups': 'deformable_groups'}), '(in_channels, out_channels, kernel_size=kernel_size, padding=(\n kernel_size - 1) // 2, deformable_groups=deformable_groups)\n', (1224, 1350), False, 'from mmdet.ops import DeformConv, CropSplit, CropSplitGt\n'), ((1526, 1547), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1533, 1547), True, 'import torch.nn as nn\n'), ((1568, 1597), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(32)', 'in_channels'], {}), '(32, in_channels)\n', (1580, 1597), True, 'import torch.nn as nn\n'), ((1685, 1740), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['self.conv_offset.weight'], {'std': '(0.0)'}), '(self.conv_offset.weight, std=0.0)\n', (1706, 1740), False, 'import torch, mmcv\n'), ((1749, 1807), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['self.conv_adaption.weight'], {'std': '(0.01)'}), '(self.conv_adaption.weight, std=0.01)\n', (1770, 1807), False, 'import torch, mmcv\n'), ((6171, 6186), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6184, 6186), True, 'import torch.nn as nn\n'), ((6212, 6227), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6225, 6227), True, 'import torch.nn as nn\n'), ((7178, 7244), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 'self.cls_out_channels', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.cls_out_channels, 3, padding=1)\n', (7187, 7244), True, 'import torch.nn as nn\n'), ((7282, 7328), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(4)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 4, 3, padding=1)\n', (7291, 7328), True, 'import torch.nn as nn\n'), ((7360, 7406), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 
'(1)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 1, 3, padding=1)\n', (7369, 7406), True, 'import torch.nn as nn\n'), ((7685, 7741), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(self.nc * 4)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.nc * 4, 3, padding=1)\n', (7694, 7741), True, 'import torch.nn as nn\n'), ((7769, 7806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'self.nc', '(3)'], {'padding': '(1)'}), '(512, self.nc, 3, padding=1)\n', (7778, 7806), True, 'import torch.nn as nn\n'), ((7836, 7869), 'torch.nn.Conv2d', 'nn.Conv2d', (['(768)', '(512)', '(1)'], {'padding': '(0)'}), '(768, 512, 1, padding=0)\n', (7845, 7869), True, 'import torch.nn as nn\n'), ((8743, 8764), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8750, 8764), True, 'import torch.nn as nn\n'), ((8790, 8802), 'mmdet.ops.CropSplit', 'CropSplit', (['(2)'], {}), '(2)\n', (8799, 8802), False, 'from mmdet.ops import DeformConv, CropSplit, CropSplitGt\n'), ((8831, 8845), 'mmdet.ops.CropSplitGt', 'CropSplitGt', (['(2)'], {}), '(2)\n', (8842, 8845), False, 'from mmdet.ops import DeformConv, CropSplit, CropSplitGt\n'), ((9106, 9157), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_cls'], {'std': '(0.01)', 'bias': 'bias_cls'}), '(self.fcos_cls, std=0.01, bias=bias_cls)\n', (9117, 9157), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((9166, 9202), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_reg'], {'std': '(0.01)'}), '(self.fcos_reg, std=0.01)\n', (9177, 9202), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((9211, 9254), 'mmcv.cnn.normal_init', 'normal_init', (['self.fcos_centerness'], {'std': '(0.01)'}), '(self.fcos_centerness, std=0.01)\n', (9222, 9254), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((9264, 9300), 'mmcv.cnn.normal_init', 'normal_init', (['self.sip_cof'], {'std': '(0.001)'}), '(self.sip_cof, std=0.001)\n', (9275, 9300), False, 'from mmcv.cnn import normal_init, 
kaiming_init\n'), ((9309, 9349), 'mmcv.cnn.normal_init', 'normal_init', (['self.sip_mask_lat'], {'std': '(0.01)'}), '(self.sip_mask_lat, std=0.01)\n', (9320, 9349), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((9358, 9399), 'mmcv.cnn.normal_init', 'normal_init', (['self.sip_mask_lat0'], {'std': '(0.01)'}), '(self.sip_mask_lat0, std=0.01)\n', (9369, 9399), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((11057, 11085), 'torch.cat', 'torch.cat', (['feat_masks'], {'dim': '(1)'}), '(feat_masks, dim=1)\n', (11066, 11085), False, 'import torch, mmcv\n'), ((11200, 11279), 'torch.nn.functional.interpolate', 'F.interpolate', (['feat_masks'], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(feat_masks, scale_factor=4, mode='bilinear', align_corners=False)\n", (11213, 11279), True, 'import torch.nn.functional as F\n'), ((13932, 13961), 'torch.cat', 'torch.cat', (['flatten_cls_scores'], {}), '(flatten_cls_scores)\n', (13941, 13961), False, 'import torch, mmcv\n'), ((13991, 14020), 'torch.cat', 'torch.cat', (['flatten_bbox_preds'], {}), '(flatten_bbox_preds)\n', (14000, 14020), False, 'import torch, mmcv\n'), ((14050, 14079), 'torch.cat', 'torch.cat', (['flatten_centerness'], {}), '(flatten_centerness)\n', (14059, 14079), False, 'import torch, mmcv\n'), ((14105, 14122), 'torch.cat', 'torch.cat', (['labels'], {}), '(labels)\n', (14114, 14122), False, 'import torch, mmcv\n'), ((14154, 14177), 'torch.cat', 'torch.cat', (['bbox_targets'], {}), '(bbox_targets)\n', (14163, 14177), False, 'import torch, mmcv\n'), ((16076, 16113), 'torch.cat', 'torch.cat', (['flatten_cls_scores1'], {'dim': '(1)'}), '(flatten_cls_scores1, dim=1)\n', (16085, 16113), False, 'import torch, mmcv\n'), ((16363, 16398), 'torch.cat', 'torch.cat', (['flatten_cof_preds'], {'dim': '(1)'}), '(flatten_cof_preds, dim=1)\n', (16372, 16398), False, 'import torch, mmcv\n'), ((24864, 24886), 'torch.cat', 'torch.cat', (['mlvl_bboxes'], {}), '(mlvl_bboxes)\n', 
(24873, 24886), False, 'import torch, mmcv\n'), ((24907, 24927), 'torch.cat', 'torch.cat', (['mlvl_cofs'], {}), '(mlvl_cofs)\n', (24916, 24927), False, 'import torch, mmcv\n'), ((25035, 25057), 'torch.cat', 'torch.cat', (['mlvl_scores'], {}), '(mlvl_scores)\n', (25044, 25057), False, 'import torch, mmcv\n'), ((25145, 25185), 'torch.cat', 'torch.cat', (['[padding, mlvl_scores]'], {'dim': '(1)'}), '([padding, mlvl_scores], dim=1)\n', (25154, 25185), False, 'import torch, mmcv\n'), ((25212, 25238), 'torch.cat', 'torch.cat', (['mlvl_centerness'], {}), '(mlvl_centerness)\n', (25221, 25238), False, 'import torch, mmcv\n'), ((29606, 29669), 'torch.arange', 'torch.arange', (['(0)', '(w * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, w * stride, stride, dtype=dtype, device=device)\n', (29618, 29669), False, 'import torch, mmcv\n'), ((29701, 29764), 'torch.arange', 'torch.arange', (['(0)', '(h * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, h * stride, stride, dtype=dtype, device=device)\n', (29713, 29764), False, 'import torch, mmcv\n'), ((29793, 29825), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (29807, 29825), False, 'import torch, mmcv\n'), ((30130, 30218), 'torch.arange', 'torch.arange', (['(0)', 'w', '(1)'], {'dtype': 'gt_bboxes_raw[0].dtype', 'device': 'gt_bboxes_raw[0].device'}), '(0, w, 1, dtype=gt_bboxes_raw[0].dtype, device=gt_bboxes_raw[0]\n .device)\n', (30142, 30218), False, 'import torch, mmcv\n'), ((30232, 30320), 'torch.arange', 'torch.arange', (['(0)', 'h', '(1)'], {'dtype': 'gt_bboxes_raw[0].dtype', 'device': 'gt_bboxes_raw[0].device'}), '(0, h, 1, dtype=gt_bboxes_raw[0].dtype, device=gt_bboxes_raw[0]\n .device)\n', (30244, 30320), False, 'import torch, mmcv\n'), ((30331, 30363), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (30345, 30363), False, 'import torch, mmcv\n'), ((31882, 31899), 'torch.cat', 'torch.cat', 
(['labels'], {}), '(labels)\n', (31891, 31899), False, 'import torch, mmcv\n'), ((31925, 31950), 'torch.cat', 'torch.cat', (['center_targets'], {}), '(center_targets)\n', (31934, 31950), False, 'import torch, mmcv\n'), ((32458, 32499), 'torch.cat', 'torch.cat', (['expanded_regress_ranges'], {'dim': '(0)'}), '(expanded_regress_ranges, dim=0)\n', (32467, 32499), False, 'import torch, mmcv\n'), ((32524, 32548), 'torch.cat', 'torch.cat', (['points'], {'dim': '(0)'}), '(points, dim=0)\n', (32533, 32548), False, 'import torch, mmcv\n'), ((32760, 32928), 'mmdet.core.multi_apply', 'multi_apply', (['self.fcos_target_single', 'gt_bboxes_list', 'gt_labels_list'], {'points': 'concat_points', 'regress_ranges': 'concat_regress_ranges', 'num_points_per_lvl': 'num_points'}), '(self.fcos_target_single, gt_bboxes_list, gt_labels_list, points\n =concat_points, regress_ranges=concat_regress_ranges,\n num_points_per_lvl=num_points)\n', (32771, 32928), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((34838, 34881), 'torch.stack', 'torch.stack', (['(left, top, right, bottom)', '(-1)'], {}), '((left, top, right, bottom), -1)\n', (34849, 34881), False, 'import torch, mmcv\n'), ((38077, 38107), 'torch.sqrt', 'torch.sqrt', (['centerness_targets'], {}), '(centerness_targets)\n', (38087, 38107), False, 'import torch, mmcv\n'), ((39826, 39868), 'torch.cat', 'torch.cat', (['[boxes, scores[:, None]]'], {'dim': '(1)'}), '([boxes, scores[:, None]], dim=1)\n', (39835, 39868), False, 'import torch, mmcv\n'), ((42073, 42108), 'torch.clamp', 'torch.clamp', (['(max_xy - min_xy)'], {'min': '(0)'}), '(max_xy - min_xy, min=0)\n', (42084, 42108), False, 'import torch, mmcv\n'), ((8485, 8519), 'torch.nn.Sequential', 'nn.Sequential', (['*self.convs_scoring'], {}), '(*self.convs_scoring)\n', (8498, 8519), True, 'import torch.nn as nn\n'), ((8552, 8591), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(self.num_classes - 1)', '(1)'], 
{}), '(128, self.num_classes - 1, 1)\n', (8561, 8591), True, 'import torch.nn as nn\n'), ((8680, 8721), 'mmcv.cnn.normal_init', 'normal_init', (['self.mask_scoring'], {'std': '(0.001)'}), '(self.mask_scoring, std=0.001)\n', (8691, 8721), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((8948, 8977), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (8959, 8977), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((9023, 9052), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (9034, 9052), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((12988, 13012), 'torch.cat', 'torch.cat', (['bboxes'], {'dim': '(0)'}), '(bboxes, dim=0)\n', (12997, 13012), False, 'import torch, mmcv\n'), ((13035, 13060), 'torch.cat', 'torch.cat', (['targets'], {'dim': '(0)'}), '(targets, dim=0)\n', (13044, 13060), False, 'import torch, mmcv\n'), ((15109, 15164), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', '(pos_bbox_preds / pos_strides)'], {}), '(pos_points, pos_bbox_preds / pos_strides)\n', (15122, 15164), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((15202, 15259), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', '(pos_bbox_targets / pos_strides)'], {}), '(pos_points, pos_bbox_targets / pos_strides)\n', (15215, 15259), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((17462, 17522), 'mmdet.core.bbox_overlaps', 'bbox_overlaps', (['(bbox_gt[idx_gt] / 2)', 'bbox_dt'], {'is_aligned': '(True)'}), '(bbox_gt[idx_gt] / 2, bbox_dt, is_aligned=True)\n', (17475, 17522), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((17834, 17880), 'numpy.minimum', 'np.minimum', (['feat_masks[i].shape', 'gt_mask.shape'], {}), 
'(feat_masks[i].shape, gt_mask.shape)\n', (17844, 17880), True, 'import numpy as np\n'), ((18655, 18727), 'torch.stack', 'torch.stack', (['[pos_masks00, pos_masks01, pos_masks10, pos_masks11]'], {'dim': '(0)'}), '([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)\n', (18666, 18727), False, 'import torch, mmcv\n'), ((19058, 19124), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['pred_masks', 'gt_mask_crop'], {'reduction': '"""none"""'}), "(pred_masks, gt_mask_crop, reduction='none')\n", (19080, 19124), True, 'import torch.nn.functional as F\n'), ((24624, 24677), 'mmdet.core.distance2bbox', 'distance2bbox', (['points', 'bbox_pred'], {'max_shape': 'img_shape'}), '(points, bbox_pred, max_shape=img_shape)\n', (24637, 24677), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((25323, 25444), 'mmdet.core.multiclass_nms_idx', 'multiclass_nms_idx', (['mlvl_bboxes', 'mlvl_scores', 'cfg.score_thr', 'cfg.nms', 'cfg.max_per_img'], {'score_factors': 'mlvl_centerness'}), '(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.\n max_per_img, score_factors=mlvl_centerness)\n', (25341, 25444), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((26560, 26632), 'torch.stack', 'torch.stack', (['[pos_masks00, pos_masks01, pos_masks10, pos_masks11]'], {'dim': '(0)'}), '([pos_masks00, pos_masks01, pos_masks10, pos_masks11], dim=0)\n', (26571, 26632), False, 'import torch, mmcv\n'), ((28264, 28318), 'numpy.zeros', 'np.zeros', (['(ori_shape[0], ori_shape[1])'], {'dtype': 'np.uint8'}), '((ori_shape[0], ori_shape[1]), dtype=np.uint8)\n', (28272, 28318), True, 'import numpy as np\n'), ((28339, 28377), 'numpy.minimum', 'np.minimum', (['mask.shape', 'ori_shape[0:2]'], {}), '(mask.shape, ori_shape[0:2])\n', (28349, 28377), True, 'import numpy as np\n'), ((35209, 35236), 'torch.zeros_like', 'torch.zeros_like', 
(['gt_bboxes'], {}), '(gt_bboxes)\n', (35225, 35236), False, 'import torch, mmcv\n'), ((35831, 35897), 'torch.where', 'torch.where', (['(x_mins > gt_bboxes[..., 0])', 'x_mins', 'gt_bboxes[..., 0]'], {}), '(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0])\n', (35842, 35897), False, 'import torch, mmcv\n'), ((35976, 36042), 'torch.where', 'torch.where', (['(y_mins > gt_bboxes[..., 1])', 'y_mins', 'gt_bboxes[..., 1]'], {}), '(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1])\n', (35987, 36042), False, 'import torch, mmcv\n'), ((36121, 36187), 'torch.where', 'torch.where', (['(x_maxs > gt_bboxes[..., 2])', 'gt_bboxes[..., 2]', 'x_maxs'], {}), '(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs)\n', (36132, 36187), False, 'import torch, mmcv\n'), ((36266, 36332), 'torch.where', 'torch.where', (['(y_maxs > gt_bboxes[..., 3])', 'gt_bboxes[..., 3]', 'y_maxs'], {}), '(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs)\n', (36277, 36332), False, 'import torch, mmcv\n'), ((36611, 36686), 'torch.stack', 'torch.stack', (['(cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom)', '(-1)'], {}), '((cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)\n', (36622, 36686), False, 'import torch, mmcv\n'), ((6394, 6534), 'mmdet.ops.ConvModule', 'ConvModule', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'conv_cfg': 'self.conv_cfg', 'norm_cfg': 'self.norm_cfg', 'bias': '(self.norm_cfg is None)'}), '(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.\n conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)\n', (6404, 6534), False, 'from mmdet.ops import ConvModule, Scale\n'), ((6856, 6996), 'mmdet.ops.ConvModule', 'ConvModule', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'conv_cfg': 'self.conv_cfg', 'norm_cfg': 'self.norm_cfg', 'bias': '(self.norm_cfg is None)'}), '(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.\n conv_cfg, norm_cfg=self.norm_cfg, 
bias=self.norm_cfg is None)\n', (6866, 6996), False, 'from mmdet.ops import ConvModule, Scale\n'), ((7444, 7454), 'mmdet.ops.Scale', 'Scale', (['(1.0)'], {}), '(1.0)\n', (7449, 7454), False, 'from mmdet.ops import ConvModule, Scale\n'), ((8647, 8667), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['m.conv'], {}), '(m.conv)\n', (8659, 8667), False, 'from mmcv.cnn import normal_init, kaiming_init\n'), ((17538, 17553), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17551, 17553), False, 'import torch, mmcv\n'), ((31305, 31348), 'mmcv.imrescale', 'mmcv.imrescale', (['gt_mask'], {'scale': '(1.0 / stride)'}), '(gt_mask, scale=1.0 / stride)\n', (31319, 31348), False, 'import torch, mmcv\n'), ((31374, 31395), 'torch.Tensor', 'torch.Tensor', (['gt_mask'], {}), '(gt_mask)\n', (31386, 31395), False, 'import torch, mmcv\n'), ((33438, 33486), 'torch.cat', 'torch.cat', (['[labels[i] for labels in labels_list]'], {}), '([labels[i] for labels in labels_list])\n', (33447, 33486), False, 'import torch, mmcv\n'), ((33548, 33614), 'torch.cat', 'torch.cat', (['[bbox_targets[i] for bbox_targets in bbox_targets_list]'], {}), '([bbox_targets[i] for bbox_targets in bbox_targets_list])\n', (33557, 33614), False, 'import torch, mmcv\n'), ((2189, 2246), 'torch.arange', 'torch.arange', (['w'], {'device': 'masks00.device', 'dtype': 'boxes.dtype'}), '(w, device=masks00.device, dtype=boxes.dtype)\n', (2201, 2246), False, 'import torch, mmcv\n'), ((2289, 2346), 'torch.arange', 'torch.arange', (['h'], {'device': 'masks00.device', 'dtype': 'boxes.dtype'}), '(h, device=masks00.device, dtype=boxes.dtype)\n', (2301, 2346), False, 'import torch, mmcv\n'), ((8246, 8333), 'mmdet.ops.ConvModule', 'ConvModule', (['in_channels', 'out_channels', '(3)'], {'stride': 'stride', 'padding': 'padding', 'bias': '(True)'}), '(in_channels, out_channels, 3, stride=stride, padding=padding,\n bias=True)\n', (8256, 8333), False, 'from mmdet.ops import ConvModule, Scale\n'), ((10806, 10896), 
'torch.nn.functional.interpolate', 'F.interpolate', (['reg_feat'], {'scale_factor': '(2 ** count)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(reg_feat, scale_factor=2 ** count, mode='bilinear',\n align_corners=False)\n", (10819, 10896), True, 'import torch.nn.functional as F\n'), ((12865, 12897), 'mmdet.core.distance2bbox', 'distance2bbox', (['points', 'bbox_pred'], {}), '(points, bbox_pred)\n', (12878, 12897), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((12930, 12964), 'mmdet.core.distance2bbox', 'distance2bbox', (['points', 'bbox_target'], {}), '(points, bbox_target)\n', (12943, 12964), False, 'from mmdet.core import distance2bbox, bbox_overlaps, force_fp32, multi_apply, multiclass_nms, multiclass_nms_idx\n'), ((19930, 19945), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19943, 19945), False, 'import torch, mmcv\n'), ((28499, 28545), 'numpy.array', 'np.array', (['im_mask[:, :, np.newaxis]'], {'order': '"""F"""'}), "(im_mask[:, :, np.newaxis], order='F')\n", (28507, 28545), True, 'import numpy as np\n'), ((39339, 39385), 'torch.arange', 'torch.arange', (['num_classes'], {'device': 'boxes.device'}), '(num_classes, device=boxes.device)\n', (39351, 39385), False, 'import torch, mmcv\n'), ((13283, 13347), 'numpy.array', 'np.array', (['gt_masks_list[i][:gt_label.shape[0]]'], {'dtype': 'np.float32'}), '(gt_masks_list[i][:gt_label.shape[0]], dtype=np.float32)\n', (13291, 13347), True, 'import numpy as np\n'), ((17639, 17659), 'torch.sum', 'torch.sum', (['weighting'], {}), '(weighting)\n', (17648, 17659), False, 'import torch, mmcv\n'), ((18156, 18198), 'torch.index_select', 'torch.index_select', (['gt_mask_new', '(0)', 'idx_gt'], {}), '(gt_mask_new, 0, idx_gt)\n', (18174, 18198), False, 'import torch, mmcv\n'), ((30755, 30783), 'torch.floor', 'torch.floor', (['gt_bboxes[:, 0]'], {}), '(gt_bboxes[:, 0])\n', (30766, 30783), False, 'import torch, mmcv\n'), ((30855, 
30882), 'torch.ceil', 'torch.ceil', (['gt_bboxes[:, 2]'], {}), '(gt_bboxes[:, 2])\n', (30865, 30882), False, 'import torch, mmcv\n'), ((30952, 30980), 'torch.floor', 'torch.floor', (['gt_bboxes[:, 1]'], {}), '(gt_bboxes[:, 1])\n', (30963, 30980), False, 'import torch, mmcv\n'), ((31051, 31078), 'torch.ceil', 'torch.ceil', (['gt_bboxes[:, 3]'], {}), '(gt_bboxes[:, 3])\n', (31061, 31078), False, 'import torch, mmcv\n')] |
from typing import Optional, Any
from functools import lru_cache
import numpy as np
from .form import Form, FormDict
from ..basis import Basis
from skfem.generic_utils import HashableNdArray
class BilinearForm(Form):
    """A bilinear form for finite element assembly.
    Bilinear forms are defined using functions that takes three arguments:
    trial function ``u``, test function ``v``, and a dictionary of additional
    parameters ``w``.
    >>> from skfem import BilinearForm, InteriorBasis, MeshTri, ElementTriP1
    >>> form = BilinearForm(lambda u, v, w: u * v)
    >>> form.assemble(InteriorBasis(MeshTri(), ElementTriP1())).todense()
    matrix([[0.08333333, 0.04166667, 0.04166667, 0.        ],
            [0.04166667, 0.16666667, 0.08333333, 0.04166667],
            [0.04166667, 0.08333333, 0.16666667, 0.04166667],
            [0.        , 0.04166667, 0.04166667, 0.08333333]])
    Alternatively, you can use :class:`~skfem.assembly.BilinearForm` as a
    decorator:
    >>> @BilinearForm
    ... def form(u, v, w):
    ...     return u * v
    Inside the form definition, ``u`` and ``v`` are tuples containing the basis
    function values at quadrature points. They also contain the values of
    the derivatives:
    >>> @BilinearForm
    ... def form(u, v, w):
    ...     # u[1][0] is first derivative with respect to x, and so on
    ...     return u[1][0] * v[1][0] + u[1][1] * v[1][1] # laplacian
    In practice, we suggest you to use helper functions from
    :mod:`skfem.helpers` to make the forms readable:
    >>> from skfem.helpers import dot, grad
    >>> @BilinearForm
    ... def form(u, v, w):
    ...     return dot(grad(u), grad(v))
    """
    def assemble(self,
                 ubasis: Basis,
                 vbasis: Optional[Basis] = None,
                 **kwargs) -> Any:
        """Assemble the bilinear form into a sparse matrix.

        Parameters
        ----------
        ubasis
            The :class:`~skfem.assembly.Basis` for ``u``.
        vbasis
            Optionally, specify a different :class:`~skfem.assembly.Basis`
            for ``v``.
        **kwargs
            Any additional keyword arguments are appended to ``w``.

        Returns
        -------
        A scipy sparse matrix of shape ``(vbasis.N, ubasis.N)``.

        Raises
        ------
        ValueError
            If ``ubasis`` and ``vbasis`` use a different number of
            quadrature points.
        """
        if vbasis is None:
            vbasis = ubasis
        elif ubasis.X.shape[1] != vbasis.X.shape[1]:
            raise ValueError("Quadrature mismatch: trial and test functions "
                             "should have same number of integration points.")
        nt = ubasis.nelems
        # dx is wrapped into a hashable type so it can serve as part of the
        # lru_cache key in self._kernel below.
        dx = HashableNdArray(ubasis.dx)
        wdict = FormDict({
            **ubasis.default_parameters(),
            **self.dictify(kwargs)
        })
        # initialize COO data structures
        # Each data[i] rows[i] cols[i] triplet corresponds to an integral over
        # a single element between basis functions rows[i] and cols[i].
        sz = ubasis.Nbfun * vbasis.Nbfun * nt
        data = np.zeros(sz, dtype=self.dtype)
        rows = np.zeros(sz, dtype='int64')
        cols = np.zeros(sz, dtype='int64')
        # loop over the indices of local stiffness matrix
        ixs = 0 # Track index in the (data, rows, cols) triplets.
        for j in range(ubasis.Nbfun):
            for i in range(vbasis.Nbfun):
                d = self._kernel(
                    ubasis.basis[j],
                    vbasis.basis[i],
                    wdict,
                    dx
                )
                # Skip all-zero local contributions so the COO arrays (and
                # the resulting sparse matrix) stay as small as possible.
                if (d != np.zeros_like(d)).any():
                    r = vbasis.element_dofs[i]
                    ix_slice = slice(ixs, ixs + len(r))
                    rows[ix_slice] = r
                    cols[ix_slice] = ubasis.element_dofs[j]
                    data[ix_slice] = d
                    ixs += len(r)
        # Only the first `ixs` entries were filled; trim before assembling.
        return self._assemble_scipy_matrix(
            data[0:ixs],
            rows[0:ixs],
            cols[0:ixs],
            (vbasis.N, ubasis.N)
        )
    # NOTE(review): lru_cache on an instance method keys on (and keeps alive)
    # `self` for the cache's lifetime; acceptable only if BilinearForm
    # instances are long-lived -- TODO confirm.
    @lru_cache(maxsize=128)
    def _kernel(self, u, v, w, dx):
        """Integrate the form for one (trial, test) basis-function pair over
        all elements at once; summing over axis 1 performs the quadrature."""
        return np.sum(self.form(*u, *v, w) * dx, axis=1)
| [
"functools.lru_cache",
"skfem.generic_utils.HashableNdArray",
"numpy.zeros_like",
"numpy.zeros"
] | [((3913, 3935), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(128)'}), '(maxsize=128)\n', (3922, 3935), False, 'from functools import lru_cache\n'), ((2522, 2548), 'skfem.generic_utils.HashableNdArray', 'HashableNdArray', (['ubasis.dx'], {}), '(ubasis.dx)\n', (2537, 2548), False, 'from skfem.generic_utils import HashableNdArray\n'), ((2920, 2950), 'numpy.zeros', 'np.zeros', (['sz'], {'dtype': 'self.dtype'}), '(sz, dtype=self.dtype)\n', (2928, 2950), True, 'import numpy as np\n'), ((2966, 2993), 'numpy.zeros', 'np.zeros', (['sz'], {'dtype': '"""int64"""'}), "(sz, dtype='int64')\n", (2974, 2993), True, 'import numpy as np\n'), ((3009, 3036), 'numpy.zeros', 'np.zeros', (['sz'], {'dtype': '"""int64"""'}), "(sz, dtype='int64')\n", (3017, 3036), True, 'import numpy as np\n'), ((3444, 3460), 'numpy.zeros_like', 'np.zeros_like', (['d'], {}), '(d)\n', (3457, 3460), True, 'import numpy as np\n')] |
"""
Module for shapefile resampling methods.
This code was originally developed by <NAME>.
(https://github.com/basaks)
See `uncoverml.scripts.shiftmap_cli` for a resampling CLI.
"""
import tempfile
import os
from os.path import abspath, exists, splitext
from os import remove
import logging
import geopandas as gpd
import pandas as pd
import pandas.core.algorithms as algos
import numpy as np
import sklearn
from shapely.geometry import Polygon
from fiona.errors import DriverError
import uncoverml as ls
import uncoverml.mllog
import uncoverml.targets
# Name of the temporary column holding each target's bin/tile id.
BIN = 'bin'
# Name of the geometry column in the GeoDataFrames handled here.
GEOMETRY = 'geometry'
_logger = logging.getLogger(__name__)
def bootstrap_data_indicies(population, samples=None, random_state=1):
    """Draw bootstrap (with replacement) indices into ``range(population)``.

    When ``samples`` is None, draw ``population`` indices; the draw is
    reproducible via ``random_state``.
    """
    if samples is None:
        samples = population
    rng = np.random.RandomState(random_state)
    return rng.randint(0, population, samples)
def prepapre_dataframe(data, fields_to_keep):
    """Normalise *data* into a GeoDataFrame and keep only the requested fields.

    Accepts an existing GeoDataFrame, an uncoverml ``Targets`` object, or a
    path to a shapefile; re-raises the underlying ``DriverError`` when the
    path cannot be read.
    """
    if isinstance(data, gpd.GeoDataFrame):
        frame = data
    elif isinstance(data, ls.targets.Targets):
        frame = data.to_geodataframe()
    else:
        # Last resort: interpret `data` as a path to a shapefile.
        try:
            frame = gpd.read_file(data)
        except DriverError:
            _logger.error(
                "Couldn't read data for resampling. Ensure a valid "
                "shapefile path or Targets object is being provided "
                "as input.")
            raise
    return filter_fields(fields_to_keep, frame)
def filter_fields(fields_to_keep, gdf):
    """Return ``gdf`` restricted to the geometry column plus ``fields_to_keep``.

    Raises RuntimeError naming the first requested field that is missing
    from the frame.
    """
    keep = [GEOMETRY] + list(fields_to_keep)  # geometry always comes first
    available = gdf.columns
    missing = [f for f in keep if f not in available]
    if missing:
        raise RuntimeError("field '{}' must exist in shapefile".format(missing[0]))
    return gdf[keep]
def resample_by_magnitude(input_data, target_field, bins=10, interval='percentile',
                          fields_to_keep=[], bootstrap=True, output_samples=None,
                          validation=False, validation_points=100):
    """
    Resample targets so that every magnitude bin of ``target_field``
    contributes (roughly) the same number of points.

    Parameters
    ----------
    input_data : geopandas.GeoDataFrame, uncoverml.targets.Targets or str
        Targets to be resampled (or a path to a shapefile containing them).
    target_field : str
        target field name based on which resampling is performed. Field
        must exist in the input shapefile
    bins : int
        number of bins for sampling
    interval : str
        'percentile' (equal-population bin edges) or 'linear'
        (equal-width bin edges); unknown values fall back to 'percentile'
    fields_to_keep : list
        of strings to store in the output shapefile
    bootstrap : bool, optional
        whether to sample with replacement or not
    output_samples : int, optional
        number of samples in the output shpfile. If not provided, the
        output samples will be assumed to be the same as the original
        shapefile
    validation : bool, optional
        whether to also build a validation set from held-out points
    validation_points : int, optional
        approximate number of points in the validation shapefile

    Returns
    -------
    geopandas.GeoDataFrame or (GeoDataFrame, GeoDataFrame)
        The resampled targets; when ``validation`` is True, also the
        validation targets.
    """
    if bootstrap and validation:
        raise ValueError('bootstrapping should not be use while'
                         'creating a validation shapefile.')
    if interval not in ['percentile', 'linear']:
        _logger.warning("Interval method '{}' not recognised, defaulting to 'percentile'"
                        .format(interval))
        interval = 'percentile'
    # BUGFIX: build a fresh list instead of appending in place -- the old
    # `fields_to_keep.append(...)` mutated the caller's list and the shared
    # mutable default argument across calls.
    if len(fields_to_keep):
        fields_to_keep = list(fields_to_keep) + [target_field]
    else:
        fields_to_keep = [target_field]
    gdf_out = prepapre_dataframe(input_data, fields_to_keep)
    # the idea is stolen from pandas.qcut
    # pd.qcut does not work for cases when it result in non-unique bin edges
    target = gdf_out[target_field].values
    if interval == 'percentile':
        bin_edges = algos.quantile(
            np.unique(target), np.linspace(0, 1, bins + 1))
    elif interval == 'linear':
        bin_edges = np.linspace(np.min(target), np.max(target), bins + 1)
    # NOTE(review): private pandas API, kept because pd.qcut fails on
    # non-unique bin edges -- verify on pandas upgrades.
    result = pd.core.reshape.tile._bins_to_cuts(target, bin_edges,
                                               labels=False,
                                               include_lowest=True)
    # add to output df for sampling
    gdf_out[BIN] = result[0]
    dfs_to_concat = []
    validation_dfs_to_concat = []
    total_samples = output_samples if output_samples else gdf_out.shape[0]
    samples_per_bin = total_samples // bins
    # BUGFIX: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # documented replacement.
    validate_array = np.ones(bins, dtype=bool)
    if validation and bins > validation_points:
        # randomly choose which bins contribute validation points
        validate_array[validation_points:] = False
        np.random.shuffle(validate_array)
    gb = gdf_out.groupby(BIN)
    for i, (b, gr) in enumerate(gb):
        if bootstrap:
            dfs_to_concat.append(gr.sample(n=samples_per_bin,
                                           replace=bootstrap))
        else:
            _df, v_df = _sample_without_replacement(gr, samples_per_bin,
                                                    validate_array[i])
            dfs_to_concat.append(_df)
            validation_dfs_to_concat.append(v_df)
    final_df = pd.concat(dfs_to_concat)
    final_df.sort_index(inplace=True)
    output_gdf = final_df.drop(BIN, axis=1)
    if validation:
        validation_df = pd.concat(validation_dfs_to_concat)
        return output_gdf, validation_df
    return output_gdf
def resample_spatially(input_data, target_field, rows=10, cols=10,
                       fields_to_keep=[], bootstrap=True, output_samples=None,
                       validation_points=100):
    """
    Resample targets on a regular ``rows x cols`` spatial grid so each grid
    cell contributes (roughly) the same number of points.

    Parameters
    ----------
    input_data : geopandas.GeoDataFrame, uncoverml.targets.Targets or str
        Targets to be resampled (or a path to a shapefile containing them).
    target_field : str
        target field name based on which resampling is performed. Field
        must exist in the input shapefile
    rows : int, optional
        number of bins in y
    cols : int, optional
        number of bins in x
    fields_to_keep : list of strings to store in the output shapefile
    bootstrap : bool, optional
        whether to sample with replacement or not
    output_samples : int, optional
        number of samples in the output shpfile. If not provided, the
        output samples will be assumed to be the same as the original
        shapefile
    validation_points : int, optional
        approximate number of points in the validation shapefile

    Returns
    -------
    geopandas.GeoDataFrame
        The spatially resampled targets.
    """
    # BUGFIX: build a fresh list instead of appending in place -- the old
    # `fields_to_keep.append(...)` mutated the caller's list and the shared
    # mutable default argument across calls.
    if len(fields_to_keep):
        fields_to_keep = list(fields_to_keep) + [target_field]
    else:
        fields_to_keep = [target_field]
    gdf_out = prepapre_dataframe(input_data, fields_to_keep)
    minx, miny, maxx, maxy = gdf_out[GEOMETRY].total_bounds
    x_grid = np.linspace(minx, maxx, num=cols + 1)
    y_grid = np.linspace(miny, maxy, num=rows + 1)
    # one axis-aligned rectangle per grid cell
    polygons = []
    for xs, xe in zip(x_grid[:-1], x_grid[1:]):
        for ys, ye in zip(y_grid[:-1], y_grid[1:]):
            polygons.append(Polygon([(xs, ys), (xs, ye), (xe, ye), (xe, ys)]))
    df_to_concat = []
    validation_dfs_to_concat = []
    total_samples = output_samples if output_samples else gdf_out.shape[0]
    samples_per_group = total_samples // len(polygons)
    # BUGFIX: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # documented replacement.
    validate_array = np.ones(len(polygons), dtype=bool)
    if len(polygons) > validation_points:
        # randomly choose which cells contribute validation points
        validate_array[validation_points:] = False
        np.random.shuffle(validate_array)
    for i, p in enumerate(polygons):
        df = gdf_out[gdf_out[GEOMETRY].within(p)]
        if df.shape[0]:
            if bootstrap:
                # should probably discard if df.shape[0] < 10% of
                # samples_per_group
                df_to_concat.append(df.sample(n=samples_per_group,
                                              replace=bootstrap))
            else:
                _df, v_df = _sample_without_replacement(df, samples_per_group,
                                                        validate_array[i])
                df_to_concat.append(_df)
                validation_dfs_to_concat.append(v_df)
        else:
            _logger.debug('{}th {} does not contain any sample'.format(i, p))
    output_gdf = pd.concat(df_to_concat)
    return output_gdf
def _sample_without_replacement(df, samples_per_group, validate):
"""
Parameters
----------
df
samples_per_group
validate: bool
whether to create a validate df
if False, second dataframe returned will be empty
Returns
-------
"""
if df.shape[0] >= samples_per_group + 1:
# if enough points take the number of samples
# the second df returned makes up the validation shapefile
_df = df.sample(n=samples_per_group+1, replace=False)
return _df.tail(samples_per_group), _df.head(int(validate))
else:
# else take everything, this will lead to uncertain number of
# points in the resulting shapefile
# return an empty df for the validation set for this bin
return df, gpd.GeoDataFrame(columns=df.columns)
| [
"logging.getLogger",
"numpy.ones",
"numpy.unique",
"geopandas.read_file",
"pandas.core.reshape.tile._bins_to_cuts",
"numpy.max",
"geopandas.GeoDataFrame",
"numpy.linspace",
"shapely.geometry.Polygon",
"numpy.min",
"pandas.concat",
"numpy.random.RandomState",
"numpy.random.shuffle"
] | [((603, 630), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (620, 630), False, 'import logging\n'), ((3840, 3932), 'pandas.core.reshape.tile._bins_to_cuts', 'pd.core.reshape.tile._bins_to_cuts', (['target', 'bin_edges'], {'labels': '(False)', 'include_lowest': '(True)'}), '(target, bin_edges, labels=False,\n include_lowest=True)\n', (3874, 3932), True, 'import pandas as pd\n'), ((4276, 4304), 'numpy.ones', 'np.ones', (['bins'], {'dtype': 'np.bool'}), '(bins, dtype=np.bool)\n', (4283, 4304), True, 'import numpy as np\n'), ((4923, 4947), 'pandas.concat', 'pd.concat', (['dfs_to_concat'], {}), '(dfs_to_concat)\n', (4932, 4947), True, 'import pandas as pd\n'), ((6464, 6501), 'numpy.linspace', 'np.linspace', (['minx', 'maxx'], {'num': '(cols + 1)'}), '(minx, maxx, num=cols + 1)\n', (6475, 6501), True, 'import numpy as np\n'), ((6513, 6550), 'numpy.linspace', 'np.linspace', (['miny', 'maxy'], {'num': '(rows + 1)'}), '(miny, maxy, num=rows + 1)\n', (6524, 6550), True, 'import numpy as np\n'), ((7879, 7902), 'pandas.concat', 'pd.concat', (['df_to_concat'], {}), '(df_to_concat)\n', (7888, 7902), True, 'import pandas as pd\n'), ((4412, 4445), 'numpy.random.shuffle', 'np.random.shuffle', (['validate_array'], {}), '(validate_array)\n', (4429, 4445), True, 'import numpy as np\n'), ((5073, 5108), 'pandas.concat', 'pd.concat', (['validation_dfs_to_concat'], {}), '(validation_dfs_to_concat)\n', (5082, 5108), True, 'import pandas as pd\n'), ((7096, 7129), 'numpy.random.shuffle', 'np.random.shuffle', (['validate_array'], {}), '(validate_array)\n', (7113, 7129), True, 'import numpy as np\n'), ((771, 806), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (792, 806), True, 'import numpy as np\n'), ((3676, 3693), 'numpy.unique', 'np.unique', (['target'], {}), '(target)\n', (3685, 3693), True, 'import numpy as np\n'), ((3695, 3722), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(bins + 1)'], {}), '(0, 
1, bins + 1)\n', (3706, 3722), True, 'import numpy as np\n'), ((8715, 8751), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (8731, 8751), True, 'import geopandas as gpd\n'), ((1106, 1125), 'geopandas.read_file', 'gpd.read_file', (['data'], {}), '(data)\n', (1119, 1125), True, 'import geopandas as gpd\n'), ((3785, 3799), 'numpy.min', 'np.min', (['target'], {}), '(target)\n', (3791, 3799), True, 'import numpy as np\n'), ((3801, 3815), 'numpy.max', 'np.max', (['target'], {}), '(target)\n', (3807, 3815), True, 'import numpy as np\n'), ((6696, 6745), 'shapely.geometry.Polygon', 'Polygon', (['[(xs, ys), (xs, ye), (xe, ye), (xe, ys)]'], {}), '([(xs, ys), (xs, ye), (xe, ye), (xe, ys)])\n', (6703, 6745), False, 'from shapely.geometry import Polygon\n')] |
from sorted_nearest import makewindows
from sorted_nearest import maketiles
import numpy as np
def _windows(df, kwargs):
    """Expand every interval in ``df`` into fixed-size windows.

    ``sorted_nearest.makewindows`` returns, for each window, the original
    row index to replicate together with the window Start/End coordinates.
    """
    size = kwargs["window_size"]
    idxs, starts, ends = makewindows(df.index.values, df.Start.values,
                                     df.End.values, size)
    windowed = df.reindex(idxs)
    windowed.loc[:, "Start"] = starts
    windowed.loc[:, "End"] = ends
    return windowed
def _intersect_tile(df):
overlap = np.minimum(df.End, df.__End__) - np.maximum(df.Start, df.__Start__)
df.insert(df.shape[1], "TileOverlap", overlap)
return df
def _tiles(df, kwargs):
    """Split every interval in ``df`` into tiles of ``kwargs["tile_size"]``.

    When ``kwargs["overlap"]`` is truthy, a TileOverlap column is added
    holding the overlap between each tile and its originating interval.
    """
    keep_overlap = kwargs.get("overlap")
    if keep_overlap:
        # remember the original bounds before Start/End are overwritten
        df = df.copy()
        df.insert(df.shape[1], "__Start__", df.Start)
        df.insert(df.shape[1], "__End__", df.End)
    tile_size = kwargs["tile_size"]
    idxs, starts, ends = maketiles(df.index.values, df.Start.values,
                                   df.End.values, tile_size)
    df = df.reindex(idxs)
    df.loc[:, "Start"] = starts
    df.loc[:, "End"] = ends
    if keep_overlap:
        df = _intersect_tile(df)
        df = df.drop(["__Start__", "__End__"], axis=1)
    return df
| [
"numpy.maximum",
"sorted_nearest.makewindows",
"numpy.minimum",
"sorted_nearest.maketiles"
] | [((191, 264), 'sorted_nearest.makewindows', 'makewindows', (['df.index.values', 'df.Start.values', 'df.End.values', 'window_size'], {}), '(df.index.values, df.Start.values, df.End.values, window_size)\n', (202, 264), False, 'from sorted_nearest import makewindows\n'), ((852, 923), 'sorted_nearest.maketiles', 'maketiles', (['df.index.values', 'df.Start.values', 'df.End.values', 'window_size'], {}), '(df.index.values, df.Start.values, df.End.values, window_size)\n', (861, 923), False, 'from sorted_nearest import maketiles\n'), ((446, 476), 'numpy.minimum', 'np.minimum', (['df.End', 'df.__End__'], {}), '(df.End, df.__End__)\n', (456, 476), True, 'import numpy as np\n'), ((479, 513), 'numpy.maximum', 'np.maximum', (['df.Start', 'df.__Start__'], {}), '(df.Start, df.__Start__)\n', (489, 513), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import Counter
from maskrcnn_benchmark.layers.misc import Conv2d
from maskrcnn_benchmark.layers import FrozenBatchNorm2d
class h_sigmoid(nn.Module):
    """Hard sigmoid: ``ReLU6(x + 3) / 6``."""

    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        shifted = F.relu6(x + 3., self.inplace)
        return shifted / 6.
class h_swish(nn.Module):
    """Hard swish: ``x * ReLU6(x + 3) / 6``."""

    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3., self.inplace) / 6.
        return x * gate
def _make_divisible(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class SqueezeBlock(nn.Module):
    """Squeeze-and-Excitation: a channel-wise gate computed by a bottleneck
    MLP (``exp_size -> exp_size/divide -> exp_size``) over globally pooled
    features, squashed by a hard sigmoid."""

    def __init__(self, exp_size, divide=4):
        super(SqueezeBlock, self).__init__()
        self.dense = nn.Sequential(
            nn.Linear(exp_size, exp_size // divide, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(exp_size // divide, exp_size, bias=False),
            h_sigmoid()
        )

    def forward(self, x):
        batch, channels, height, width = x.size()
        # global average pool over the full spatial extent -> (batch, channels)
        squeezed = F.avg_pool2d(x, kernel_size=(height, width)).view(batch, -1)
        gate = self.dense(squeezed).view(batch, channels, 1, 1)
        return gate * x
class MobileBlock(nn.Module):
    """One MobileNetV3 bottleneck: 1x1 expand -> depthwise conv -> optional
    squeeze-and-excite -> 1x1 project, with a residual connection when the
    stride is 1 and the channel counts match."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, nonLinear, SE, exp_size, dropout_rate=1.0):
        super(MobileBlock, self).__init__()
        self.out_channels = out_channels
        self.nonLinear = nonLinear
        self.SE = SE
        self.dropout_rate = dropout_rate
        padding = (kernel_size - 1) // 2
        # residual only when the input can be added to the output unchanged
        self.use_connect = stride == 1 and in_channels == out_channels
        # "RE": ReLU; "HS": h_swish
        activation = nn.ReLU if self.nonLinear == "RE" else h_swish
        # 1x1 pointwise expansion to exp_size channels
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, exp_size, kernel_size=1, stride=1, padding=0, bias=False),
            FrozenBatchNorm2d(exp_size),
            activation(inplace=True)
        )
        # depthwise convolution (groups == channels)
        self.depth_conv = nn.Sequential(
            Conv2d(exp_size, exp_size, kernel_size=kernel_size, stride=stride, padding=padding, groups=exp_size),
            FrozenBatchNorm2d(exp_size)
        )
        # optional Squeeze-and-Excite
        if self.SE:
            self.squeeze_block = SqueezeBlock(exp_size)
        # 1x1 pointwise projection to out_channels
        self.point_conv = nn.Sequential(
            Conv2d(exp_size, out_channels, kernel_size=1, stride=1, padding=0),
            FrozenBatchNorm2d(out_channels),
            activation(inplace=True)
        )

    def forward(self, x):
        out = self.depth_conv(self.conv(x))
        if self.SE:
            out = self.squeeze_block(out)
        out = self.point_conv(out)
        return x + out if self.use_connect else out
class MobileNetV3(nn.Module):
    """MobileNetV3-style backbone that returns one feature map per stage.

    The architecture is driven by the ``layers`` table in ``__init__``; the
    last column of each row is the stage index the block belongs to.
    """
    def __init__(self, cfg, multiplier=1.0):
        super(MobileNetV3, self).__init__()
        self.activation_HS = nn.ReLU6(inplace=True)
        bneck2_in_channels = cfg.MODEL.MOBILENETS.STEM_OUT_CHANNELS
        bneck2_out_channels = cfg.MODEL.MOBILENETS.BNECK2_OUT_CHANNELS
        # Each row: [in_ch, out_ch, kernel, stride, nonlinearity, SE, exp_size, stage]
        layers = [
            [bneck2_in_channels, bneck2_in_channels, 3, 1, "RE", False, 16, 1],
            [bneck2_in_channels, bneck2_out_channels, 3, 2, "RE", False, 64, 1],
            [bneck2_out_channels, bneck2_out_channels, 3, 1, "RE", False, 72, 1],
            [bneck2_out_channels, 40, 5, 2, "RE", True, 72, 2],
            [40, 40, 5, 1, "RE", True, 120, 2],
            [40, 40, 5, 1, "RE", True, 120, 2],
            [40, 80, 3, 2, "HS", False, 240, 3],
            [80, 80, 3, 1, "HS", False, 200, 3],
            [80, 80, 3, 1, "HS", False, 184, 3],
            [80, 80, 3, 1, "HS", False, 184, 3],
            [80, 112, 3, 1, "HS", True, 480, 4],
            [112, 112, 3, 1, "HS", True, 672, 4],
            [112, 160, 5, 1, "HS", True, 672, 4],
            [160, 160, 5, 2, "HS", True, 672, 4],
            [160, 160, 5, 1, "HS", True, 960, 4],
        ]
        # Count how many blocks fall into each stage (last column).  Note:
        # np.array on these mixed-type rows yields string elements, which is
        # why `int(index)` is needed below.
        indices = np.array(layers)[:, -1].tolist()
        r = Counter(indices)
        self.stages = []
        # Stem: 3x3 stride-2 conv from RGB to the first bottleneck width.
        self.init_conv = nn.Sequential(
            Conv2d(in_channels=3, out_channels=bneck2_in_channels, kernel_size=3, stride=2, padding=1),
            FrozenBatchNorm2d(bneck2_in_channels),
            h_swish(inplace=True)
        )
        # counts[s - 1] is the row offset in `layers` where stage s starts
        # (stages hold 3, 3, 4 and 5 rows respectively).
        counts = [0, 3, 6, 10]
        for index in r.keys():
            blocks = []
            name = "layer{}".format(index)
            for _ in range(r[index]):
                in_channels, out_channels, kernel_size, stride, nonlinear, se, exp_size = layers[counts[int(
                    index) - 1] + _][:7]
                # Scale channel widths by `multiplier`, keeping them divisible by 8.
                in_channels = _make_divisible(in_channels * multiplier)
                out_channels = _make_divisible(out_channels * multiplier)
                exp_size = _make_divisible(exp_size * multiplier)
                blocks.append(MobileBlock(in_channels, out_channels, kernel_size, stride, nonlinear, se, exp_size))
            # register the stage so state_dict keys are "layer<N>.<...>"
            self.add_module(name, nn.Sequential(*blocks))
            self.stages.append(name)
        # self.block = []
        # for in_channels, out_channels, kernel_size, stride, nonlinear, se, exp_size, index in layers:
        #     in_channels = _make_divisible(in_channels * multiplier)
        #     out_channels = _make_divisible(out_channels * multiplier)
        #     exp_size = _make_divisible(exp_size * multiplier)
        #     self.block.append(MobileBlock(in_channels, out_channels, kernel_size, stride, nonlinear, se, exp_size))
        # self.block = nn.Sequential(*self.block)
    def forward(self, x):
        """Run the stem and every stage, collecting each stage's output."""
        outputs = []
        out = self.init_conv(x)
        for stage_name in self.stages:
            out = getattr(self, stage_name)(out)
            outputs.append(out)
        return outputs
| [
"torch.nn.ReLU",
"torch.nn.Sequential",
"maskrcnn_benchmark.layers.misc.Conv2d",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.relu6",
"collections.Counter",
"torch.nn.Conv2d",
"numpy.array",
"torch.nn.Linear",
"maskrcnn_benchmark.layers.FrozenBatchNorm2d",
"torch.nn.ReLU6"
] | [((3386, 3408), 'torch.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3394, 3408), True, 'import torch.nn as nn\n'), ((4489, 4505), 'collections.Counter', 'Counter', (['indices'], {}), '(indices)\n', (4496, 4505), False, 'from collections import Counter\n'), ((408, 438), 'torch.nn.functional.relu6', 'F.relu6', (['(x + 3.0)', 'self.inplace'], {}), '(x + 3.0, self.inplace)\n', (415, 438), True, 'import torch.nn.functional as F\n'), ((621, 651), 'torch.nn.functional.relu6', 'F.relu6', (['(x + 3.0)', 'self.inplace'], {}), '(x + 3.0, self.inplace)\n', (628, 651), True, 'import torch.nn.functional as F\n'), ((1159, 1210), 'torch.nn.Linear', 'nn.Linear', (['exp_size', '(exp_size // divide)'], {'bias': '(False)'}), '(exp_size, exp_size // divide, bias=False)\n', (1168, 1210), True, 'import torch.nn as nn\n'), ((1224, 1245), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1231, 1245), True, 'import torch.nn as nn\n'), ((1259, 1310), 'torch.nn.Linear', 'nn.Linear', (['(exp_size // divide)', 'exp_size'], {'bias': '(False)'}), '(exp_size // divide, exp_size, bias=False)\n', (1268, 1310), True, 'import torch.nn as nn\n'), ((2235, 2320), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'exp_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(in_channels, exp_size, kernel_size=1, stride=1, padding=0, bias=False\n )\n', (2244, 2320), True, 'import torch.nn as nn\n'), ((2329, 2356), 'maskrcnn_benchmark.layers.FrozenBatchNorm2d', 'FrozenBatchNorm2d', (['exp_size'], {}), '(exp_size)\n', (2346, 2356), False, 'from maskrcnn_benchmark.layers import FrozenBatchNorm2d\n'), ((2459, 2564), 'maskrcnn_benchmark.layers.misc.Conv2d', 'Conv2d', (['exp_size', 'exp_size'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'groups': 'exp_size'}), '(exp_size, exp_size, kernel_size=kernel_size, stride=stride, padding=\n padding, groups=exp_size)\n', (2465, 2564), False, 'from 
maskrcnn_benchmark.layers.misc import Conv2d\n'), ((2573, 2600), 'maskrcnn_benchmark.layers.FrozenBatchNorm2d', 'FrozenBatchNorm2d', (['exp_size'], {}), '(exp_size)\n', (2590, 2600), False, 'from maskrcnn_benchmark.layers import FrozenBatchNorm2d\n'), ((2771, 2837), 'maskrcnn_benchmark.layers.misc.Conv2d', 'Conv2d', (['exp_size', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(exp_size, out_channels, kernel_size=1, stride=1, padding=0)\n', (2777, 2837), False, 'from maskrcnn_benchmark.layers.misc import Conv2d\n'), ((2851, 2882), 'maskrcnn_benchmark.layers.FrozenBatchNorm2d', 'FrozenBatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (2868, 2882), False, 'from maskrcnn_benchmark.layers import FrozenBatchNorm2d\n'), ((4583, 4677), 'maskrcnn_benchmark.layers.misc.Conv2d', 'Conv2d', ([], {'in_channels': '(3)', 'out_channels': 'bneck2_in_channels', 'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels=3, out_channels=bneck2_in_channels, kernel_size=3,\n stride=2, padding=1)\n', (4589, 4677), False, 'from maskrcnn_benchmark.layers.misc import Conv2d\n'), ((4687, 4724), 'maskrcnn_benchmark.layers.FrozenBatchNorm2d', 'FrozenBatchNorm2d', (['bneck2_in_channels'], {}), '(bneck2_in_channels)\n', (4704, 4724), False, 'from maskrcnn_benchmark.layers import FrozenBatchNorm2d\n'), ((1437, 1481), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': '(height, width)'}), '(x, kernel_size=(height, width))\n', (1449, 1481), True, 'import torch.nn.functional as F\n'), ((5450, 5472), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (5463, 5472), True, 'import torch.nn as nn\n'), ((4444, 4460), 'numpy.array', 'np.array', (['layers'], {}), '(layers)\n', (4452, 4460), True, 'import numpy as np\n')] |
import os
import sys
import unittest
import numpy
from os.path import join as pjn
import QENSmodels
# Resolve the absolute path to the reference_data directory that sits
# next to this test module.
this_module_path = sys.modules[__name__].__file__
data_dir = pjn(os.path.dirname(this_module_path), 'reference_data')
class TestChudleyElliotDiffusion(unittest.TestCase):
""" Tests QENSmodels.chudley_elliot_diffusion function"""
def test_size_hwhm_chudley_elliot_diffusion(self):
""" Test size of output of hwhmChudleyElliotDiffusion
The output should contains 3 elements """
self.assertEqual(
len(QENSmodels.hwhmChudleyElliotDiffusion(1.)), 3)
self.assertEqual(
len(QENSmodels.hwhmChudleyElliotDiffusion([1., 2.])), 3)
def test_type_size_hwhm_chudley_elliot_diffusion_q_nb(self):
""" Tests type and size of outputs if input q is a float """
hwhm, eisf, qisf = QENSmodels.hwhmChudleyElliotDiffusion(1.)
self.assertIsInstance(hwhm, numpy.ndarray)
self.assertIsInstance(eisf, numpy.ndarray)
self.assertIsInstance(qisf, numpy.ndarray)
self.assertEqual(len(hwhm), 1)
self.assertEqual(len(eisf), 1)
self.assertEqual(len(qisf), 1)
self.assertEqual(eisf, 0.)
self.assertEqual(qisf, 1.)
def test_type_size_hwhm_chudley_elliot_diffusion_q_array(self):
""" Tests type and size of outputs if input q is an array """
# new parameters: q as an array of several values
q_input = [1., 2.]
hwhm1, eisf1, qisf1 = QENSmodels.hwhmChudleyElliotDiffusion(
q_input, 0.33)
self.assertIsInstance(hwhm1, numpy.ndarray)
self.assertIsInstance(eisf1, numpy.ndarray)
self.assertIsInstance(qisf1, numpy.ndarray)
# hwhm, eisf, qisf contain len(q) lists of 6 elements each
self.assertEqual(len(hwhm1), len(q_input))
self.assertEqual(len(eisf1), len(q_input))
self.assertEqual(len(qisf1), len(q_input))
numpy.testing.assert_array_almost_equal(hwhm1, [1.98, 1.98])
self.assertSequenceEqual(eisf1.tolist(), numpy.zeros(2).tolist())
self.assertSequenceEqual(qisf1.tolist(), numpy.ones(2).tolist())
def test_raised_error_negative_coeffs(self):
""" test that an error is raised if D or L are negative
"""
# D = -1, L = 1
self.assertRaises(ValueError,
QENSmodels.hwhmChudleyElliotDiffusion,
1,
-1, 1)
# D = 1, L = -1
self.assertRaises(ValueError,
QENSmodels.hwhmChudleyElliotDiffusion,
1,
1, -1)
# D = -1, L = -1
self.assertRaises(ValueError,
QENSmodels.hwhmChudleyElliotDiffusion,
1,
-1, -1)
def test_raised_error_no_q_input(self):
""" test that an error is raised if no values of q are given as input
"""
self.assertRaises(TypeError,
QENSmodels.sqwChudleyElliotDiffusion,
1)
def test_type_sqw_chudley_elliot_diffusion(self):
""" Test type of output """
# w, q are floats
self.assertIsInstance(QENSmodels.sqwChudleyElliotDiffusion(1, 1),
numpy.ndarray)
# w, q are vectors
output = QENSmodels.sqwChudleyElliotDiffusion([1, 2, 3],
[0.3, 0.4])
self.assertIsInstance(output, numpy.ndarray)
self.assertEqual(output.size, 6)
self.assertEqual(output.shape, (2, 3))
def test_reference_data(self):
""" Test output values in comparison with reference data
(file in 'reference data' folder) """
# load reference data
ref_data = numpy.loadtxt(
pjn(data_dir, "chudley_elliot_diffusion_ref_data.dat"))
# generate data from current model
# for info: the parameters' values used for the reference data are
# specified in the README file in the 'reference data' folder
w = numpy.arange(-2, 2.01, 0.01)
q = 0.7
actual_data = numpy.column_stack(
[w, QENSmodels.sqwChudleyElliotDiffusion(w,
q,
scale=1,
center=0,
D=0.23,
L=1.)])
numpy.testing.assert_array_almost_equal(ref_data,
actual_data,
decimal=12)
if __name__ == '__main__':
unittest.main()
| [
"numpy.testing.assert_array_almost_equal",
"numpy.ones",
"QENSmodels.hwhmChudleyElliotDiffusion",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"unittest.main",
"numpy.arange",
"QENSmodels.sqwChudleyElliotDiffusion"
] | [((200, 233), 'os.path.dirname', 'os.path.dirname', (['this_module_path'], {}), '(this_module_path)\n', (215, 233), False, 'import os\n'), ((4830, 4845), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4843, 4845), False, 'import unittest\n'), ((886, 928), 'QENSmodels.hwhmChudleyElliotDiffusion', 'QENSmodels.hwhmChudleyElliotDiffusion', (['(1.0)'], {}), '(1.0)\n', (923, 928), False, 'import QENSmodels\n'), ((1524, 1576), 'QENSmodels.hwhmChudleyElliotDiffusion', 'QENSmodels.hwhmChudleyElliotDiffusion', (['q_input', '(0.33)'], {}), '(q_input, 0.33)\n', (1561, 1576), False, 'import QENSmodels\n'), ((1976, 2036), 'numpy.testing.assert_array_almost_equal', 'numpy.testing.assert_array_almost_equal', (['hwhm1', '[1.98, 1.98]'], {}), '(hwhm1, [1.98, 1.98])\n', (2015, 2036), False, 'import numpy\n'), ((3426, 3485), 'QENSmodels.sqwChudleyElliotDiffusion', 'QENSmodels.sqwChudleyElliotDiffusion', (['[1, 2, 3]', '[0.3, 0.4]'], {}), '([1, 2, 3], [0.3, 0.4])\n', (3462, 3485), False, 'import QENSmodels\n'), ((4172, 4200), 'numpy.arange', 'numpy.arange', (['(-2)', '(2.01)', '(0.01)'], {}), '(-2, 2.01, 0.01)\n', (4184, 4200), False, 'import numpy\n'), ((4626, 4700), 'numpy.testing.assert_array_almost_equal', 'numpy.testing.assert_array_almost_equal', (['ref_data', 'actual_data'], {'decimal': '(12)'}), '(ref_data, actual_data, decimal=12)\n', (4665, 4700), False, 'import numpy\n'), ((3293, 3335), 'QENSmodels.sqwChudleyElliotDiffusion', 'QENSmodels.sqwChudleyElliotDiffusion', (['(1)', '(1)'], {}), '(1, 1)\n', (3329, 3335), False, 'import QENSmodels\n'), ((3915, 3969), 'os.path.join', 'pjn', (['data_dir', '"""chudley_elliot_diffusion_ref_data.dat"""'], {}), "(data_dir, 'chudley_elliot_diffusion_ref_data.dat')\n", (3918, 3969), True, 'from os.path import join as pjn\n'), ((581, 623), 'QENSmodels.hwhmChudleyElliotDiffusion', 'QENSmodels.hwhmChudleyElliotDiffusion', (['(1.0)'], {}), '(1.0)\n', (618, 623), False, 'import QENSmodels\n'), ((671, 720), 
'QENSmodels.hwhmChudleyElliotDiffusion', 'QENSmodels.hwhmChudleyElliotDiffusion', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (708, 720), False, 'import QENSmodels\n'), ((4275, 4351), 'QENSmodels.sqwChudleyElliotDiffusion', 'QENSmodels.sqwChudleyElliotDiffusion', (['w', 'q'], {'scale': '(1)', 'center': '(0)', 'D': '(0.23)', 'L': '(1.0)'}), '(w, q, scale=1, center=0, D=0.23, L=1.0)\n', (4311, 4351), False, 'import QENSmodels\n'), ((2087, 2101), 'numpy.zeros', 'numpy.zeros', (['(2)'], {}), '(2)\n', (2098, 2101), False, 'import numpy\n'), ((2162, 2175), 'numpy.ones', 'numpy.ones', (['(2)'], {}), '(2)\n', (2172, 2175), False, 'import numpy\n')] |
"""
https://github.com/google/microscopeimagequality/blob/main/microscopeimagequality/prediction.py
"""
import logging
import sys
import numpy
import tensorflow
import cytokit.miq.constants
import cytokit.miq.evaluation
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
_SPLIT_NAME = 'test'
_TFRECORD_FILE_PATTERN = 'data_%s-%05d-of-%05d.tfrecord'
logger = logging.getLogger(__name__)
class ImageQualityClassifier(object):
"""Object for running image quality model inference.
Attributes:
graph: TensorFlow graph.
"""
def __init__(self,
model_ckpt,
model_patch_side_length,
num_classes,
graph=None,
session_config=None):
"""Initialize the model from a checkpoint.
Args:
model_ckpt: String, path to TensorFlow model checkpoint to load.
model_patch_side_length: Integer, the side length in pixels of the square
image passed to the model.
num_classes: Integer, the number of classes the model predicts.
graph: TensorFlow graph. If None, one will be created.
session_config: TensorFlow session configuration. If None, one will be created
"""
self._model_patch_side_length = model_patch_side_length
self._num_classes = num_classes
if graph is None:
graph = tensorflow.Graph()
self.graph = graph
with self.graph.as_default():
self._image_placeholder = tensorflow.placeholder(
tensorflow.float32, shape=[None, None, 1])
self._probabilities = self._probabilities_from_image(
self._image_placeholder, model_patch_side_length, num_classes)
self._sess = tensorflow.Session(config=session_config)
saver = tensorflow.train.Saver()
saver.restore(self._sess, model_ckpt)
logger.debug('Restored image focus prediction model from %s.', model_ckpt)
def __del__(self):
self._sess.close()
def _probabilities_from_image(self, image_placeholder,
model_patch_side_length, num_classes):
"""Get probabilities tensor from input image tensor.
Args:
image_placeholder: Float32 tensor, placeholder for input image.
model_patch_side_length: Integer, the side length in pixels of the square
image passed to the model.
num_classes: Integer, the number of classes the model predicts.
Returns:
Probabilities tensor, shape [num_classes] representing the predicted
probabilities for each class.
"""
labels_fake = tensorflow.zeros([self._num_classes])
image_path_fake = tensorflow.constant(['unused'])
tiles, labels, _ = _get_image_tiles_tensor(
image_placeholder, labels_fake, image_path_fake,
model_patch_side_length)
model_metrics = cytokit.miq.evaluation.get_model_and_metrics(
tiles,
num_classes=num_classes,
one_hot_labels=labels,
is_training=False)
return model_metrics.probabilities
def predict(self, image):
"""Run inference on an image.
Args:
image: Numpy float array, two-dimensional.
Returns:
A evaluation.WholeImagePrediction object.
"""
feed_dict = {self._image_placeholder: numpy.expand_dims(image, 2)}
[np_probabilities] = self._sess.run(
[self._probabilities], feed_dict=feed_dict)
return cytokit.miq.evaluation.aggregate_prediction_from_probabilities(
np_probabilities, cytokit.miq.evaluation.METHOD_AVERAGE)
def get_patch_predictions(self, image):
"""Run inference on each patch in an image, returning each patch score.
Args:
image: Numpy float array, of shape (height, width).
Returns:
List of tuples, with (upper_left_row, upper_left_col, height, width
evaluation.WholeImagePrediction) which denote the patch location,
dimensions and predition result.
"""
results = []
w = cytokit.miq.constants.PATCH_SIDE_LENGTH
for i in range(0, image.shape[0] - w, w):
for j in range(0, image.shape[1] - w, w):
results.append((i, j, w, w, self.predict(image[i:i + w, j:j + w])))
return results
def get_annotated_prediction(self, image):
"""Run inference to annotate the input image with patch predictions.
Args:
image: Numpy float array, two-dimensional.
Returns:
RGB image as uint8 numpy array of shape (image_height, image_width, 3),
representing the upper left crop of the input image, where:
image_height = floor(image.shape[0] / model_patch_side_length)
image_width = floor(image.shape[1] / model_patch_side_length)
"""
feed_dict = {self._image_placeholder: numpy.expand_dims(image, 2)}
with self.graph.as_default():
patches = _get_image_tiles_tensor(
self._image_placeholder,
tensorflow.constant([0]),
tensorflow.constant([0]),
patch_width=self._model_patch_side_length)[0]
[np_probabilities, np_patches] = self._sess.run(
[self._probabilities, patches], feed_dict=feed_dict)
# We use '-1' to denote no true label exists.
np_labels = -1 * numpy.ones((np_patches.shape[0]))
return numpy.squeeze(
cytokit.miq.evaluation.visualize_image_predictions(
np_patches,
np_probabilities,
np_labels,
image.shape[0],
image.shape[1],
show_plot=False,
output_path=None))
def patch_values_to_mask(values, patch_width):
"""Construct a mask from an array of patch values.
Args:
values: A uint16 2D numpy array.
patch_width: Width in pixels of each patch.
Returns:
The mask, a uint16 numpy array of width patch_width *
values.shape[0].
Raises:
ValueError: If the input values are invalid.
"""
if values.dtype != numpy.uint16 or len(values.shape) != 2:
logging.info('dtype: %s shape: %s', values.dtype, values.shape)
raise ValueError('Input must be a 2D np.uint16 array.')
patches_per_column = values.shape[0]
patches_per_row = values.shape[1]
mask = numpy.zeros(
(patches_per_column * patch_width, patches_per_row * patch_width),
dtype=numpy.uint16)
for i in range(patches_per_column):
for j in range(patches_per_row):
ymin = i * patch_width
xmin = j * patch_width
mask[ymin:ymin + patch_width, xmin:xmin + patch_width] = values[i, j]
return mask
def _get_image_tiles_tensor(image, label, image_path, patch_width):
"""Gets patches that tile the input image, starting at upper left.
Args:
image: Input image tensor, size [height x width x 1].
label: Input label tensor, size [num_classes].
image_path: Input image path tensor, size [1].
patch_width: Integer representing width of image patch.
Returns:
Tensors tiles, size [num_tiles x patch_width x patch_width x 1], labels,
size [num_tiles x num_classes], and image_paths, size [num_tiles x 1].
"""
tiles_before_reshape = tensorflow.extract_image_patches(
tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1],
[1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID')
tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1])
labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1])
image_paths = tensorflow.tile(
tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1])
return tiles, labels, image_paths
| [
"logging.getLogger",
"tensorflow.Graph",
"tensorflow.shape",
"numpy.ones",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.zeros",
"tensorflow.constant",
"numpy.expand_dims",
"tensorflow.reshape",
"tensorflow.expand_dims",
"logging.info",
"tensorflow.zeros"... | [((376, 403), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (393, 403), False, 'import logging\n'), ((6530, 6632), 'numpy.zeros', 'numpy.zeros', (['(patches_per_column * patch_width, patches_per_row * patch_width)'], {'dtype': 'numpy.uint16'}), '((patches_per_column * patch_width, patches_per_row *\n patch_width), dtype=numpy.uint16)\n', (6541, 6632), False, 'import numpy\n'), ((7673, 7748), 'tensorflow.reshape', 'tensorflow.reshape', (['tiles_before_reshape', '[-1, patch_width, patch_width, 1]'], {}), '(tiles_before_reshape, [-1, patch_width, patch_width, 1])\n', (7691, 7748), False, 'import tensorflow\n'), ((2708, 2745), 'tensorflow.zeros', 'tensorflow.zeros', (['[self._num_classes]'], {}), '([self._num_classes])\n', (2724, 2745), False, 'import tensorflow\n'), ((2773, 2804), 'tensorflow.constant', 'tensorflow.constant', (["['unused']"], {}), "(['unused'])\n", (2792, 2804), False, 'import tensorflow\n'), ((6310, 6373), 'logging.info', 'logging.info', (['"""dtype: %s shape: %s"""', 'values.dtype', 'values.shape'], {}), "('dtype: %s shape: %s', values.dtype, values.shape)\n", (6322, 6373), False, 'import logging\n'), ((7524, 7560), 'tensorflow.expand_dims', 'tensorflow.expand_dims', (['image'], {'dim': '(0)'}), '(image, dim=0)\n', (7546, 7560), False, 'import tensorflow\n'), ((7779, 7815), 'tensorflow.expand_dims', 'tensorflow.expand_dims', (['label'], {'dim': '(0)'}), '(label, dim=0)\n', (7801, 7815), False, 'import tensorflow\n'), ((7893, 7934), 'tensorflow.expand_dims', 'tensorflow.expand_dims', (['image_path'], {'dim': '(0)'}), '(image_path, dim=0)\n', (7915, 7934), False, 'import tensorflow\n'), ((1407, 1425), 'tensorflow.Graph', 'tensorflow.Graph', ([], {}), '()\n', (1423, 1425), False, 'import tensorflow\n'), ((1530, 1595), 'tensorflow.placeholder', 'tensorflow.placeholder', (['tensorflow.float32'], {'shape': '[None, None, 1]'}), '(tensorflow.float32, shape=[None, None, 1])\n', (1552, 1595), False, 
'import tensorflow\n'), ((1785, 1826), 'tensorflow.Session', 'tensorflow.Session', ([], {'config': 'session_config'}), '(config=session_config)\n', (1803, 1826), False, 'import tensorflow\n'), ((1847, 1871), 'tensorflow.train.Saver', 'tensorflow.train.Saver', ([], {}), '()\n', (1869, 1871), False, 'import tensorflow\n'), ((3457, 3484), 'numpy.expand_dims', 'numpy.expand_dims', (['image', '(2)'], {}), '(image, 2)\n', (3474, 3484), False, 'import numpy\n'), ((5022, 5049), 'numpy.expand_dims', 'numpy.expand_dims', (['image', '(2)'], {}), '(image, 2)\n', (5039, 5049), False, 'import numpy\n'), ((5534, 5565), 'numpy.ones', 'numpy.ones', (['np_patches.shape[0]'], {}), '(np_patches.shape[0])\n', (5544, 5565), False, 'import numpy\n'), ((7818, 7841), 'tensorflow.shape', 'tensorflow.shape', (['tiles'], {}), '(tiles)\n', (7834, 7841), False, 'import tensorflow\n'), ((7937, 7960), 'tensorflow.shape', 'tensorflow.shape', (['tiles'], {}), '(tiles)\n', (7953, 7960), False, 'import tensorflow\n'), ((5194, 5218), 'tensorflow.constant', 'tensorflow.constant', (['[0]'], {}), '([0])\n', (5213, 5218), False, 'import tensorflow\n'), ((5236, 5260), 'tensorflow.constant', 'tensorflow.constant', (['[0]'], {}), '([0])\n', (5255, 5260), False, 'import tensorflow\n')] |
# Tensorflow is not supported on Python 3.8. I used Python 3.7 to write this program.
# Packages to pip install: tensorflow (using 2.1.0), numpy, pillow, tkinter
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.keras.models import load_model, model_from_json
from tkinter.filedialog import askopenfilename
root = ".\\" # directory with saved_model.pb file
RESIZE_DIMS = (150, 150)
THRESHOLD = 0.5
my_model = load_model(root)
def to_arr(path):
x = []
x.append(
(np.array(
Image.open(path).convert("L").resize(RESIZE_DIMS),
np.float64
)) / 255 # normalization to [0, 1] values
)
return np.array(x)
try:
filename = askopenfilename() # pick image file (jpeg, png, gif, etc.)
img = to_arr(filename)
prediction = my_model.predict(img[:1])[0][0]
print(prediction) # Probablility (0, 1) of being positive for COVID-19
print(prediction > THRESHOLD) # Boolean: True is positive, False is negative
except:
print("No file selected")
| [
"numpy.array",
"PIL.Image.open",
"tensorflow.keras.models.load_model",
"tkinter.filedialog.askopenfilename"
] | [((459, 475), 'tensorflow.keras.models.load_model', 'load_model', (['root'], {}), '(root)\n', (469, 475), False, 'from tensorflow.keras.models import load_model, model_from_json\n'), ((707, 718), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (715, 718), True, 'import numpy as np\n'), ((745, 762), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (760, 762), False, 'from tkinter.filedialog import askopenfilename\n'), ((559, 575), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (569, 575), False, 'from PIL import Image\n')] |
import os
import pytrec_eval
import numpy as np
from capreolus.utils.loginit import get_logger
from capreolus.searcher import Searcher
logger = get_logger(__name__)
VALID_METRICS = {"P", "map", "map_cut", "ndcg_cut", "Rprec", "recip_rank", "set_recall"}
CUT_POINTS = [5, 10, 15, 20, 30, 100, 200, 500, 1000]
def _verify_metric(metrics):
"""
Verify if the metrics is in the returned list of TREC eval
Args:
metrics: a list of str
"""
assert isinstance(metrics, list)
expected_metrics = {m for m in VALID_METRICS if not m.endswith("_cut") and m != "P"} | {
m + "_" + str(cutoff) for cutoff in CUT_POINTS for m in VALID_METRICS if m.endswith("_cut") or m == "P"
}
for metric in metrics:
if metric not in expected_metrics:
raise ValueError(f"Unexpected evaluation metric: {metric}, should be one of { ' '.join(sorted(expected_metrics))}")
def _transform_metric(metrics):
"""
Remove the _NUM at the end of metric is applicable
Args:
metrics: a list of str
Returns:
a set of transformed metric
"""
assert isinstance(metrics, list)
metrics = {"_".join(metric.split("_")[:-1]) if "_cut" in metric or "P_" in metric else metric for metric in metrics}
return metrics
def _eval_runs(runs, qrels, metrics, dev_qids):
assert isinstance(metrics, list)
dev_qrels = {qid: labels for qid, labels in qrels.items() if qid in dev_qids}
evaluator = pytrec_eval.RelevanceEvaluator(dev_qrels, _transform_metric(metrics))
scores = [[metrics_dict.get(m, -1) for m in metrics] for metrics_dict in evaluator.evaluate(runs).values()]
scores = np.array(scores).mean(axis=0).tolist()
scores = dict(zip(metrics, scores))
return scores
def eval_runs(runs, qrels, metrics):
"""
Evaluate runs loaded by Searcher.load_trec_run
Args:
runs: a dict with format {qid: {docid: score}}, could be prepared by Searcher.load_trec_run
qrels: dict, containing the judgements provided by benchmark
metrics: str or list, metrics expected to calculate, e.g. ndcg_cut_20, etc
Returns:
a dict with format {metric: score}, containing the evaluation score of specified metrics
"""
metrics = [metrics] if isinstance(metrics, str) else list(metrics)
_verify_metric(metrics)
return _eval_runs(runs, qrels, metrics, dev_qids=list(qrels.keys()))
def eval_runfile(runfile, qrels, metrics):
"""
Evaluate a single runfile produced by ranker or reranker
Args:
runfile: str, path to runfile
qrels: dict, containing the judgements provided by benchmark
metrics: str or list, metrics expected to calculate, e.g. ndcg_cut_20, etc
Returns:
a dict with format {metric: score}, containing the evaluation score of specified metrics
"""
metrics = [metrics] if isinstance(metrics, str) else list(metrics)
_verify_metric(metrics)
runs = Searcher.load_trec_run(runfile)
return _eval_runs(runs, qrels, metrics, dev_qids=list(qrels.keys()))
def search_best_run(runfile_dir, benchmark, primary_metric, metrics=None, folds=None):
"""
Select the runfile with respect to the specified metric
Args:
runfile_dir: the directory path to all the runfiles to select from
benchmark: Benchmark class
primary_metric: str, metric used to select the best runfile , e.g. ndcg_cut_20, etc
metrics: str or list, metric expected by be calculated on the best runs
folds: str, the name of fold to select from
Returns:
a dict storing specified metric score and path to the corresponding runfile
"""
metrics = [] if not metrics else ([metrics] if isinstance(metrics, str) else list(metrics))
if primary_metric not in metrics:
metrics = [primary_metric] + metrics
_verify_metric(metrics)
folds = {s: benchmark.folds[s] for s in [folds]} if folds else benchmark.folds
runfiles = [
os.path.join(runfile_dir, f)
for f in os.listdir(runfile_dir)
if (f != "done" and not os.path.isdir(os.path.join(runfile_dir, f)))
]
if len(runfiles) == 1:
return {"score": eval_runfile(runfiles[0], benchmark.qrels, metrics), "path": {s: runfiles[0] for s in folds}}
best_scores = {s: {primary_metric: 0, "path": None} for s in folds}
for runfile in runfiles:
runs = Searcher.load_trec_run(runfile)
for s, v in folds.items():
score = _eval_runs(
runs, benchmark.qrels, [primary_metric], dev_qids=(set(v["train_qids"]) | set(v["predict"]["dev"]))
)[primary_metric]
if score > best_scores[s][primary_metric]:
best_scores[s] = {primary_metric: score, "path": runfile}
test_runs, test_qrels = {}, {}
for s, score_dict in best_scores.items():
test_qids = folds[s]["predict"]["test"]
test_runs.update({qid: v for qid, v in Searcher.load_trec_run(score_dict["path"]).items() if qid in test_qids})
test_qrels.update({qid: v for qid, v in benchmark.qrels.items() if qid in test_qids})
scores = eval_runs(test_runs, benchmark.qrels, metrics)
return {"score": scores, "path": {s: v["path"] for s, v in best_scores.items()}}
| [
"os.listdir",
"capreolus.utils.loginit.get_logger",
"os.path.join",
"numpy.array",
"capreolus.searcher.Searcher.load_trec_run"
] | [((147, 167), 'capreolus.utils.loginit.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (157, 167), False, 'from capreolus.utils.loginit import get_logger\n'), ((2958, 2989), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (['runfile'], {}), '(runfile)\n', (2980, 2989), False, 'from capreolus.searcher import Searcher\n'), ((3986, 4014), 'os.path.join', 'os.path.join', (['runfile_dir', 'f'], {}), '(runfile_dir, f)\n', (3998, 4014), False, 'import os\n'), ((4403, 4434), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (['runfile'], {}), '(runfile)\n', (4425, 4434), False, 'from capreolus.searcher import Searcher\n'), ((4032, 4055), 'os.listdir', 'os.listdir', (['runfile_dir'], {}), '(runfile_dir)\n', (4042, 4055), False, 'import os\n'), ((1665, 1681), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1673, 1681), True, 'import numpy as np\n'), ((4102, 4130), 'os.path.join', 'os.path.join', (['runfile_dir', 'f'], {}), '(runfile_dir, f)\n', (4114, 4130), False, 'import os\n'), ((4954, 4996), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (["score_dict['path']"], {}), "(score_dict['path'])\n", (4976, 4996), False, 'from capreolus.searcher import Searcher\n')] |
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
import numpy as np
class SimControlCZ_v2(Instrument):
"""
Noise and other parameters for cz_superoperator_simulation_v2
Created for VCZ simulation
"""
def __init__(self, name, **kw):
super().__init__(name, **kw)
# Noise parameters
self.add_parameter(
"T1_q0",
unit="s",
label="T1 fluxing qubit",
docstring="T1 fluxing qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T1_q1",
unit="s",
label="T1 static qubit",
docstring="T1 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q1",
unit="s",
label="T2 static qubit",
docstring="T2 static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"T2_q0_amplitude_dependent",
docstring="fitcoefficients giving T2_q0 or Tphi_q0 as a function of inverse sensitivity (in units of w_q0/Phi_0): a, b. Function is ax+b",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([-1, -1]),
)
# for flux noise simulations
self.add_parameter(
"sigma_q0",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q0",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"sigma_q1",
unit="flux quanta",
docstring="standard deviation of the Gaussian from which we sample the flux bias, q1",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"w_q1_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
self.add_parameter(
"w_q0_sweetspot",
docstring="NB: different from the operating point in general",
parameter_class=ManualParameter,
vals=vals.Numbers(),
)
# Control parameters for the simulations
self.add_parameter(
"dressed_compsub",
docstring="true if we use the definition of the comp subspace that uses the dressed 00,01,10,11 states",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=True,
)
self.add_parameter(
"distortions",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"voltage_scaling_factor",
unit="a.u.",
docstring="scaling factor for the voltage for a CZ pulse",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"n_sampling_gaussian_vec",
docstring="array. each element is a number of samples from the gaussian distribution. Std to guarantee convergence is [11]. More are used only to verify convergence",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([11]),
)
self.add_parameter(
"cluster",
docstring="true if we want to use the cluster",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"T2_scaling",
unit="a.u.",
docstring="scaling factor for T2_q0_amplitude_dependent",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"which_gate",
docstring="Direction of the CZ gate. E.g. 'NE'. Used to extract parameters from the fluxlutman ",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="NE",
)
self.add_parameter(
"simstep_div",
docstring="Division of the simulation time step. 4 is a good one, corresponding to a time step of 0.1 ns. For smaller values landscapes can deviate significantly from experiment.",
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=1),
initial_value=4,
)
self.add_parameter(
"gates_num",
docstring="Chain the same gate gates_num times.",
parameter_class=ManualParameter,
# It should be an integer but the measurement control cast to float when setting sweep points
vals=vals.Numbers(min_value=1),
initial_value=1,
)
self.add_parameter(
"gates_interval",
docstring="Time interval that separates the gates if gates_num > 1.",
parameter_class=ManualParameter,
unit="s",
vals=vals.Numbers(min_value=0),
initial_value=0,
)
self.add_parameter(
"cost_func",
docstring="Used to calculate the cost function based on the quantities of interest (qoi). Signature: cost_func(qoi). NB: qoi's that represent percentages will be in [0, 1] range. Inspect 'pycqed.simulations.cz_superoperator_simulation_new_functions.simulate_quantities_of_interest_superoperator_v2??' in notebook for available qoi's.",
parameter_class=ManualParameter,
unit="a.u.",
vals=vals.Callable(),
initial_value=None,
)
self.add_parameter(
"cost_func_str",
docstring="Not loaded automatically. Convenience parameter to store the cost function string and use `exec('sim_control_CZ.cost_func(' + sim_control_CZ.cost_func_str() + ')')` to load it.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="lambda qoi: np.log10((1 - qoi['avgatefid_compsubspace_pc']) * (1 - 0.5) + qoi['L1'] * 0.5)",
)
# Was used to simulate the "refocusing pulses"
self.add_parameter(
"double_cz_pi_pulses",
docstring="If set to 'no_pi_pulses' or 'with_pi_pulses' will simulate two sequential CZs with or without Pi pulses simulated as an ideal superoperator multiplication.",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="", # Use empty string to evaluate to false
)
self.add_parameter(
"optimize_const_amp",
docstring="If true constant amplitude points in the pulse will be 'absorbed' to make simulation much faster",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=True,
)
self.add_parameter(
"look_for_minimum",
docstring="FB: If cost_func=None, if this is False my old cost func is used, if it's True that cost func is used to power 4",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"purcell_device",
docstring="FB: should be set to True only when we want to use the old way of defining T2_q0_amplitude_dependent, so it could be that we simulate the purcell device but we set this parameter to False",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"artificial_waiting_at_sweetspot",
docstring="FB: integer number of simstep_new in the middle of VCZ. Used for matching sim-exp",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"timestamp_for_contour",
docstring="FB: timestamp of previously generated heatmap. Used for contour scans along the 180 deg line",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="",
)
self.add_parameter(
"measurement_time",
docstring="FB: measurement time. Used to get the right missing fraction from the conditional-oscillations experiment",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"fluxbias_mean",
docstring="FB: used for scans wrt the fluxbias at one specific point in the landscape, for fluxing qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"fluxbias_mean_q1",
docstring="FB: used for scans wrt the fluxbias at one specific point in the landscape, for static qubit",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
# for ramsey/Rabi simulations
self.add_parameter(
"detuning",
unit="Hz",
docstring="detuning of w_q0 from its sweet spot value",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"initial_state",
docstring="determines initial state for ramsey_simulations_new",
parameter_class=ManualParameter,
vals=vals.Strings(),
initial_value="changeme",
)
self.add_parameter(
"scanning_time",
unit="s",
docstring="time between the two pi/2 pulses",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=0,
)
self.add_parameter(
"czd_double_sided",
docstring="Ramsey or echo pulse. Used since it has been removed from fluxlutman",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
# for spectral tomo
self.add_parameter(
"repetitions",
docstring="Repetitions of CZ gate, used for spectral tomo",
parameter_class=ManualParameter,
vals=vals.Numbers(),
initial_value=1,
)
self.add_parameter(
"time_series",
docstring="",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"overrotation_sims",
docstring="instead of constant shift in flux, we use constant rotations around some axis",
parameter_class=ManualParameter,
vals=vals.Bool(),
initial_value=False,
)
self.add_parameter(
"axis_overrotation",
docstring="",
parameter_class=ManualParameter,
vals=vals.Arrays(),
initial_value=np.array([1, 0, 0]),
)
def set_cost_func(self, cost_func_str=None):
"""
Sets the self.cost_func from the self.cost_func_str string
or from the provided string
"""
if cost_func_str is None:
cost_func_str = self.cost_func_str()
else:
self.cost_func_str(cost_func_str)
exec("self.cost_func(" + self.cost_func_str() + ")")
| [
"qcodes.utils.validators.Numbers",
"qcodes.utils.validators.Callable",
"qcodes.utils.validators.Strings",
"qcodes.utils.validators.Arrays",
"numpy.array",
"qcodes.utils.validators.Bool"
] | [((630, 644), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (642, 644), True, 'from qcodes.utils import validators as vals\n'), ((896, 910), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (908, 910), True, 'from qcodes.utils import validators as vals\n'), ((1162, 1176), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (1174, 1176), True, 'from qcodes.utils import validators as vals\n'), ((1499, 1512), 'qcodes.utils.validators.Arrays', 'vals.Arrays', ([], {}), '()\n', (1510, 1512), True, 'from qcodes.utils import validators as vals\n'), ((1540, 1558), 'numpy.array', 'np.array', (['[-1, -1]'], {}), '([-1, -1])\n', (1548, 1558), True, 'import numpy as np\n'), ((1852, 1866), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (1864, 1866), True, 'from qcodes.utils import validators as vals\n'), ((2152, 2166), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (2164, 2166), True, 'from qcodes.utils import validators as vals\n'), ((2403, 2417), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (2415, 2417), True, 'from qcodes.utils import validators as vals\n'), ((2624, 2638), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (2636, 2638), True, 'from qcodes.utils import validators as vals\n'), ((2938, 2949), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (2947, 2949), True, 'from qcodes.utils import validators as vals\n'), ((3110, 3121), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (3119, 3121), True, 'from qcodes.utils import validators as vals\n'), ((3390, 3404), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (3402, 3404), True, 'from qcodes.utils import validators as vals\n'), ((3753, 3766), 'qcodes.utils.validators.Arrays', 'vals.Arrays', ([], {}), '()\n', (3764, 3766), True, 'from qcodes.utils import validators as vals\n'), ((3794, 3808), 'numpy.array', 'np.array', 
(['[11]'], {}), '([11])\n', (3802, 3808), True, 'import numpy as np\n'), ((3993, 4004), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (4002, 4004), True, 'from qcodes.utils import validators as vals\n'), ((4261, 4275), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (4273, 4275), True, 'from qcodes.utils import validators as vals\n'), ((4543, 4557), 'qcodes.utils.validators.Strings', 'vals.Strings', ([], {}), '()\n', (4555, 4557), True, 'from qcodes.utils import validators as vals\n'), ((4912, 4937), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {'min_value': '(1)'}), '(min_value=1)\n', (4924, 4937), True, 'from qcodes.utils import validators as vals\n'), ((5262, 5287), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {'min_value': '(1)'}), '(min_value=1)\n', (5274, 5287), True, 'from qcodes.utils import validators as vals\n'), ((5553, 5578), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {'min_value': '(0)'}), '(min_value=0)\n', (5565, 5578), True, 'from qcodes.utils import validators as vals\n'), ((6108, 6123), 'qcodes.utils.validators.Callable', 'vals.Callable', ([], {}), '()\n', (6121, 6123), True, 'from qcodes.utils import validators as vals\n'), ((6489, 6503), 'qcodes.utils.validators.Strings', 'vals.Strings', ([], {}), '()\n', (6501, 6503), True, 'from qcodes.utils import validators as vals\n'), ((6996, 7010), 'qcodes.utils.validators.Strings', 'vals.Strings', ([], {}), '()\n', (7008, 7010), True, 'from qcodes.utils import validators as vals\n'), ((7340, 7351), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (7349, 7351), True, 'from qcodes.utils import validators as vals\n'), ((7656, 7667), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (7665, 7667), True, 'from qcodes.utils import validators as vals\n'), ((8046, 8057), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (8055, 8057), True, 'from qcodes.utils import validators as vals\n'), ((8347, 
8361), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (8359, 8361), True, 'from qcodes.utils import validators as vals\n'), ((8648, 8662), 'qcodes.utils.validators.Strings', 'vals.Strings', ([], {}), '()\n', (8660, 8662), True, 'from qcodes.utils import validators as vals\n'), ((8958, 8972), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (8970, 8972), True, 'from qcodes.utils import validators as vals\n'), ((9252, 9266), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (9264, 9266), True, 'from qcodes.utils import validators as vals\n'), ((9548, 9562), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (9560, 9562), True, 'from qcodes.utils import validators as vals\n'), ((9848, 9862), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (9860, 9862), True, 'from qcodes.utils import validators as vals\n'), ((10099, 10113), 'qcodes.utils.validators.Strings', 'vals.Strings', ([], {}), '()\n', (10111, 10113), True, 'from qcodes.utils import validators as vals\n'), ((10362, 10376), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (10374, 10376), True, 'from qcodes.utils import validators as vals\n'), ((10633, 10644), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (10642, 10644), True, 'from qcodes.utils import validators as vals\n'), ((10908, 10922), 'qcodes.utils.validators.Numbers', 'vals.Numbers', ([], {}), '()\n', (10920, 10922), True, 'from qcodes.utils import validators as vals\n'), ((11106, 11117), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (11115, 11117), True, 'from qcodes.utils import validators as vals\n'), ((11388, 11399), 'qcodes.utils.validators.Bool', 'vals.Bool', ([], {}), '()\n', (11397, 11399), True, 'from qcodes.utils import validators as vals\n'), ((11593, 11606), 'qcodes.utils.validators.Arrays', 'vals.Arrays', ([], {}), '()\n', (11604, 11606), True, 'from qcodes.utils import validators as 
vals\n'), ((11634, 11653), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (11642, 11653), True, 'import numpy as np\n')] |
'''
Extensions to Numpy, including finding array elements and smoothing data.
Highlights:
- ``sc.findinds()``: find indices of an array matching a condition
- ``sc.findnearest()``: find nearest matching value
- ``sc.smooth()``: simple smoothing of 1D or 2D arrays
- ``sc.smoothinterp()``: linear interpolation with smoothing
'''
import numpy as np
import warnings
from . import sc_utils as ut
##############################################################################
### Find and approximation functions
##############################################################################
# Public names exported by `from ... import *` for the find/approximation section
__all__ = ['approx', 'safedivide', 'findinds', 'findfirst', 'findlast', 'findnearest', 'dataindex', 'getvalidinds', 'sanitize', 'getvaliddata', 'isprime']
def approx(val1=None, val2=None, eps=None, **kwargs):
    '''
    Determine whether two scalars (or an array and a scalar) approximately match.
    Alias for np.isclose() and may be removed in future versions.

    Args:
        val1 (number or array): the first value
        val2 (number): the second value
        eps (float): absolute tolerance
        kwargs (dict): passed to np.isclose()

    **Examples**::

        sc.approx(2*6, 11.9999999, eps=1e-6) # Returns True
        sc.approx([3,12,11.9], 12) # Returns array([False, True, False], dtype=bool)
    '''
    if eps is not None:
        kwargs['atol'] = eps # np.isclose() calls the absolute tolerance "atol"
    return np.isclose(a=val1, b=val2, **kwargs)
def safedivide(numerator=None, denominator=None, default=None, eps=None, warn=False):
    '''
    Handle divide-by-zero and divide-by-nan elegantly.

    Args:
        numerator (number or array): the quantity to divide (default 1.0)
        denominator (number or array): the quantity to divide by (default 1.0)
        default (number): value used wherever the denominator is (approximately) zero (default 0.0)
        eps (float): absolute tolerance for treating the denominator as zero (passed to approx())
        warn (bool): if True, let NumPy emit its divide warnings instead of silently patching the denominator

    **Examples**::

        sc.safedivide(numerator=0, denominator=0, default=1, eps=0) # Returns 1
        sc.safedivide(numerator=5, denominator=2.0, default=1, eps=1e-3) # Returns 2.5
        sc.safedivide(3, np.array([1,3,0]), -1, warn=True) # Returns array([ 3,  1, -1])
    '''
    # Set some defaults
    if numerator is None: numerator = 1.0
    if denominator is None: denominator = 1.0
    if default is None: default = 0.0

    # Handle the logic
    invalid = approx(denominator, 0.0, eps=eps) # True wherever the denominator is (approximately) zero
    if ut.isnumber(denominator): # The denominator is a scalar
        if invalid:
            output = default
        else:
            output = numerator/denominator
    elif ut.checktype(denominator, 'array'):
        # BUGFIX: operate on a copy so the caller's array is never mutated as a side effect
        # (previously `denominator[invalid] = 1.0` modified the input array in place)
        denominator = np.array(denominator, dtype=float)
        if not warn:
            denominator[invalid] = 1.0 # Replace invalid values with 1 to suppress NumPy's divide warnings
        output = numerator/denominator
        output[invalid] = default # Overwrite the invalid entries with the default
    else: # pragma: no cover # Unclear input, raise exception
        errormsg = f'Input type {type(denominator)} not understood: must be number or array'
        raise TypeError(errormsg)

    return output
def findinds(arr, val=None, eps=1e-6, first=False, last=False, die=True, **kwargs):
    '''
    Little function to find matches even if two things aren't exactly equal (e.g.
    due to floats vs. ints). If one argument, find nonzero values. With two arguments,
    check for equality using eps. Returns a tuple of arrays if val1 is multidimensional,
    else returns an array. Similar to calling np.nonzero(np.isclose(arr, val))[0].

    Args:
        arr (array): the array to find values in
        val (float): if provided, the value to match
        eps (float): the precision for matching (default 1e-6, equivalent to np.isclose's atol)
        first (bool): whether to return the first matching value
        last (bool): whether to return the last matching value
        die (bool): whether to raise an exception if first or last is true and no matches were found
        kwargs (dict): passed to np.isclose()

    **Examples**::

        sc.findinds(rand(10)<0.5) # returns e.g. array([2, 4, 5, 9])
        sc.findinds([2,3,6,3], 3) # returns array([1,3])
        sc.findinds([2,3,6,3], 3, first=True) # returns 1

    New in version 1.2.3: "die" argument
    '''
    # Handle first or last
    if first and last: raise ValueError('Can use first or last but not both')
    elif first: ind = 0
    elif last: ind = -1
    else: ind = None

    # Handle kwargs
    atol = kwargs.pop('atol', eps) # Ensure atol isn't specified twice
    if 'val1' in kwargs or 'val2' in kwargs: # pragma: no cover
        arr = kwargs.pop('val1', arr)
        val = kwargs.pop('val2', val)
        warnmsg = 'sc.findinds() arguments "val1" and "val2" have been deprecated as of v1.0.0; use "arr" and "val" instead'
        warnings.warn(warnmsg, category=DeprecationWarning, stacklevel=2)

    # Calculate matches
    arr = ut.promotetoarray(arr)
    if val is None: # Check for equality
        output = np.nonzero(arr) # If not, just check the truth condition
    elif ut.isstring(val):
        # BUGFIX: string values require exact comparison and must skip np.isclose(),
        # which cannot handle strings; previously this result was unconditionally
        # clobbered by the numeric path below, which then raised an uncaught TypeError
        output = np.nonzero(arr==val)
    else:
        try: # Standard usage, use nonzero
            output = np.nonzero(np.isclose(a=arr, b=val, atol=atol, **kwargs)) # If absolute difference between the two values is less than a certain amount
        except Exception as E: # pragma: no cover # As a fallback, try simpler comparison
            output = np.nonzero(abs(arr-val) < atol)
            if kwargs: # Raise a warning if and only if special settings were passed
                warnmsg = f'{str(E)}\nsc.findinds(): np.isclose() encountered an exception (above), falling back to direct comparison'
                warnings.warn(warnmsg, category=RuntimeWarning, stacklevel=2)

    # Process output
    try:
        if arr.ndim == 1: # Uni-dimensional
            output = output[0] # Return an array rather than a tuple of arrays if one-dimensional
            if ind is not None:
                output = output[ind] # And get the first element
        else:
            if ind is not None:
                output = [output[i][ind] for i in range(arr.ndim)]
    except IndexError as E:
        if die:
            errormsg = 'No matching values found; use die=False to return None instead of raising an exception'
            raise IndexError(errormsg) from E
        else:
            output = None
    return output
def findfirst(*args, **kwargs):
    ''' Shorthand for findinds(..., first=True). New in version 1.0.0. '''
    return findinds(*args, first=True, **kwargs)
def findlast(*args, **kwargs):
    ''' Shorthand for findinds(..., last=True). New in version 1.0.0. '''
    return findinds(*args, last=True, **kwargs)
def findnearest(series=None, value=None):
    '''
    Return the index of the nearest match in series to value -- like findinds, but
    always returns an object with the same type as value (i.e. findnearest with
    a number returns a number, findnearest with an array returns an array).

    **Examples**::

        findnearest(rand(10), 0.5) # returns whichever index is closest to 0.5
        findnearest([2,3,6,3], 6) # returns 2
        findnearest([2,3,6,3], 6) # returns 2
        findnearest([0,2,4,6,8,10], [3, 4, 5]) # returns array([1, 2, 2])

    Version: 2017jan07
    '''
    series = ut.promotetoarray(series)
    if ut.isnumber(value): # Scalar: return the single closest index
        return np.argmin(abs(series-value))
    # Otherwise, recurse over each entry and return an array of indices
    matches = [findnearest(series, entry) for entry in value]
    return ut.promotetoarray(matches)
def dataindex(dataarray, index): # pragma: no cover
    '''
    Take an array of data and return either the first or last (or some other) non-NaN entry.

    This function is deprecated.
    '''
    nrows = np.shape(dataarray)[0] # Number of rows that need to be filled
    output = np.zeros(nrows)
    for row in range(nrows):
        # Take the requested index of the sanitized row -- usually either the first [0] or last [-1]
        output[row] = sanitize(dataarray[row])[index]
    return output
def getvalidinds(data=None, filterdata=None): # pragma: no cover
    '''
    Return the indices that are valid based on the validity of the input data from an arbitrary number
    of 1-D vector inputs. Warning, closely related to getvaliddata().

    This function is deprecated.

    **Example**::

        getvalidinds([3,5,8,13], [2000, nan, nan, 2004]) # Returns array([0,3])
    '''
    data = ut.promotetoarray(data)
    if filterdata is None: # Allow a single input -- more or less replicates sanitize() then
        filterdata = data
    filterdata = ut.promotetoarray(filterdata)
    if filterdata.dtype == 'bool':
        filterindices = filterdata # Already boolean, so use as-is
    else:
        filterindices = findinds(~np.isnan(filterdata)) # Otherwise, treat NaNs as invalid
    dataindices = findinds(~np.isnan(data)) # Also check validity of data
    # WARNING, returns indices only -- not consistent with sanitize()
    return np.intersect1d(dataindices, filterindices)
def getvaliddata(data=None, filterdata=None, defaultind=0): # pragma: no cover
    '''
    Return the data values that are valid based on the validity of the input data.

    This function is deprecated.

    **Example**::

        getvaliddata(array([3,5,8,13]), array([2000, nan, nan, 2004])) # Returns array([3,13])
    '''
    data = np.array(data)
    if filterdata is None: # Allow a single input -- more or less replicates sanitize() then
        filterdata = data
    filterdata = np.array(filterdata)
    if filterdata.dtype == 'bool':
        validindices = filterdata # Already boolean, so use as-is
    else:
        validindices = ~np.isnan(filterdata) # Otherwise, treat NaNs as invalid
    if not validindices.any(): # No valid data, return an empty array
        return np.array([])
    if len(data) == len(validindices): # Same length: use for logical indexing
        return np.array(np.array(data)[validindices])
    if len(validindices) == 1: # Different lengths and length 1: it's an assumption
        return np.array([np.array(data)[defaultind]]) # Use the default index; usually either 0 (start) or -1 (end)
    errormsg = f'Array sizes are mismatched: {len(data)} vs. {len(validindices)}'
    raise ValueError(errormsg)
def sanitize(data=None, returninds=False, replacenans=None, die=True, defaultval=None, label=None, verbose=True):
    '''
    Sanitize input to remove NaNs. Warning, does not work on multidimensional data!!

    Args:
        data (arr): the 1-D data to sanitize
        returninds (bool): whether to also return the indices used (see NOTE below for the replacement branch)
        replacenans (bool/str/number): if True or 'nearest'/'linear', interpolate over NaNs; if a number, substitute it for each NaN
        die (bool): whether to raise an exception if sanitization fails (else, return the input data unchanged)
        defaultval (number): value to use if no data remain after sanitizing (default 0.0)
        label (str): name of the quantity, used in the printed warning when no data remain
        verbose (bool): whether to print a warning when no data remain

    **Examples**::

        sanitized,inds = sanitize(array([3,4,nan,8,2,nan,nan,nan,8]), returninds=True)
        sanitized = sanitize(array([3,4,nan,8,2,nan,nan,nan,8]), replacenans=True)
        sanitized = sanitize(array([3,4,nan,8,2,nan,nan,nan,8]), replacenans=0)
    '''
    try:
        data = np.array(data,dtype=float) # Make sure it's an array of float type
        inds = np.nonzero(~np.isnan(data))[0] # WARNING, nonzero returns tuple :(
        sanitized = data[inds] # Trim data
        if replacenans is not None:
            newx = range(len(data)) # Create a new x array the size of the original array
            if replacenans==True: replacenans = 'nearest' # Boolean True is shorthand for nearest-neighbor interpolation
            if replacenans in ['nearest','linear']:
                sanitized = smoothinterp(newx, inds, sanitized, method=replacenans, smoothness=0) # Replace nans with interpolated values
            else:
                # NOTE: this deliberately rebinds inds to the NaN positions, so with
                # returninds=True this branch returns the replaced indices instead
                naninds = inds = np.nonzero(np.isnan(data))[0]
                sanitized = ut.dcp(data) # Copy so the caller's array is not modified
                sanitized[naninds] = replacenans # Substitute the constant value for each NaN
        if len(sanitized)==0:
            if defaultval is not None:
                sanitized = defaultval # Use the user-supplied fallback
            else:
                sanitized = 0.0 # Otherwise fall back to scalar zero
                if verbose: # pragma: no cover
                    if label is None: label = 'this parameter'
                    print(f'sc.sanitize(): no data entered for {label}, assuming 0')
    except Exception as E: # pragma: no cover
        if die:
            errormsg = f'Sanitization failed on array: "{repr(E)}":\n{data}'
            raise RuntimeError(errormsg)
        else:
            sanitized = data # Give up and just return an empty array
            inds = []
    if returninds: return sanitized, inds
    else:          return sanitized
def isprime(n, verbose=False):
    '''
    Determine if a number is prime.

    From https://stackoverflow.com/questions/15285534/isprime-function-for-python-language

    **Example**::

        for i in range(100): print(i) if sc.isprime(i) else None
    '''
    # Handle the small cases explicitly
    if n < 2:
        if verbose: print('Not prime: n<2')
        return False
    if n == 2:
        if verbose: print('Is prime: n=2')
        return True
    if n == 3:
        if verbose: print('Is prime: n=3')
        return True
    if n % 2 == 0:
        if verbose: print('Not prime: divisible by 2')
        return False
    if n % 3 == 0:
        if verbose: print('Not prime: divisible by 3')
        return False
    if n < 9:
        if verbose: print('Is prime: <9 and not divisible by 2')
        return True
    # Trial division: candidate divisors are of the form 6k±1 up to sqrt(n)
    limit = int(n**0.5)
    for divisor in range(5, limit + 1, 6):
        if n % divisor == 0:
            if verbose: print(f'Not prime: divisible by {divisor}')
            return False
        if n % (divisor + 2) == 0:
            if verbose: print(f'Not prime: divisible by {divisor+2}')
            return False
    if verbose: print('Is prime!')
    return True
##############################################################################
### Other functions
##############################################################################

# Extend the public API with the utility functions defined below
__all__ += ['perturb', 'normsum', 'normalize', 'inclusiverange', 'rolling', 'smooth', 'smoothinterp', 'randround', 'cat']
def perturb(n=1, span=0.5, randseed=None, normal=False):
    '''
    Define an array of numbers uniformly perturbed with a mean of 1.

    Args:
        n (int): number of points
        span (float): width of distribution on either side of 1
        normal (bool): whether to use a normal distribution instead of uniform

    **Example**::

        sc.perturb(5, 0.3) # Returns e.g. array([0.73852362, 0.7088094 , 0.93713658, 1.13150755, 0.87183371])
    '''
    if randseed is not None:
        np.random.seed(int(randseed)) # Optionally reset random seed
    if normal:
        deviations = span * np.random.randn(n)
    else:
        deviations = 2 * span * (np.random.rand(n) - 0.5) # Uniform on [1-span, 1+span)
    return 1.0 + deviations
def normsum(arr, total=None):
    '''
    Multiply a list or array by some normalizing factor so that its sum is equal
    to the total. Formerly called sc.scaleratio().

    Args:
        arr (array): array (or list) to normalize
        total (float): amount to sum to (default 1)

    **Example**::

        normarr = sc.normsum([2,5,3,10], 100) # Scale so sum equals 100; returns [10.0, 25.0, 15.0, 50.0]

    Renamed in version 1.0.0.
    '''
    target = 1.0 if total is None else float(total)
    scale = target / float(sum(arr)) # Factor that takes the current sum to the target
    out = np.array(arr) * scale
    if isinstance(arr, list):
        return out.tolist() # Preserve type
    return out
def normalize(arr, minval=0.0, maxval=1.0):
    '''
    Rescale an array between a minimum value and a maximum value.

    Args:
        arr (array): array to normalize
        minval (float): minimum value in rescaled array
        maxval (float): maximum value in rescaled array

    **Example**::

        normarr = sc.normalize([2,3,7,27]) # Returns array([0.  , 0.04, 0.2 , 1.  ])
    '''
    shifted = np.array(arr, dtype=float) - np.min(arr) # Shift so the minimum is zero
    scaled = shifted / shifted.max() # Now spans [0, 1]
    out = scaled * (maxval - minval) + minval # Map onto [minval, maxval]
    if isinstance(arr, list):
        return out.tolist() # Preserve type
    return out
def inclusiverange(*args, **kwargs):
    '''
    Like arange/linspace, but includes the start and stop points.
    Accepts 0-3 args, or the kwargs start, stop, step.

    **Examples**::

        x = sc.inclusiverange(3,5,0.2)
        x = sc.inclusiverange(stop=5)
        x = sc.inclusiverange(6, step=2)
    '''
    # Handle positional arguments: 1 arg means stop; 2 mean start, stop; 3 add step
    if len(args) > 3: # pragma: no cover
        raise ValueError('Too many arguments supplied: inclusiverange() accepts 0-3 arguments')
    start, stop, step = None, None, None
    if len(args) == 1:
        (stop,) = args
    elif len(args) == 2:
        start, stop = args
    elif len(args) == 3:
        start, stop, step = args

    # Keyword arguments override/fill in positional ones
    start = kwargs.get('start', start)
    stop = kwargs.get('stop', stop)
    step = kwargs.get('step', step)

    # Finalize defaults
    if start is None: start = 0
    if stop is None: stop = 1
    if step is None: step = 1

    # Use linspace rather than arange, since arange handles floating point arithmetic
    # badly, e.g. compare arange(2000, 2020, 0.2) with arange(2000, 2020.2, 0.2)
    npts = int(round((stop-start)/float(step)) + 1)
    return np.linspace(start, stop, npts)
def rolling(data, window=7, operation='mean', **kwargs):
    '''
    Alias to Pandas' rolling() (window) method to smooth a series.

    Args:
        data (list/arr): the 1D or 2D data to be smoothed
        window (int): the length of the window
        operation (str): the operation to perform: 'mean' (default), 'median', 'sum', or 'none'
        kwargs (dict): passed to pd.Series.rolling()

    **Example**::

        data = [5,5,5,0,0,0,0,7,7,7,7,0,0,3,3,3]
        rolled = sc.rolling(data)
    '''
    import pandas as pd # Optional import

    # Wrap the data in the appropriate pandas container
    arr = np.array(data)
    series = pd.Series(arr) if arr.ndim == 1 else pd.DataFrame(arr)

    # Perform the roll, then apply the requested reduction
    roll = series.rolling(window=window, **kwargs)
    if operation in [None, 'none']:
        return roll
    if operation == 'mean':
        return roll.mean().values
    if operation == 'median':
        return roll.median().values
    if operation == 'sum':
        return roll.sum().values
    errormsg = f'Operation "{operation}" not recognized; must be mean, median, sum, or none'
    raise ValueError(errormsg)
def smooth(data, repeats=None):
    '''
    Very simple function to smooth a 2D array -- slow but simple and easy to use.

    Args:
        data (arr): 1D or 2D array to smooth
        repeats (int): number of times to apply smoothing (by default, scale with length of data)

    **Example**::

        data = pl.randn(5,5)
        smoothdata = sc.smooth(data)
    '''
    if repeats is None:
        repeats = int(np.floor(len(data)/5)) # Scale the number of passes with the data length
    output = np.array(data)
    kernel = np.array([0.25, 0.5, 0.25]) # Simple three-point smoothing kernel
    for _ in range(repeats):
        if output.ndim == 1:
            output = np.convolve(output, kernel, mode='same')
        elif output.ndim == 2: # Smooth along each row, then each column
            for row in range(output.shape[0]):
                output[row, :] = np.convolve(output[row, :], kernel, mode='same')
            for col in range(output.shape[1]):
                output[:, col] = np.convolve(output[:, col], kernel, mode='same')
        else: # pragma: no cover
            errormsg = 'Simple smoothing only implemented for 1D and 2D arrays'
            raise ValueError(errormsg)
    return output
def smoothinterp(newx=None, origx=None, origy=None, smoothness=None, growth=None, ensurefinite=False, keepends=True, method='linear'):
    '''
    Smoothly interpolate over values and keep end points. Same format as numpy.interp().

    Args:
        newx (arr): the points at which to interpolate
        origx (arr): the original x coordinates
        origy (arr): the original y coordinates
        smoothness (float): how much to smooth
        growth (float): the growth rate to apply past the ends of the data [deprecated]
        ensurefinite (bool): ensure all values are finite
        keepends (bool): whether to keep the ends [deprecated]
        method (str): the type of interpolation to use (options are 'linear' or 'nearest')

    Returns:
        newy (arr): the new y coordinates

    **Example**::

        origy = np.array([0,0.1,0.3,0.8,0.7,0.9,0.95,1])
        origx = np.linspace(0,1,len(origy))
        newx = np.linspace(0,1,5*len(origy))
        newy = sc.smoothinterp(newx, origx, origy, smoothness=5)
        pl.plot(newx,newy)
        pl.scatter(origx,origy)

    Version: 2018jan24
    '''
    # Ensure arrays and remove NaNs
    if ut.isnumber(newx): newx = [newx] # Make sure it has dimension
    if ut.isnumber(origx): origx = [origx] # Make sure it has dimension
    if ut.isnumber(origy): origy = [origy] # Make sure it has dimension
    newx = np.array(newx, dtype=float)
    origx = np.array(origx, dtype=float)
    origy = np.array(origy, dtype=float)

    # If only a single element, just return it, without checking everything else
    if len(origy)==1:
        newy = np.zeros(newx.shape)+origy[0]
        return newy

    # Validate the inputs
    if not(newx.shape): raise ValueError('To interpolate, must have at least one new x value to interpolate to') # pragma: no cover
    if not(origx.shape): raise ValueError('To interpolate, must have at least one original x value to interpolate to') # pragma: no cover
    if not(origy.shape): raise ValueError('To interpolate, must have at least one original y value to interpolate to') # pragma: no cover
    if not(origx.shape==origy.shape): # pragma: no cover
        errormsg = f'To interpolate, original x and y vectors must be same length (x={len(origx)}, y={len(origy)})'
        raise ValueError(errormsg)

    # Make sure it's in the correct order
    correctorder = np.argsort(origx)
    origx = origx[correctorder]
    origy = origy[correctorder]
    neworder = np.argsort(newx) # Remember this permutation so the output can be unsorted at the end
    newx = newx[neworder] # And sort newx just in case

    # Only keep finite elements
    finitey = np.isfinite(origy) # Boolean for whether it's finite
    if finitey.any() and not finitey.all(): # If some but not all is finite, pull out indices that are
        finiteorigy = origy[finitey]
        finiteorigx = origx[finitey]
    else: # Otherwise, just copy the original
        finiteorigy = origy.copy()
        finiteorigx = origx.copy()

    # Perform actual interpolation
    if method=='linear':
        newy = np.interp(newx, finiteorigx, finiteorigy) # Perform standard interpolation without infinities
    elif method=='nearest':
        newy = np.zeros(newx.shape) # Create the new array of the right size
        for i,x in enumerate(newx): # Iterate over each point
            xind = np.argmin(abs(finiteorigx-x)) # Find the nearest neighbor
            newy[i] = finiteorigy[xind] # Copy it
    else: # pragma: no cover
        errormsg = f'Method "{method}" not found; methods are "linear" or "nearest"'
        raise ValueError(errormsg)

    # Perform smoothing
    if smoothness is None: smoothness = np.ceil(len(newx)/len(origx)) # Calculate smoothness: this is consistent smoothing regardless of the size of the arrays
    smoothness = int(smoothness) # Make sure it's an appropriate number
    if smoothness:
        kernel = np.exp(-np.linspace(-2,2,2*smoothness+1)**2) # Gaussian-shaped convolution kernel
        kernel /= kernel.sum() # Normalize so smoothing preserves the overall level
        validinds = findinds(~np.isnan(newy)) # Remove nans since these don't exactly smooth well
        if len(validinds): # No point doing these steps if no non-nan values
            validy = newy[validinds]
            # Pad with the end values so the convolution doesn't pull the ends toward zero
            prepend = validy[0]*np.ones(smoothness)
            postpend = validy[-1]*np.ones(smoothness)
            if not keepends: # pragma: no cover
                try: # Try to compute slope, but use original prepend if it doesn't work
                    dyinitial = (validy[0]-validy[1])
                    prepend = validy[0]*np.ones(smoothness) + dyinitial*np.arange(smoothness,0,-1)
                except:
                    pass
                try: # Try to compute slope, but use original postpend if it doesn't work
                    dyfinal = (validy[-1]-validy[-2])
                    postpend = validy[-1]*np.ones(smoothness) + dyfinal*np.arange(1,smoothness+1,1)
                except:
                    pass
            validy = np.concatenate([prepend, validy, postpend])
            validy = np.convolve(validy, kernel, 'valid') # Smooth it out a bit
            newy[validinds] = validy # Copy back into full vector

    # Apply growth if required
    if growth is not None: # pragma: no cover
        pastindices = findinds(newx<origx[0])
        futureindices = findinds(newx>origx[-1])
        if len(pastindices): # If there are past data points
            firstpoint = pastindices[-1]+1
            newy[pastindices] = newy[firstpoint] * np.exp((newx[pastindices]-newx[firstpoint])*growth) # Get last 'good' data point and apply inverse growth
        if len(futureindices): # If there are future data points
            lastpoint = futureindices[0]-1
            newy[futureindices] = newy[lastpoint] * np.exp((newx[futureindices]-newx[lastpoint])*growth) # Get last 'good' data point and apply growth

    # Add infinities back in, if they exist
    if any(~np.isfinite(origy)): # pragma: no cover # Infinities exist, need to add them back in manually since interp can only handle nan
        if not ensurefinite: # If not ensuring all entries are finite, put nonfinite entries back in
            orignan = np.zeros(len(origy)) # Start from scratch
            origplusinf = np.zeros(len(origy)) # Start from scratch
            origminusinf = np.zeros(len(origy)) # Start from scratch
            orignan[np.isnan(origy)] = np.nan # Replace nan entries with nan
            origplusinf[origy==np.inf] = np.nan # Replace plus infinite entries with nan
            origminusinf[origy==-np.inf] = np.nan # Replace minus infinite entries with nan
            newnan = np.interp(newx, origx, orignan) # Interpolate the nans
            newplusinf = np.interp(newx, origx, origplusinf) # ...again, for positive
            newminusinf = np.interp(newx, origx, origminusinf) # ...and again, for negative
            newy[np.isnan(newminusinf)] = -np.inf # Add minus infinity back in first
            newy[np.isnan(newplusinf)] = np.inf # Then, plus infinity
            newy[np.isnan(newnan)] = np.nan # Finally, the nans

    # Restore original sort order for newy (argsort of a permutation is its inverse)
    restoredorder = np.argsort(neworder)
    newy = newy[restoredorder]

    return newy
def randround(x):
    '''
    Round a float, list, or array probabilistically to the nearest integer. Works
    for both positive and negative values.

    Adapted from:
        https://stackoverflow.com/questions/19045971/random-rounding-to-integer-in-python

    Args:
        x (int, list, arr): the floating point numbers to probabilistically convert to the nearest integer

    Returns:
        Array of integers

    **Example**::

        sc.randround(np.random.randn(8)) # Returns e.g. array([-1,  0,  1, -2,  2,  0,  0,  0])

    New in version 1.0.0.
    '''
    # Adding uniform noise on [0,1) and flooring rounds up with probability equal to the fraction
    if isinstance(x, np.ndarray):
        return np.array(np.floor(x + np.random.random(x.size)), dtype=int)
    if isinstance(x, list):
        return [randround(entry) for entry in x]
    return int(np.floor(x + np.random.random()))
def cat(*args, axis=None, copy=False, **kwargs):
    '''
    Like np.concatenate(), but takes anything and returns an array. Useful for
    e.g. appending a single number onto the beginning or end of an array.

    Args:
        args (any): items to concatenate into an array
        axis (int): axis along which to concatenate
        copy (bool): whether or not to deepcopy the result
        kwargs (dict): passed to ``np.array()``

    **Examples**::

        arr = sc.cat(4, np.ones(3))
        arr = sc.cat(np.array([1,2,3]), [4,5], 6)
        arr = sc.cat(np.random.rand(2,4), np.random.rand(2,6), axis=1)

    New in version 1.0.0.
    New in version 1.1.0: "copy" and keyword arguments.
    '''
    if not len(args): # Nothing to concatenate
        return np.array([])
    output = ut.promotetoarray(args[0])
    for extra in args[1:]: # Promote and append each remaining argument in turn
        output = np.concatenate((output, ut.promotetoarray(extra)), axis=axis)
    output = np.array(output, **kwargs)
    if copy:
        output = ut.dcp(output)
    return output
"numpy.convolve",
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"numpy.isfinite",
"numpy.arange",
"numpy.random.random",
"numpy.exp",
"numpy.linspace",
"numpy.concatenate",
"pandas.DataFrame",
"warnings.warn",
"numpy.ones",
"numpy.isnan",
"numpy.nonzero",
"numpy.interp",
"nump... | [((1438, 1474), 'numpy.isclose', 'np.isclose', ([], {'a': 'val1', 'b': 'val2'}), '(a=val1, b=val2, **kwargs)\n', (1448, 1474), True, 'import numpy as np\n'), ((7550, 7565), 'numpy.zeros', 'np.zeros', (['nrows'], {}), '(nrows)\n', (7558, 7565), True, 'import numpy as np\n'), ((8681, 8723), 'numpy.intersect1d', 'np.intersect1d', (['dataindices', 'filterindices'], {}), '(dataindices, filterindices)\n', (8695, 8723), True, 'import numpy as np\n'), ((9160, 9174), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9168, 9174), True, 'import numpy as np\n'), ((9315, 9335), 'numpy.array', 'np.array', (['filterdata'], {}), '(filterdata)\n', (9323, 9335), True, 'import numpy as np\n'), ((15580, 15606), 'numpy.array', 'np.array', (['arr'], {'dtype': 'float'}), '(arr, dtype=float)\n', (15588, 15606), True, 'import numpy as np\n'), ((17698, 17712), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17706, 17712), True, 'import numpy as np\n'), ((18742, 18756), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (18750, 18756), True, 'import numpy as np\n'), ((18770, 18797), 'numpy.array', 'np.array', (['[0.25, 0.5, 0.25]'], {}), '([0.25, 0.5, 0.25])\n', (18778, 18797), True, 'import numpy as np\n'), ((20711, 20738), 'numpy.array', 'np.array', (['newx'], {'dtype': 'float'}), '(newx, dtype=float)\n', (20719, 20738), True, 'import numpy as np\n'), ((20751, 20779), 'numpy.array', 'np.array', (['origx'], {'dtype': 'float'}), '(origx, dtype=float)\n', (20759, 20779), True, 'import numpy as np\n'), ((20792, 20820), 'numpy.array', 'np.array', (['origy'], {'dtype': 'float'}), '(origy, dtype=float)\n', (20800, 20820), True, 'import numpy as np\n'), ((21670, 21687), 'numpy.argsort', 'np.argsort', (['origx'], {}), '(origx)\n', (21680, 21687), True, 'import numpy as np\n'), ((21767, 21783), 'numpy.argsort', 'np.argsort', (['newx'], {}), '(newx)\n', (21777, 21783), True, 'import numpy as np\n'), ((21886, 21904), 'numpy.isfinite', 'np.isfinite', (['origy'], {}), 
'(origy)\n', (21897, 21904), True, 'import numpy as np\n'), ((26371, 26391), 'numpy.argsort', 'np.argsort', (['neworder'], {}), '(neworder)\n', (26381, 26391), True, 'import numpy as np\n'), ((28211, 28237), 'numpy.array', 'np.array', (['output'], {}), '(output, **kwargs)\n', (28219, 28237), True, 'import numpy as np\n'), ((4468, 4533), 'warnings.warn', 'warnings.warn', (['warnmsg'], {'category': 'DeprecationWarning', 'stacklevel': '(2)'}), '(warnmsg, category=DeprecationWarning, stacklevel=2)\n', (4481, 4533), False, 'import warnings\n'), ((4650, 4665), 'numpy.nonzero', 'np.nonzero', (['arr'], {}), '(arr)\n', (4660, 4665), True, 'import numpy as np\n'), ((7446, 7465), 'numpy.shape', 'np.shape', (['dataarray'], {}), '(dataarray)\n', (7454, 7465), True, 'import numpy as np\n'), ((10200, 10212), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10208, 10212), True, 'import numpy as np\n'), ((10821, 10848), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (10829, 10848), True, 'import numpy as np\n'), ((15074, 15087), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (15082, 15087), True, 'import numpy as np\n'), ((17724, 17739), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (17733, 17739), True, 'import pandas as pd\n'), ((17763, 17781), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (17775, 17781), True, 'import pandas as pd\n'), ((22308, 22349), 'numpy.interp', 'np.interp', (['newx', 'finiteorigx', 'finiteorigy'], {}), '(newx, finiteorigx, finiteorigy)\n', (22317, 22349), True, 'import numpy as np\n'), ((28025, 28037), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (28033, 28037), True, 'import numpy as np\n'), ((28157, 28197), 'numpy.concatenate', 'np.concatenate', (['(output, arg)'], {'axis': 'axis'}), '((output, arg), axis=axis)\n', (28171, 28197), True, 'import numpy as np\n'), ((4767, 4789), 'numpy.nonzero', 'np.nonzero', (['(arr == val)'], {}), '(arr == val)\n', (4777, 4789), True, 
'import numpy as np\n'), ((8616, 8630), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (8624, 8630), True, 'import numpy as np\n'), ((9486, 9506), 'numpy.isnan', 'np.isnan', (['filterdata'], {}), '(filterdata)\n', (9494, 9506), True, 'import numpy as np\n'), ((18875, 18915), 'numpy.convolve', 'np.convolve', (['output', 'kernel'], {'mode': '"""same"""'}), "(output, kernel, mode='same')\n", (18886, 18915), True, 'import numpy as np\n'), ((20940, 20960), 'numpy.zeros', 'np.zeros', (['newx.shape'], {}), '(newx.shape)\n', (20948, 20960), True, 'import numpy as np\n'), ((22445, 22465), 'numpy.zeros', 'np.zeros', (['newx.shape'], {}), '(newx.shape)\n', (22453, 22465), True, 'import numpy as np\n'), ((24186, 24229), 'numpy.concatenate', 'np.concatenate', (['[prepend, validy, postpend]'], {}), '([prepend, validy, postpend])\n', (24200, 24229), True, 'import numpy as np\n'), ((24251, 24287), 'numpy.convolve', 'np.convolve', (['validy', 'kernel', '"""valid"""'], {}), "(validy, kernel, 'valid')\n", (24262, 24287), True, 'import numpy as np\n'), ((25124, 25142), 'numpy.isfinite', 'np.isfinite', (['origy'], {}), '(origy)\n', (25135, 25142), True, 'import numpy as np\n'), ((25852, 25883), 'numpy.interp', 'np.interp', (['newx', 'origx', 'orignan'], {}), '(newx, origx, orignan)\n', (25861, 25883), True, 'import numpy as np\n'), ((25933, 25968), 'numpy.interp', 'np.interp', (['newx', 'origx', 'origplusinf'], {}), '(newx, origx, origplusinf)\n', (25942, 25968), True, 'import numpy as np\n'), ((26020, 26056), 'numpy.interp', 'np.interp', (['newx', 'origx', 'origminusinf'], {}), '(newx, origx, origminusinf)\n', (26029, 26056), True, 'import numpy as np\n'), ((4863, 4908), 'numpy.isclose', 'np.isclose', ([], {'a': 'arr', 'b': 'val', 'atol': 'atol'}), '(a=arr, b=val, atol=atol, **kwargs)\n', (4873, 4908), True, 'import numpy as np\n'), ((8517, 8537), 'numpy.isnan', 'np.isnan', (['filterdata'], {}), '(filterdata)\n', (8525, 8537), True, 'import numpy as np\n'), ((14412, 14430), 
'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (14427, 14430), True, 'import numpy as np\n'), ((23245, 23259), 'numpy.isnan', 'np.isnan', (['newy'], {}), '(newy)\n', (23253, 23259), True, 'import numpy as np\n'), ((23459, 23478), 'numpy.ones', 'np.ones', (['smoothness'], {}), '(smoothness)\n', (23466, 23478), True, 'import numpy as np\n'), ((23513, 23532), 'numpy.ones', 'np.ones', (['smoothness'], {}), '(smoothness)\n', (23520, 23532), True, 'import numpy as np\n'), ((24704, 24759), 'numpy.exp', 'np.exp', (['((newx[pastindices] - newx[firstpoint]) * growth)'], {}), '((newx[pastindices] - newx[firstpoint]) * growth)\n', (24710, 24759), True, 'import numpy as np\n'), ((24968, 25024), 'numpy.exp', 'np.exp', (['((newx[futureindices] - newx[lastpoint]) * growth)'], {}), '((newx[futureindices] - newx[lastpoint]) * growth)\n', (24974, 25024), True, 'import numpy as np\n'), ((25579, 25594), 'numpy.isnan', 'np.isnan', (['origy'], {}), '(origy)\n', (25587, 25594), True, 'import numpy as np\n'), ((26103, 26124), 'numpy.isnan', 'np.isnan', (['newminusinf'], {}), '(newminusinf)\n', (26111, 26124), True, 'import numpy as np\n'), ((26188, 26208), 'numpy.isnan', 'np.isnan', (['newplusinf'], {}), '(newplusinf)\n', (26196, 26208), True, 'import numpy as np\n'), ((26259, 26275), 'numpy.isnan', 'np.isnan', (['newnan'], {}), '(newnan)\n', (26267, 26275), True, 'import numpy as np\n'), ((5367, 5428), 'warnings.warn', 'warnings.warn', (['warnmsg'], {'category': 'RuntimeWarning', 'stacklevel': '(2)'}), '(warnmsg, category=RuntimeWarning, stacklevel=2)\n', (5380, 5428), False, 'import warnings\n'), ((9751, 9765), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9759, 9765), True, 'import numpy as np\n'), ((10919, 10933), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (10927, 10933), True, 'import numpy as np\n'), ((14472, 14489), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (14486, 14489), True, 'import numpy as np\n'), ((19006, 19052), 
'numpy.convolve', 'np.convolve', (['output[i, :]', 'kernel'], {'mode': '"""same"""'}), "(output[i, :], kernel, mode='same')\n", (19017, 19052), True, 'import numpy as np\n'), ((19111, 19157), 'numpy.convolve', 'np.convolve', (['output[:, j]', 'kernel'], {'mode': '"""same"""'}), "(output[:, j], kernel, mode='same')\n", (19122, 19157), True, 'import numpy as np\n'), ((23147, 23185), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(2 * smoothness + 1)'], {}), '(-2, 2, 2 * smoothness + 1)\n', (23158, 23185), True, 'import numpy as np\n'), ((27082, 27106), 'numpy.random.random', 'np.random.random', (['x.size'], {}), '(x.size)\n', (27098, 27106), True, 'import numpy as np\n'), ((11485, 11499), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (11493, 11499), True, 'import numpy as np\n'), ((27235, 27253), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (27251, 27253), True, 'import numpy as np\n'), ((9936, 9950), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9944, 9950), True, 'import numpy as np\n'), ((23764, 23783), 'numpy.ones', 'np.ones', (['smoothness'], {}), '(smoothness)\n', (23771, 23783), True, 'import numpy as np\n'), ((23796, 23824), 'numpy.arange', 'np.arange', (['smoothness', '(0)', '(-1)'], {}), '(smoothness, 0, -1)\n', (23805, 23824), True, 'import numpy as np\n'), ((24058, 24077), 'numpy.ones', 'np.ones', (['smoothness'], {}), '(smoothness)\n', (24065, 24077), True, 'import numpy as np\n'), ((24088, 24119), 'numpy.arange', 'np.arange', (['(1)', '(smoothness + 1)', '(1)'], {}), '(1, smoothness + 1, 1)\n', (24097, 24119), True, 'import numpy as np\n')] |
import os
import numpy as np
from stompy.spatial import field
# Directory holding the test fixture files, resolved relative to this module.
datadir=os.path.join( os.path.dirname(__file__), 'data')
#depth_bin_file = '/home/rusty/classes/research/spatialdata/us/ca/suntans/bathymetry/compiled2/final.bin'
def test_xyz():
    """Load the sample XYZ depth file and run inverse-distance interpolation."""
    xyz_path = os.path.join(datadir, 'depth.xyz')
    fld = field.XYZText(fname=xyz_path)
    fld.build_index()
    query_point = np.array([563379.6, 4196117.])
    elev = fld.inv_dist_interp(query_point,
                               min_n_closest=8,
                               min_radius=3900.0)
##
def test_lin_interp():
    """XYZField interpolation must reproduce the field values at the sample points."""
    points = np.array([[0., 0.], [10., 0.], [10., 10.], [0., 10.]])
    values = np.array([1., 2., 3., 4.])
    fld = field.XYZField(X=points, F=values)
    elev = fld.interpolate([2, 3])
    recovered = fld.interpolate(points)
    assert np.allclose(recovered, values)
| [
"stompy.spatial.field.XYZText",
"numpy.allclose",
"os.path.join",
"os.path.dirname",
"numpy.array",
"stompy.spatial.field.XYZField"
] | [((87, 112), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (102, 112), False, 'import os\n'), ((267, 301), 'os.path.join', 'os.path.join', (['datadir', '"""depth.xyz"""'], {}), "(datadir, 'depth.xyz')\n", (279, 301), False, 'import os\n'), ((310, 345), 'stompy.spatial.field.XYZText', 'field.XYZText', ([], {'fname': 'depth_bin_file'}), '(fname=depth_bin_file)\n', (323, 345), False, 'from stompy.spatial import field\n'), ((380, 411), 'numpy.array', 'np.array', (['[563379.6, 4196117.0]'], {}), '([563379.6, 4196117.0])\n', (388, 411), True, 'import numpy as np\n'), ((583, 645), 'numpy.array', 'np.array', (['[[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0]]'], {}), '([[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0]])\n', (591, 645), True, 'import numpy as np\n'), ((637, 667), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (645, 667), True, 'import numpy as np\n'), ((670, 694), 'stompy.spatial.field.XYZField', 'field.XYZField', ([], {'X': 'X', 'F': 'F'}), '(X=X, F=F)\n', (684, 694), False, 'from stompy.spatial import field\n'), ((766, 785), 'numpy.allclose', 'np.allclose', (['out', 'F'], {}), '(out, F)\n', (777, 785), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import kornia
from codes.models.resnet import resnet18
import matplotlib
from codes.models.region_proposal_network import RegionProposalNetwork
import cv2
from codes.EX_CONST import Const
import matplotlib.pyplot as plt
matplotlib.use('Agg')
class Reshape(nn.Module):
    """Flatten the input tensor into a single row of fixed width.

    The width is the first positional argument given at construction;
    any extra arguments are stored but not used by ``forward``.
    """

    def __init__(self, *args):
        super().__init__()
        self.shape = args

    def forward(self, x):
        # Only shape[0] is consumed; the leading dimension is fixed to 1.
        width = self.shape[0]
        return x.view(1, width)
class PerspTransDetector(nn.Module):
    """Multi-camera detector: per-camera CNN features are warped onto a
    shared ground-plane grid and fed to a region proposal network.

    NOTE(review): tensors are moved with ``.cuda()`` throughout, so a CUDA
    device is required to run this module.
    """
    def __init__(self, dataset = None):
        super().__init__()
        if dataset is not None:
            self.num_cam = dataset.num_cam
            self.img_shape, self.reducedgrid_shape = dataset.img_shape, dataset.reducedgrid_shape
            # One 3x3 homography per camera: image pixel -> world-grid cell.
            imgcoord2worldgrid_matrices = self.get_imgcoord2worldgrid_matrices(dataset.base.intrinsic_matrices,
                                                                               dataset.base.extrinsic_matrices,
                                                                               dataset.base.worldgrid2worldcoord_mat)
            # Normalized x/y coordinate channels appended to the fused map.
            self.coord_map = self.create_coord_map(self.reducedgrid_shape + [1])
            # Image side: feature maps get upsampled to img_shape / Const.reduce.
            self.upsample_shape = list(map(lambda x: int(x / Const.reduce), self.img_shape))
            img_reduce = np.array(self.img_shape) / np.array(self.upsample_shape)
            img_zoom_mat = np.diag(np.append(img_reduce, [1]))
            # Map side: scale the world grid by 1 / Const.reduce.
            map_zoom_mat = np.diag(np.append(np.ones([2]) / Const.reduce, [1]))
            # Per-camera projection from reduced image coords to reduced grid coords.
            self.proj_mats = [torch.from_numpy(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat)
                              for cam in range(self.num_cam)]
        # ResNet-18 trunk without avgpool/fc; last stage dilated to keep resolution.
        self.backbone = nn.Sequential(*list(resnet18(pretrained=True, replace_stride_with_dilation=[False, False, True]).children())[:-2]).cuda()
        self.rpn = RegionProposalNetwork(in_channels=1026, mid_channels=1026, ratios=[0.9, 1.1], anchor_scales=[4]).cuda()

    def forward(self, imgs,frame, gt_boxes = None, epoch = None, visualize=False, train = True, mark = None):
        """Run the RPN over ground-plane-fused multi-camera features.

        Args:
            imgs: image batch of shape (B, num_cam, C, H, W).
            frame, gt_boxes, epoch, visualize, train, mark: accepted but not
                referenced inside this method (kept for caller compatibility).

        Returns:
            Tuple ``(rpn_locs, rpn_scores, anchor, rois, roi_indices,
            img_featuremap, world_features)``: the RPN outputs plus the
            per-camera feature maps and the fused ground-plane feature map.

        Side effects: writes several debug images (``img_norm_*.jpg``,
        ``world_feature_*.jpg``, ``world_features.jpg``) to the current
        working directory on every call.
        """
        B, N, C, H, W = imgs.shape
        world_features = []
        img_featuremap = []
        for cam in range(self.num_cam):
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
            # Per-camera CNN features, upsampled to the reduced image size.
            img_feature =self.backbone(imgs[:, cam].cuda())
            img_feature = F.interpolate(img_feature, self.upsample_shape, mode='bilinear')
            # Debug dump of the channel-norm image.
            # NOTE(review): every camera index >= 1 overwrites the same *_1.jpg file.
            if cam == 0:
                plt.imsave("img_norm_0.jpg", torch.norm(img_feature[0], dim=0).cpu().numpy())
            else:
                plt.imsave("img_norm_1.jpg", torch.norm(img_feature[0], dim=0).cpu().numpy())
            img_featuremap.append(img_feature)
            # Warp the image features onto the ground plane with this camera's homography.
            proj_mat = self.proj_mats[cam].repeat([B, 1, 1]).float().cuda()
            world_feature = kornia.warp_perspective(img_feature.cuda(), proj_mat, self.reducedgrid_shape) # 0.0142 * 2 = 0.028
            world_feature = kornia.vflip(world_feature)
            # Debug dump of the warped (ground-plane) feature map.
            if cam == 0:
                plt.imsave("world_feature_0.jpg", torch.norm(world_feature[0], dim=0).cpu().numpy())
            else:
                plt.imsave("world_feature_1.jpg", torch.norm(world_feature[0], dim=0).cpu().numpy())
            world_features.append(world_feature.cuda())

        # Fuse all cameras plus the coordinate map along the channel axis.
        world_features = torch.cat(world_features + [self.coord_map.repeat([B, 1, 1, 1]).cuda()], dim=1)
        plt.imsave("world_features.jpg", torch.norm(world_features[0], dim=0).cpu().numpy())

        rpn_locs, rpn_scores, anchor, rois, roi_indices = self.rpn(world_features, Const.grid_size) # 0.08
        return rpn_locs, rpn_scores, anchor, rois, roi_indices, img_featuremap, world_features

    def get_imgcoord2worldgrid_matrices(self, intrinsic_matrices, extrinsic_matrices, worldgrid2worldcoord_mat):
        """Build, for each camera, the 3x3 homography mapping image
        coordinates to world-grid coordinates.

        Dropping column 2 of [R|t] restricts world points to the ground
        plane, which makes the camera projection an invertible 3x3 matrix.
        """
        projection_matrices = {}
        for cam in range(self.num_cam):
            worldcoord2imgcoord_mat = intrinsic_matrices[cam] @ np.delete(extrinsic_matrices[cam], 2, 1)
            worldgrid2imgcoord_mat = worldcoord2imgcoord_mat @ worldgrid2worldcoord_mat
            imgcoord2worldgrid_mat = np.linalg.inv(worldgrid2imgcoord_mat)
            # Identity here; placeholder for an axis permutation if ever needed.
            permutation_mat = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
            projection_matrices[cam] = permutation_mat @ imgcoord2worldgrid_mat
        return projection_matrices

    def create_coord_map(self, img_size, with_r=False):
        """Return a (1, 2, H, W) tensor of x/y coordinates normalized to
        [-1, 1]; optionally append a radius channel when ``with_r`` is True."""
        H, W, C = img_size
        grid_x, grid_y = np.meshgrid(np.arange(W), np.arange(H))
        grid_x = torch.from_numpy(grid_x / (W - 1) * 2 - 1).float()
        grid_y = torch.from_numpy(grid_y / (H - 1) * 2 - 1).float()
        ret = torch.stack([grid_x, grid_y], dim=0).unsqueeze(0)
        if with_r:
            # Distance from the grid center as an extra channel.
            rr = torch.sqrt(torch.pow(grid_x, 2) + torch.pow(grid_y, 2)).view([1, 1, H, W])
            ret = torch.cat([ret, rr], dim=1)
        return ret
def vis_feature(x, max_num=5, out_path='/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/'):
    """Save the first few channels of a feature map as JET heatmap images.

    Args:
        x: 4-D feature tensor; channel ``i`` of batch item 0 is visualized.
           (assumes shape (B, C, H, W) — TODO confirm with callers)
        max_num: maximum number of channels to dump.
        out_path: directory that receives the ``<i>.jpg`` files.
    """
    # Iterate only over the channels we will actually save, instead of
    # looping over all channels and breaking out early.
    for i in range(min(max_num, x.shape[1])):
        feature = x[0, i, :, :].view(x.shape[-2], x.shape[-1])
        feature = feature.detach().cpu().numpy()
        # Squash activations into (0, 1) with a sigmoid, then scale to 8-bit.
        feature = 1.0 / (1 + np.exp(-1 * feature))
        feature = np.round(feature * 255).astype(np.uint8)
        feature_img = cv2.applyColorMap(feature, cv2.COLORMAP_JET)
        dst_path = os.path.join(out_path, str(i) + '.jpg')
        cv2.imwrite(dst_path, feature_img)
"codes.models.region_proposal_network.RegionProposalNetwork",
"torch.from_numpy",
"torch.pow",
"numpy.array",
"torch.nn.functional.interpolate",
"numpy.arange",
"numpy.delete",
"numpy.exp",
"numpy.round",
"kornia.vflip",
"numpy.ones",
"matplotlib.use",
"torch.norm",
"torch.cuda.empty_cache... | [((317, 338), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (331, 338), False, 'import matplotlib\n'), ((5343, 5387), 'cv2.applyColorMap', 'cv2.applyColorMap', (['feature', 'cv2.COLORMAP_JET'], {}), '(feature, cv2.COLORMAP_JET)\n', (5360, 5387), False, 'import cv2\n'), ((5455, 5489), 'cv2.imwrite', 'cv2.imwrite', (['dst_path', 'feature_img'], {}), '(dst_path, feature_img)\n', (5466, 5489), False, 'import cv2\n'), ((2450, 2514), 'torch.nn.functional.interpolate', 'F.interpolate', (['img_feature', 'self.upsample_shape'], {'mode': '"""bilinear"""'}), "(img_feature, self.upsample_shape, mode='bilinear')\n", (2463, 2514), True, 'import torch.nn.functional as F\n'), ((3029, 3056), 'kornia.vflip', 'kornia.vflip', (['world_feature'], {}), '(world_feature)\n', (3041, 3056), False, 'import kornia\n'), ((4178, 4215), 'numpy.linalg.inv', 'np.linalg.inv', (['worldgrid2imgcoord_mat'], {}), '(worldgrid2imgcoord_mat)\n', (4191, 4215), True, 'import numpy as np\n'), ((4246, 4289), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (4254, 4289), True, 'import numpy as np\n'), ((4526, 4538), 'numpy.arange', 'np.arange', (['W'], {}), '(W)\n', (4535, 4538), True, 'import numpy as np\n'), ((4540, 4552), 'numpy.arange', 'np.arange', (['H'], {}), '(H)\n', (4549, 4552), True, 'import numpy as np\n'), ((4883, 4910), 'torch.cat', 'torch.cat', (['[ret, rr]'], {'dim': '(1)'}), '([ret, rr], dim=1)\n', (4892, 4910), False, 'import torch\n'), ((1367, 1391), 'numpy.array', 'np.array', (['self.img_shape'], {}), '(self.img_shape)\n', (1375, 1391), True, 'import numpy as np\n'), ((1394, 1423), 'numpy.array', 'np.array', (['self.upsample_shape'], {}), '(self.upsample_shape)\n', (1402, 1423), True, 'import numpy as np\n'), ((1459, 1485), 'numpy.append', 'np.append', (['img_reduce', '[1]'], {}), '(img_reduce, [1])\n', (1468, 1485), True, 'import numpy as np\n'), ((1615, 1700), 
'torch.from_numpy', 'torch.from_numpy', (['(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat)'], {}), '(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat\n )\n', (1631, 1700), False, 'import torch\n'), ((1924, 2025), 'codes.models.region_proposal_network.RegionProposalNetwork', 'RegionProposalNetwork', ([], {'in_channels': '(1026)', 'mid_channels': '(1026)', 'ratios': '[0.9, 1.1]', 'anchor_scales': '[4]'}), '(in_channels=1026, mid_channels=1026, ratios=[0.9, 1.1\n ], anchor_scales=[4])\n', (1945, 2025), False, 'from codes.models.region_proposal_network import RegionProposalNetwork\n'), ((2339, 2363), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2361, 2363), False, 'import torch\n'), ((4011, 4051), 'numpy.delete', 'np.delete', (['extrinsic_matrices[cam]', '(2)', '(1)'], {}), '(extrinsic_matrices[cam], 2, 1)\n', (4020, 4051), True, 'import numpy as np\n'), ((4571, 4613), 'torch.from_numpy', 'torch.from_numpy', (['(grid_x / (W - 1) * 2 - 1)'], {}), '(grid_x / (W - 1) * 2 - 1)\n', (4587, 4613), False, 'import torch\n'), ((4639, 4681), 'torch.from_numpy', 'torch.from_numpy', (['(grid_y / (H - 1) * 2 - 1)'], {}), '(grid_y / (H - 1) * 2 - 1)\n', (4655, 4681), False, 'import torch\n'), ((4704, 4740), 'torch.stack', 'torch.stack', (['[grid_x, grid_y]'], {'dim': '(0)'}), '([grid_x, grid_y], dim=0)\n', (4715, 4740), False, 'import torch\n'), ((5240, 5260), 'numpy.exp', 'np.exp', (['(-1 * feature)'], {}), '(-1 * feature)\n', (5246, 5260), True, 'import numpy as np\n'), ((5280, 5303), 'numpy.round', 'np.round', (['(feature * 255)'], {}), '(feature * 255)\n', (5288, 5303), True, 'import numpy as np\n'), ((1550, 1562), 'numpy.ones', 'np.ones', (['[2]'], {}), '([2])\n', (1557, 1562), True, 'import numpy as np\n'), ((3504, 3540), 'torch.norm', 'torch.norm', (['world_features[0]'], {'dim': '(0)'}), '(world_features[0], dim=0)\n', (3514, 3540), False, 'import torch\n'), ((4801, 4821), 'torch.pow', 'torch.pow', (['grid_x', '(2)'], 
{}), '(grid_x, 2)\n', (4810, 4821), False, 'import torch\n'), ((4824, 4844), 'torch.pow', 'torch.pow', (['grid_y', '(2)'], {}), '(grid_y, 2)\n', (4833, 4844), False, 'import torch\n'), ((2586, 2619), 'torch.norm', 'torch.norm', (['img_feature[0]'], {'dim': '(0)'}), '(img_feature[0], dim=0)\n', (2596, 2619), False, 'import torch\n'), ((2698, 2731), 'torch.norm', 'torch.norm', (['img_feature[0]'], {'dim': '(0)'}), '(img_feature[0], dim=0)\n', (2708, 2731), False, 'import torch\n'), ((3132, 3167), 'torch.norm', 'torch.norm', (['world_feature[0]'], {'dim': '(0)'}), '(world_feature[0], dim=0)\n', (3142, 3167), False, 'import torch\n'), ((3251, 3286), 'torch.norm', 'torch.norm', (['world_feature[0]'], {'dim': '(0)'}), '(world_feature[0], dim=0)\n', (3261, 3286), False, 'import torch\n'), ((1803, 1879), 'codes.models.resnet.resnet18', 'resnet18', ([], {'pretrained': '(True)', 'replace_stride_with_dilation': '[False, False, True]'}), '(pretrained=True, replace_stride_with_dilation=[False, False, True])\n', (1811, 1879), False, 'from codes.models.resnet import resnet18\n')] |
from ..transformers.series_to_tabular import RandomIntervalSegmenter
from ..utils.testing import generate_df_from_array
from ..utils.transformations import tabularize
import pytest
import pandas as pd
import numpy as np
N_ITER = 10
# Test output format and dimensions.
def test_output_format_dim():
    """Output of RandomIntervalSegmenter is pandas-typed and row-aligned with input."""
    col_counts = [1, 3]
    row_counts = [1, 3]
    obs_counts = [2, 3]
    interval_settings = [1, 3, 10, 'sqrt', 'random']
    for n_cols in col_counts:
        for n_rows in row_counts:
            for n_obs in obs_counts:
                for n_intervals in interval_settings:
                    X = generate_df_from_array(np.ones(n_obs),
                                               n_rows=n_rows, n_cols=n_cols)
                    segmenter = RandomIntervalSegmenter(n_intervals=n_intervals)
                    Xt = segmenter.fit_transform(X)
                    assert isinstance(Xt, (pd.DataFrame, pd.Series))
                    assert Xt.shape[0] == X.shape[0]
# Check that exception is raised for bad input args.
def test_bad_input_args():
    """Invalid n_intervals values must raise ValueError."""
    for bad_value in (0, 'abc', 1.0, -1):
        with pytest.raises(ValueError):
            RandomIntervalSegmenter(n_intervals=bad_value)
# Check if random state always gives same results
def test_random_state():
    """A fixed random_state must yield identical intervals across repeated fits."""
    X = generate_df_from_array(np.random.normal(size=10))
    seed = 1234
    reference = RandomIntervalSegmenter(n_intervals='random',
                                        random_state=seed).fit_transform(X)
    for _ in range(N_ITER):
        Xt = RandomIntervalSegmenter(n_intervals='random',
                                    random_state=seed).fit_transform(X)
        np.testing.assert_array_equal(tabularize(reference).values,
                                      tabularize(Xt).values)
| [
"numpy.random.normal",
"pytest.raises",
"numpy.ones"
] | [((1155, 1180), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (1171, 1180), True, 'import numpy as np\n'), ((967, 992), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (980, 992), False, 'import pytest\n'), ((503, 517), 'numpy.ones', 'np.ones', (['n_obs'], {}), '(n_obs)\n', (510, 517), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
from matplotlib import rcParams
# Register Japanese-capable fonts so matplotlib can render the Japanese axis
# labels and titles used below (the list is searched in order until a match).
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']
# In[2]:
# Make directory
#
# df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
# dirnames = (df_hospital_beds['japan_prefecture_code']+df_hospital_beds['都道府県名']).values
# for i in range(len(dirnames)):
# path = 'resultD_transport_strategy_hospital/' + dirnames[i]
# os.makedirs(path, exist_ok=True)
# In[3]:
# MODE = 'all'
# 'normal' renders only the latest forecast; 'all' renders every date.
MODE = 'normal'

# Forecast CSVs are named x_<date>.csv; extract the sorted list of dates.
filenames = sorted(glob.glob('data_hospital/x_*'))
forecast_dates = [name.split('_')[-1].split('.')[0] for name in filenames]
# In[ ]:
# In[18]:
def visualization(gamma, x_type, forecast_date):
    """Render every figure for one (gamma, x_type, forecast_date) scenario.

    Loads the forecast quantiles, the optimized transport plan and the bed
    capacities from disk, simulates hospitalizations with/without patient
    sharing, and saves nationwide / per-prefecture / cost figures as PNGs.

    Args:
        gamma: bed-occupancy cap the transport plan was computed with
            (e.g. 0.8 for 80%); used only to locate the result files.
        x_type: forecast statistic the plan was based on ('upper' or 'mean');
            used only for file names.
        forecast_date: date string identifying the forecast CSV/NPY files.
    """
    # Bug fix: the original printed the module-level global `f_date` instead
    # of this function's `forecast_date` parameter.
    print("forcasted date ={0}".format(forecast_date))

    # Load inter-prefecture transport weights (distance matrix).
    df_w = pd.read_csv('data_Kokudo/w_distance.csv',index_col=0)
    W= df_w.values
    w_pulp = W.T.reshape(-1)

    # Load the forecast mean and 2.5% / 97.5% quantiles, plus the optimized caps.
    df_x0975 = pd.read_csv('data_hospital/x0975_{0}.csv'.format(forecast_date),index_col=0 )
    df_x0025 = pd.read_csv('data_hospital/x0025_{0}.csv'.format(forecast_date),index_col=0 )
    df_xmean = pd.read_csv('data_hospital/x_{0}.csv'.format(forecast_date),index_col=0 )
    gammas = np.load('data_hospital_transport/gammas_{0}_{1:03}_{2}.npy'.format(x_type,int(gamma*100),forecast_date))

    x_mean = df_xmean.values
    x_q0975 = df_x0975.values
    x_q0025 = df_x0025.values

    N = x_mean.shape[1]  # number of prefectures
    T = x_mean.shape[0]  # number of days
    # L @ u gives per-prefecture net inflow minus outflow for a transport vector u.
    L = np.kron(np.ones((1,N)),np.eye(N)) - np.kron(np.eye(N),np.ones((1,N)))
    uv = np.load('data_hospital_transport/u_{0}_{1:03}_{2}.npy'.format(x_type,int(gamma*100),forecast_date))

    # y_* = hospitalizations when the transport plan uv is applied.
    y_mean = np.zeros(x_mean.shape)
    y_q0975 = np.zeros(x_mean.shape)
    y_q0025 = np.zeros(x_mean.shape)
    y_mean[0] = x_mean[0]
    y_q0975[0] = x_q0975[0]
    y_q0025[0] = x_q0025[0]
    sum_u = np.zeros(T)
    sum_cost = np.zeros(T)
    for k in range(T-1):
        y_mean[k+1] = y_mean[k] + x_mean[k+1] - x_mean[k] + L.dot(uv[k])
        y_q0975[k+1] = y_q0975[k] + x_q0975[k+1] - x_q0975[k] + L.dot(uv[k])
        y_q0025[k+1] = y_q0025[k] + x_q0025[k+1] - x_q0025[k] + L.dot(uv[k])
        sum_u[k+1] = np.sum(uv[k])
        sum_cost[k+1] = np.sum(w_pulp*uv[k])

    # Load hospital bed capacities; use the most recently reported week.
    df_hospital_beds = pd.read_csv('data_Koro/hospital_beds.csv',index_col=0)
    dirnames = (df_hospital_beds['japan_prefecture_code']+df_hospital_beds['都道府県名']).values
    names = df_hospital_beds['都道府県名'].values
    weeks = df_hospital_beds.columns[2:].values
    new_week = max(weeks)
    M = df_hospital_beds[new_week].values

    times = pd.to_datetime(df_xmean.index)
    date_s = min(times)
    date_e = max(times)

    # Nationwide forecast of hospitalizations.
    plt.figure(figsize = (6,4))
    plt.fill_between(times,x_q0025.sum(axis=1),x_q0975.sum(axis=1),facecolor = 'lime',alpha = 0.3,label = '95%信頼区間')
    plt.plot(times,x_mean.sum(axis=1),'*-',color = 'lime',label = '平均値')
    plt.plot([date_s,date_e],np.ones(2)*0.8*M.sum(),"--",label = '病床使用率 80%',color = 'red',linewidth = 2.0)
    plt.plot([date_s,date_e],np.ones(2)*M.sum(),"--",label = '病床使用率 100%',color = 'purple',linewidth = 2.0)
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.title('全国の入院者数の予測値, 予測日={0}'.format(forecast_date),fontsize = 15)
    plt.xlim([date_s,date_e])
    plt.ylim([0, 1.5* M.sum(),])
    plt.ylabel('入院者数 [人]')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
    plt.grid()
    plt.savefig('resultB_google_prediction/all_hospital_{0}.png'.format(forecast_date),bbox_inches='tight',dpi = 100)
    if MODE == 'normal':
        plt.savefig('resultB_google_prediction/all_hospital.png',bbox_inches='tight',dpi = 100)
    plt.close()

    # Per-prefecture hospitalizations (no sharing).
    plt.figure(figsize = (50,25))
    plt.subplots_adjust(wspace=0.1, hspace=0.5)
    for i in range(47):
        plt.subplot(10,5,i+1)
        plt.fill_between(times,x_q0025[:,i],x_q0975[:,i],facecolor = 'lime',alpha = 0.3,label = '95%信頼区間')
        plt.plot(times,x_mean[:,i],'*-',color = 'lime',label = '平均値')
        plt.plot([date_s,date_e],np.ones(2)*0.8*M[i],"--",label = '病床使用率 80%',color = 'red',linewidth = 2.0)
        plt.plot([date_s,date_e],np.ones(2)*M[i],"--",label = '病床使用率 100%',color = 'purple',linewidth = 2.0)
        plt.gca().tick_params(axis='x', rotation= -60)
        plt.title(names[i],fontsize = 20)
        plt.xlim([date_s,date_e])
        plt.ylim([0, 1.5* M[i]])
        plt.grid()
        if i < 42:
            plt.tick_params(labelbottom=False)
        if i == 0:
            plt.legend()
    plt.savefig('resultB_google_prediction/each_hospital_{0}.png'.format(forecast_date),bbox_inches='tight',dpi = 100)
    if MODE == 'normal':
        plt.savefig('resultB_google_prediction/each_hospital.png',bbox_inches='tight',dpi = 100)
    plt.close()

    # Per-prefecture comparison: with vs without medical sharing.
    plt.figure(figsize = (50,25))
    plt.subplots_adjust(wspace=0.1, hspace=0.5)
    for i in range(47):
        plt.subplot(10,5,i+1)
        max_beds = M[i]
        # Bed-capacity reference lines.
        plt.plot([date_s,date_e],[0.8*max_beds,0.8*max_beds],'--',label = '病床使用率80%',color = 'red',linewidth = 2.0)
        plt.plot([date_s,date_e],[max_beds,max_beds],'--',label = '病床使用率100%',color = 'purple',linewidth = 2.0)
        # Without sharing.
        plt.fill_between(times,x_q0025[:,i],x_q0975[:,i],facecolor = 'lime',alpha = 0.5,label = '医療シェアリングなし',)
        plt.plot(times,x_mean[:,i],"*-",linewidth = 2,color= 'lime')
        # With sharing.
        plt.fill_between(times,y_q0025[:,i],y_q0975[:,i],facecolor = 'orange',alpha = 0.5,label = '医療シェアリングあり',)
        plt.plot(times,y_mean[:,i],"*-",linewidth = 2,color = 'orange')
        plt.xlim([date_s,date_e])
        plt.ylim([0,1.5*max_beds])
        plt.grid()
        plt.gca().tick_params(axis='x', rotation= -60)
        plt.title(names[i],fontsize = 20)
        if i < 42:
            plt.tick_params(labelbottom=False)
        if i == 0:
            plt.legend()
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/main/each_severe_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/main/each_severe_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    # Cost evaluation: total transported patients per day.
    times = pd.to_datetime(df_xmean.index)[:-1]
    date_s = min(times)
    date_e = max(times)
    max_beds = M.sum()
    plt.plot(times,sum_u[:-1],"*-",linewidth = 2,color= 'black',label = '入院者数')
    plt.xlim([date_s,date_e])
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.ylabel('毎日の医療シェアが必要な入院者の合計 [人]')
    plt.legend()
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/num_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/cost/num_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    # Daily distance-weighted transport cost.
    times = pd.to_datetime(df_xmean.index)[:-1]
    date_s = min(times)
    date_e = max(times)
    max_beds = M.sum()
    plt.plot(times,sum_cost[:-1],"*-",linewidth = 2,color= 'black',label = '医療シェアリングのコスト')
    plt.xlim([date_s,date_e])
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.legend()
    plt.ylabel('毎日のコスト [km]')
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/cost_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/cost/cost_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    # Cost per transported patient.
    times = pd.to_datetime(df_xmean.index)[:-1]
    date_s = min(times)
    date_e = max(times)
    max_beds = M.sum()
    plt.plot(times,sum_cost[:-1]/sum_u[:-1],"*-",linewidth = 2,color= 'black',label = '入院者ごとの依頼コスト')
    plt.xlim([date_s,date_e])
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.legend()
    plt.ylabel('入院者ごとの依頼コスト [km/人]')
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/performance_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 100)
    plt.savefig('resultD_transport_strategy_hospital/cost/performance_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 100)
    plt.close()

    # Time series of the optimized bed-occupancy cap.
    times = pd.to_datetime(df_xmean.index)
    plt.plot(times,gammas*100)
    plt.gca().tick_params(axis='x', rotation= -60)
    plt.ylabel('病床利用率の上限 [%]')
    if MODE == 'normal':
        plt.savefig('resultD_transport_strategy_hospital/cost/gammas_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 300)
    plt.savefig('resultD_transport_strategy_hospital/cost/gammas_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 300)
    plt.close()

    # Cumulative number of shared patients between each prefecture pair.
    U = uv.reshape(T,N,N)
    U_sum = np.zeros(U.shape)
    U_sum[0] = U[0]
    for i in range(U_sum.shape[0]-1):
        U_sum[i+1] = U_sum[i] + U[i+1]
    times_U = np.sum(U_sum>0,axis=0)
    for target in range(N):
        plt.figure(figsize = (10,6))
        times = pd.to_datetime(df_xmean.index)[:-1]
        # Destinations that ever receive patients from `target`, busiest first.
        num_U=np.sum(times_U[target] !=0)
        index_U = np.argsort(times_U[target,:])[::-1]
        tmp_names = names[index_U[:num_U]]
        for i in range(tmp_names.shape[0]):
            out_target = U_sum[:-1,target,index_U[i]]
            plt.plot(times,out_target,'-*',label = tmp_names[i])
        plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
        plt.grid()
        plt.gca().tick_params(axis='x', rotation= -60)
        plt.ylabel('地域間で医療シェアが必要な入院者数の合計 [人]')
        plt.title(names[target]+'から他地域へのシェアリング')
        if MODE == 'normal':
            plt.savefig('resultD_transport_strategy_hospital/' + dirnames[target]+'/transport_{0}_{1:03}.png'.format(x_type,int(gamma*100)),bbox_inches='tight',dpi = 300)
        plt.savefig('resultD_transport_strategy_hospital/' + dirnames[target]+'/transport_{0}_{1:03}_{2}.png'.format(x_type,int(gamma*100),forecast_date),bbox_inches='tight',dpi = 300)
        plt.close()
# In[19]:
# 重傷者病床上限率の設定
# Severe-case bed occupancy caps to evaluate.
gamma_list = [0.8, 1.0]
# Forecast statistics used as the planning input.
x_type_list = ['upper', 'mean']

# Select which forecast dates to render, then sweep all scenarios.
# NOTE: the loop variable must stay named `f_date` because visualization()
# reads that module-level global.
if MODE == 'all':
    _dates = list(forecast_dates)
elif MODE == 'normal':
    _dates = [max(forecast_dates)]
else:
    _dates = []

for f_date in _dates:
    for gamma in gamma_list:
        for x_type in x_type_list:
            visualization(gamma, x_type, f_date)
| [
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
"pandas.to_datetime",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"glob.glob",
"numpy.eye",
"matplotlib.pyplot.savefig",
"numpy.ones"... | [((764, 794), 'glob.glob', 'glob.glob', (['"""data_hospital/x_*"""'], {}), "('data_hospital/x_*')\n", (773, 794), False, 'import glob\n'), ((1050, 1104), 'pandas.read_csv', 'pd.read_csv', (['"""data_Kokudo/w_distance.csv"""'], {'index_col': '(0)'}), "('data_Kokudo/w_distance.csv', index_col=0)\n", (1061, 1104), True, 'import pandas as pd\n'), ((1918, 1940), 'numpy.zeros', 'np.zeros', (['x_mean.shape'], {}), '(x_mean.shape)\n', (1926, 1940), True, 'import numpy as np\n'), ((1955, 1977), 'numpy.zeros', 'np.zeros', (['x_mean.shape'], {}), '(x_mean.shape)\n', (1963, 1977), True, 'import numpy as np\n'), ((1993, 2015), 'numpy.zeros', 'np.zeros', (['x_mean.shape'], {}), '(x_mean.shape)\n', (2001, 2015), True, 'import numpy as np\n'), ((2112, 2123), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (2120, 2123), True, 'import numpy as np\n'), ((2139, 2150), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (2147, 2150), True, 'import numpy as np\n'), ((2526, 2581), 'pandas.read_csv', 'pd.read_csv', (['"""data_Koro/hospital_beds.csv"""'], {'index_col': '(0)'}), "('data_Koro/hospital_beds.csv', index_col=0)\n", (2537, 2581), True, 'import pandas as pd\n'), ((2850, 2880), 'pandas.to_datetime', 'pd.to_datetime', (['df_xmean.index'], {}), '(df_xmean.index)\n', (2864, 2880), True, 'import pandas as pd\n'), ((2954, 2980), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2964, 2980), True, 'import matplotlib.pyplot as plt\n'), ((3521, 3547), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[date_s, date_e]'], {}), '([date_s, date_e])\n', (3529, 3547), True, 'import matplotlib.pyplot as plt\n'), ((3584, 3606), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""入院者数 [人]"""'], {}), "('入院者数 [人]')\n", (3594, 3606), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3682), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0)'}), "(bbox_to_anchor=(1.05, 1), 
loc='upper left', borderaxespad=0)\n", (3621, 3682), True, 'import matplotlib.pyplot as plt\n'), ((3687, 3697), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3695, 3697), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3954), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3952, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3975, 4003), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(50, 25)'}), '(figsize=(50, 25))\n', (3985, 4003), True, 'import matplotlib.pyplot as plt\n'), ((4010, 4053), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.1)', 'hspace': '(0.5)'}), '(wspace=0.1, hspace=0.5)\n', (4029, 4053), True, 'import matplotlib.pyplot as plt\n'), ((5048, 5059), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5057, 5059), True, 'import matplotlib.pyplot as plt\n'), ((5094, 5122), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(50, 25)'}), '(figsize=(50, 25))\n', (5104, 5122), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5172), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.1)', 'hspace': '(0.5)'}), '(wspace=0.1, hspace=0.5)\n', (5148, 5172), True, 'import matplotlib.pyplot as plt\n'), ((6535, 6546), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6544, 6546), True, 'import matplotlib.pyplot as plt\n'), ((6697, 6772), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'sum_u[:-1]', '"""*-"""'], {'linewidth': '(2)', 'color': '"""black"""', 'label': '"""入院者数"""'}), "(times, sum_u[:-1], '*-', linewidth=2, color='black', label='入院者数')\n", (6705, 6772), True, 'import matplotlib.pyplot as plt\n'), ((6779, 6805), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[date_s, date_e]'], {}), '([date_s, date_e])\n', (6787, 6805), True, 'import matplotlib.pyplot as plt\n'), ((6895, 6931), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""毎日の医療シェアが必要な入院者の合計 [人]"""'], {}), "('毎日の医療シェアが必要な入院者の合計 [人]')\n", (6905, 6931), True, 'import 
matplotlib.pyplot as plt\n'), ((6937, 6949), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6947, 6949), True, 'import matplotlib.pyplot as plt\n'), ((7279, 7290), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7288, 7290), True, 'import matplotlib.pyplot as plt\n'), ((7430, 7521), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'sum_cost[:-1]', '"""*-"""'], {'linewidth': '(2)', 'color': '"""black"""', 'label': '"""医療シェアリングのコスト"""'}), "(times, sum_cost[:-1], '*-', linewidth=2, color='black', label=\n '医療シェアリングのコスト')\n", (7438, 7521), True, 'import matplotlib.pyplot as plt\n'), ((7523, 7549), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[date_s, date_e]'], {}), '([date_s, date_e])\n', (7531, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7606, 7618), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7616, 7618), True, 'import matplotlib.pyplot as plt\n'), ((7624, 7649), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""毎日のコスト [km]"""'], {}), "('毎日のコスト [km]')\n", (7634, 7649), True, 'import matplotlib.pyplot as plt\n'), ((7981, 7992), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7990, 7992), True, 'import matplotlib.pyplot as plt\n'), ((8132, 8235), 'matplotlib.pyplot.plot', 'plt.plot', (['times', '(sum_cost[:-1] / sum_u[:-1])', '"""*-"""'], {'linewidth': '(2)', 'color': '"""black"""', 'label': '"""入院者ごとの依頼コスト"""'}), "(times, sum_cost[:-1] / sum_u[:-1], '*-', linewidth=2, color=\n 'black', label='入院者ごとの依頼コスト')\n", (8140, 8235), True, 'import matplotlib.pyplot as plt\n'), ((8235, 8261), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[date_s, date_e]'], {}), '([date_s, date_e])\n', (8243, 8261), True, 'import matplotlib.pyplot as plt\n'), ((8317, 8329), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8327, 8329), True, 'import matplotlib.pyplot as plt\n'), ((8335, 8367), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""入院者ごとの依頼コスト [km/人]"""'], {}), "('入院者ごとの依頼コスト [km/人]')\n", (8345, 8367), True, 'import 
matplotlib.pyplot as plt\n'), ((8714, 8725), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8723, 8725), True, 'import matplotlib.pyplot as plt\n'), ((8740, 8770), 'pandas.to_datetime', 'pd.to_datetime', (['df_xmean.index'], {}), '(df_xmean.index)\n', (8754, 8770), True, 'import pandas as pd\n'), ((8776, 8805), 'matplotlib.pyplot.plot', 'plt.plot', (['times', '(gammas * 100)'], {}), '(times, gammas * 100)\n', (8784, 8805), True, 'import matplotlib.pyplot as plt\n'), ((8858, 8884), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""病床利用率の上限 [%]"""'], {}), "('病床利用率の上限 [%]')\n", (8868, 8884), True, 'import matplotlib.pyplot as plt\n'), ((9221, 9232), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9230, 9232), True, 'import matplotlib.pyplot as plt\n'), ((9288, 9305), 'numpy.zeros', 'np.zeros', (['U.shape'], {}), '(U.shape)\n', (9296, 9305), True, 'import numpy as np\n'), ((9418, 9443), 'numpy.sum', 'np.sum', (['(U_sum > 0)'], {'axis': '(0)'}), '(U_sum > 0, axis=0)\n', (9424, 9443), True, 'import numpy as np\n'), ((2428, 2441), 'numpy.sum', 'np.sum', (['uv[k]'], {}), '(uv[k])\n', (2434, 2441), True, 'import numpy as np\n'), ((2466, 2488), 'numpy.sum', 'np.sum', (['(w_pulp * uv[k])'], {}), '(w_pulp * uv[k])\n', (2472, 2488), True, 'import numpy as np\n'), ((3850, 3942), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resultB_google_prediction/all_hospital.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(100)'}), "('resultB_google_prediction/all_hospital.png', bbox_inches=\n 'tight', dpi=100)\n", (3861, 3942), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4112), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(5)', '(i + 1)'], {}), '(10, 5, i + 1)\n', (4098, 4112), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4221), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['times', 'x_q0025[:, i]', 'x_q0975[:, i]'], {'facecolor': '"""lime"""', 'alpha': '(0.3)', 'label': '"""95%信頼区間"""'}), "(times, x_q0025[:, i], x_q0975[:, i], 
facecolor='lime',\n alpha=0.3, label='95%信頼区間')\n", (4134, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4287), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'x_mean[:, i]', '"""*-"""'], {'color': '"""lime"""', 'label': '"""平均値"""'}), "(times, x_mean[:, i], '*-', color='lime', label='平均値')\n", (4233, 4287), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4603), 'matplotlib.pyplot.title', 'plt.title', (['names[i]'], {'fontsize': '(20)'}), '(names[i], fontsize=20)\n', (4580, 4603), True, 'import matplotlib.pyplot as plt\n'), ((4613, 4639), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[date_s, date_e]'], {}), '([date_s, date_e])\n', (4621, 4639), True, 'import matplotlib.pyplot as plt\n'), ((4647, 4672), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.5 * M[i]]'], {}), '([0, 1.5 * M[i]])\n', (4655, 4672), True, 'import matplotlib.pyplot as plt\n'), ((4680, 4690), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4688, 4690), True, 'import matplotlib.pyplot as plt\n'), ((4955, 5048), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""resultB_google_prediction/each_hospital.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(100)'}), "('resultB_google_prediction/each_hospital.png', bbox_inches=\n 'tight', dpi=100)\n", (4966, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5205, 5230), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(5)', '(i + 1)'], {}), '(10, 5, i + 1)\n', (5216, 5230), True, 'import matplotlib.pyplot as plt\n'), ((5273, 5390), 'matplotlib.pyplot.plot', 'plt.plot', (['[date_s, date_e]', '[0.8 * max_beds, 0.8 * max_beds]', '"""--"""'], {'label': '"""病床使用率80%"""', 'color': '"""red"""', 'linewidth': '(2.0)'}), "([date_s, date_e], [0.8 * max_beds, 0.8 * max_beds], '--', label=\n '病床使用率80%', color='red', linewidth=2.0)\n", (5281, 5390), True, 'import matplotlib.pyplot as plt\n'), ((5389, 5497), 'matplotlib.pyplot.plot', 'plt.plot', (['[date_s, date_e]', '[max_beds, max_beds]', '"""--"""'], {'label': '"""病床使用率100%"""', 'color': 
'"""purple"""', 'linewidth': '(2.0)'}), "([date_s, date_e], [max_beds, max_beds], '--', label='病床使用率100%',\n color='purple', linewidth=2.0)\n", (5397, 5497), True, 'import matplotlib.pyplot as plt\n'), ((5512, 5618), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['times', 'x_q0025[:, i]', 'x_q0975[:, i]'], {'facecolor': '"""lime"""', 'alpha': '(0.5)', 'label': '"""医療シェアリングなし"""'}), "(times, x_q0025[:, i], x_q0975[:, i], facecolor='lime',\n alpha=0.5, label='医療シェアリングなし')\n", (5528, 5618), True, 'import matplotlib.pyplot as plt\n'), ((5627, 5689), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'x_mean[:, i]', '"""*-"""'], {'linewidth': '(2)', 'color': '"""lime"""'}), "(times, x_mean[:, i], '*-', linewidth=2, color='lime')\n", (5635, 5689), True, 'import matplotlib.pyplot as plt\n'), ((5709, 5817), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['times', 'y_q0025[:, i]', 'y_q0975[:, i]'], {'facecolor': '"""orange"""', 'alpha': '(0.5)', 'label': '"""医療シェアリングあり"""'}), "(times, y_q0025[:, i], y_q0975[:, i], facecolor='orange',\n alpha=0.5, label='医療シェアリングあり')\n", (5725, 5817), True, 'import matplotlib.pyplot as plt\n'), ((5826, 5890), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'y_mean[:, i]', '"""*-"""'], {'linewidth': '(2)', 'color': '"""orange"""'}), "(times, y_mean[:, i], '*-', linewidth=2, color='orange')\n", (5834, 5890), True, 'import matplotlib.pyplot as plt\n'), ((5901, 5927), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[date_s, date_e]'], {}), '([date_s, date_e])\n', (5909, 5927), True, 'import matplotlib.pyplot as plt\n'), ((5935, 5964), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.5 * max_beds]'], {}), '([0, 1.5 * max_beds])\n', (5943, 5964), True, 'import matplotlib.pyplot as plt\n'), ((5970, 5980), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5978, 5980), True, 'import matplotlib.pyplot as plt\n'), ((6044, 6076), 'matplotlib.pyplot.title', 'plt.title', (['names[i]'], {'fontsize': '(20)'}), '(names[i], fontsize=20)\n', 
(6053, 6076), True, 'import matplotlib.pyplot as plt\n'), ((6573, 6603), 'pandas.to_datetime', 'pd.to_datetime', (['df_xmean.index'], {}), '(df_xmean.index)\n', (6587, 6603), True, 'import pandas as pd\n'), ((7305, 7335), 'pandas.to_datetime', 'pd.to_datetime', (['df_xmean.index'], {}), '(df_xmean.index)\n', (7319, 7335), True, 'import pandas as pd\n'), ((8007, 8037), 'pandas.to_datetime', 'pd.to_datetime', (['df_xmean.index'], {}), '(df_xmean.index)\n', (8021, 8037), True, 'import pandas as pd\n'), ((9524, 9551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9534, 9551), True, 'import matplotlib.pyplot as plt\n'), ((9621, 9649), 'numpy.sum', 'np.sum', (['(times_U[target] != 0)'], {}), '(times_U[target] != 0)\n', (9627, 9649), True, 'import numpy as np\n'), ((9920, 9991), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0)'}), "(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)\n", (9930, 9991), True, 'import matplotlib.pyplot as plt\n'), ((10000, 10010), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10008, 10010), True, 'import matplotlib.pyplot as plt\n'), ((10075, 10113), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""地域間で医療シェアが必要な入院者数の合計 [人]"""'], {}), "('地域間で医療シェアが必要な入院者数の合計 [人]')\n", (10085, 10113), True, 'import matplotlib.pyplot as plt\n'), ((10122, 10164), 'matplotlib.pyplot.title', 'plt.title', (["(names[target] + 'から他地域へのシェアリング')"], {}), "(names[target] + 'から他地域へのシェアリング')\n", (10131, 10164), True, 'import matplotlib.pyplot as plt\n'), ((10556, 10567), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10565, 10567), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1742), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (1734, 1742), True, 'import numpy as np\n'), ((1742, 1751), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (1748, 1751), True, 'import numpy as np\n'), ((1763, 1772), 
'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (1769, 1772), True, 'import numpy as np\n'), ((1773, 1788), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (1780, 1788), True, 'import numpy as np\n'), ((3312, 3322), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3319, 3322), True, 'import numpy as np\n'), ((3396, 3405), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3403, 3405), True, 'import matplotlib.pyplot as plt\n'), ((4723, 4757), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)'}), '(labelbottom=False)\n', (4738, 4757), True, 'import matplotlib.pyplot as plt\n'), ((4789, 4801), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4799, 4801), True, 'import matplotlib.pyplot as plt\n'), ((6109, 6143), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '(False)'}), '(labelbottom=False)\n', (6124, 6143), True, 'import matplotlib.pyplot as plt\n'), ((6175, 6187), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6185, 6187), True, 'import matplotlib.pyplot as plt\n'), ((6810, 6819), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6817, 6819), True, 'import matplotlib.pyplot as plt\n'), ((7554, 7563), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7561, 7563), True, 'import matplotlib.pyplot as plt\n'), ((8266, 8275), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8273, 8275), True, 'import matplotlib.pyplot as plt\n'), ((8807, 8816), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8814, 8816), True, 'import matplotlib.pyplot as plt\n'), ((9571, 9601), 'pandas.to_datetime', 'pd.to_datetime', (['df_xmean.index'], {}), '(df_xmean.index)\n', (9585, 9601), True, 'import pandas as pd\n'), ((9667, 9697), 'numpy.argsort', 'np.argsort', (['times_U[target, :]'], {}), '(times_U[target, :])\n', (9677, 9697), True, 'import numpy as np\n'), ((9857, 9910), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'out_target', '"""-*"""'], {'label': 'tmp_names[i]'}), 
"(times, out_target, '-*', label=tmp_names[i])\n", (9865, 9910), True, 'import matplotlib.pyplot as plt\n'), ((3204, 3214), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3211, 3214), True, 'import numpy as np\n'), ((4430, 4440), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4437, 4440), True, 'import numpy as np\n'), ((4516, 4525), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4523, 4525), True, 'import matplotlib.pyplot as plt\n'), ((5989, 5998), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5996, 5998), True, 'import matplotlib.pyplot as plt\n'), ((10019, 10028), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10026, 10028), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4331), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4328, 4331), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import tools.processing as pre
text = pre.get_text("data/ref_text2.txt")
sentences = text.replace("\n", ";")
vocab = pre.Vocabulary(sentences)
embedding_dimension = 3
word2index_map = {}
index = 0
# for sent in sentences:
# for word in sent.lower().split():
# if word not in word2index_map:
# word2index_map[word] = index
# index += 1
#index2word_map = {index: word for word, index in word2index_map.items()}
index2word_map = vocab.index2word_map
word2index_map = vocab._dict
vocabulary_size = len(index2word_map)
tf.reset_default_graph()
with tf.name_scope("embeddings"):
embeddings = tf.get_variable("embedding", shape=[vocabulary_size, embedding_dimension])
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
saver = tf.train.Saver(var_list = {"embeddings": embeddings})
import sys
# Later, launch the model, use the saver to restore variables from disk, and
# do some work with the model.
with tf.Session() as sess:
# Restore variables from disk.
# saver.restore(sess, "logs/word2vec_intro/final_embeddings.ckpt")
saver.restore(sess, "logs/word2vec_intro/embeddings.ckpt-" + sys.argv[1])
#print(vars_in_checkpoint)
print("Model restored.")
normalized_embeddings_matrix = sess.run(normalized_embeddings)
ref_word = normalized_embeddings_matrix[word2index_map[sys.argv[2]]]
cosine_dists = np.dot(normalized_embeddings_matrix, ref_word)
ff = np.argsort(cosine_dists)[::-1][0:10]
for f in ff:
print(index2word_map[f])
print(cosine_dists[f])
| [
"tools.processing.Vocabulary",
"tensorflow.reset_default_graph",
"tensorflow.get_variable",
"tensorflow.Session",
"tensorflow.train.Saver",
"tools.processing.get_text",
"numpy.argsort",
"numpy.dot",
"tensorflow.name_scope",
"tensorflow.square"
] | [((82, 116), 'tools.processing.get_text', 'pre.get_text', (['"""data/ref_text2.txt"""'], {}), "('data/ref_text2.txt')\n", (94, 116), True, 'import tools.processing as pre\n'), ((161, 186), 'tools.processing.Vocabulary', 'pre.Vocabulary', (['sentences'], {}), '(sentences)\n', (175, 186), True, 'import tools.processing as pre\n'), ((602, 626), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (624, 626), True, 'import tensorflow as tf\n'), ((877, 928), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': "{'embeddings': embeddings}"}), "(var_list={'embeddings': embeddings})\n", (891, 928), True, 'import tensorflow as tf\n'), ((633, 660), 'tensorflow.name_scope', 'tf.name_scope', (['"""embeddings"""'], {}), "('embeddings')\n", (646, 660), True, 'import tensorflow as tf\n'), ((679, 753), 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""'], {'shape': '[vocabulary_size, embedding_dimension]'}), "('embedding', shape=[vocabulary_size, embedding_dimension])\n", (694, 753), True, 'import tensorflow as tf\n'), ((1057, 1069), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1067, 1069), True, 'import tensorflow as tf\n'), ((1490, 1536), 'numpy.dot', 'np.dot', (['normalized_embeddings_matrix', 'ref_word'], {}), '(normalized_embeddings_matrix, ref_word)\n', (1496, 1536), True, 'import numpy as np\n'), ((784, 805), 'tensorflow.square', 'tf.square', (['embeddings'], {}), '(embeddings)\n', (793, 805), True, 'import tensorflow as tf\n'), ((1546, 1570), 'numpy.argsort', 'np.argsort', (['cosine_dists'], {}), '(cosine_dists)\n', (1556, 1570), True, 'import numpy as np\n')] |
import numpy as np
from specklepy.utils.box import Box
class SubWindow(Box):
@classmethod
def from_str(cls, s=None, full=None, order='yx'):
# Create full box window if no string provided
if s is None:
return cls(indexes=None)
# Unravel coordinates from string
indexes = cls.unravel_string(s=s, order=order)
# Provide indexes relative to a full window
if full is not None:
full = cls.unravel_string(s=full, order=order)
indexes = indexes - full
indexes = np.where(indexes == 0, None, indexes) # Avoid IndexErrors by substituting the edges by Nones
return cls(indexes=indexes)
@staticmethod
def unravel_string(s, order='yx'):
s = s.replace(' ', '')
s = s.replace('[', '')
s = s.replace(']', '')
if order == 'xy':
x_intv, y_intv = s.split(',')
elif order == 'yx':
y_intv, x_intv = s.split(',')
else:
raise ValueError(f"Order {order!r} not understood in unraveling sub-window indexes!")
x_min, x_max = x_intv.split(':')
y_min, y_max = y_intv.split(':')
return np.array([x_min, x_max, y_min, y_max]).astype(int)
# def switch_to_zero_based_indexing(self):
# self.x_min -= 1
# # self.x_max -= 1
# self.y_min -= 1
# # self.y_max -= 1
| [
"numpy.where",
"numpy.array"
] | [((565, 602), 'numpy.where', 'np.where', (['(indexes == 0)', 'None', 'indexes'], {}), '(indexes == 0, None, indexes)\n', (573, 602), True, 'import numpy as np\n'), ((1194, 1232), 'numpy.array', 'np.array', (['[x_min, x_max, y_min, y_max]'], {}), '([x_min, x_max, y_min, y_max])\n', (1202, 1232), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.