code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# ---------------------------
# <NAME>, <NAME>, <NAME> -- 2019
# The University of Oxford, The Alan Turing Institute
# contact: <EMAIL>, <EMAIL>, <EMAIL>
# ---------------------------
from src_tf2.data_processing.utils import load_dataset
from src_tf2.GNIs import GNIs
import tensorflow as tf
from absl import flags
import os
import pickle
from tensorflow.python import debug as tf_debug
import json
import numpy as np
# Fix RNG seeds so runs are reproducible.
tf.compat.v1.set_random_seed(0)
np.random.seed(0)

# --- Command-line flags ------------------------------------------------------
flags.DEFINE_string("loss", default='mse', help="loss, cross_entropy or mse")
flags.DEFINE_string(
    "dataset",
    default='house_prices',
    help="Dataset, choice 'mnist', 'fmnist', 'cifar10','svhn'.")
flags.DEFINE_integer("n_epochs",
                     default=100,
                     help="Number of training epochs to run.")
flags.DEFINE_integer("n_samples", default=1, help="Number of samples to draw.")
flags.DEFINE_float("learning_rate",
                   default=0.001,
                   help="Initial learning rate.")
flags.DEFINE_float("L2", default=0.0, help="L2 penalisation on weights.")
flags.DEFINE_integer("N",
                     default=2,
                     help="Number of hidden layers in discriminant network")
flags.DEFINE_multi_integer(
    "non_targeted_layers",
    default=[],
    help="Layers for which we do not add GNIs. Layer 0 refers to data layer.")
flags.DEFINE_integer("H",
                     default=512,
                     help="Size of hidden layers in discriminant network")
flags.DEFINE_bool("dropout",
                  default=True,
                  help="Dropout for hidden layers AND input")
flags.DEFINE_float("var", default=1.0, help="GNI variance")
flags.DEFINE_string("activation",
                    default="linear",
                    help="Activation function for all hidden layers.")
flags.DEFINE_string("noise_type",
                    default=None,
                    help="Noise type for model, input, gradient, None")
flags.DEFINE_string("noise_mode",
                    default='add',
                    help="Noise node for model, add, mult, None")
flags.DEFINE_string("noise_dist", default="Normal", help="Noise dist")
flags.DEFINE_string("run_name", default='run', help="name of run")
flags.DEFINE_integer("B", default=512, help="Batch size.")
flags.DEFINE_string(
    "data_dir",
    default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"), "vae/data"),
    help="Directory where data is stored (if using real data).")
flags.DEFINE_bool(
    "calc_hessian",
    default=False,
    help="If true, calculates the trace of the hessian for each layer.")
flags.DEFINE_bool("input_inject",
                  default=False,
                  help="If true, only injects noise into data layer.")
flags.DEFINE_string(
    "debug",
    default="",
    help="If tensorboard, connects to tensorboard debug. Else CLI")
flags.DEFINE_string("disc_type",
                    default='mlp',
                    help="type of discriminator to use, convnet or mlp")

# Global handle to the parsed flag values (populated by absl at app.run()).
FLAGS = flags.FLAGS
def set_up_estimator(estimator,
                     params,
                     steps,
                     train_input_fn,
                     eval_input_fn,
                     config,
                     warm_start_dir=None):
    """Build a tf.estimator.Estimator and its train/eval specs.

    Args:
        estimator: model_fn passed to tf.estimator.Estimator.
        params: dict of hyper-parameters forwarded to the model_fn.
        steps: maximum number of training steps for the TrainSpec.
        train_input_fn: input pipeline used for training.
        eval_input_fn: input pipeline used for evaluation.
        config: dict of keyword arguments for tf.estimator.RunConfig.
        warm_start_dir: optional checkpoint directory to warm-start from.

    Returns:
        (estimator, train_spec, eval_spec) ready for
        tf.estimator.train_and_evaluate.
    """
    estimator = tf.estimator.Estimator(estimator,
                                       params=params,
                                       config=tf.estimator.RunConfig(**config),
                                       warm_start_from=warm_start_dir)
    # --- We force the graph to finalize because of some memory leaks we had.
    # BUG FIX: `tf.get_default_graph()` does not exist under TF2; use the
    # compat.v1 alias, consistent with its use in main().
    tf.compat.v1.get_default_graph().finalize()
    # --- Setup our train and eval specs.
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=steps,
                                        hooks=[])
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                      steps=None,
                                      start_delay_secs=0,
                                      throttle_secs=1)
    return estimator, train_spec, eval_spec
def main(argv):
    """Initialise the model, then train and evaluate it on the chosen dataset."""
    del argv  # unused

    params = FLAGS.flag_values_dict()

    # --- Get train and eval functions
    train_input_fn, eval_input_fn, params['image_shape'], n_d = load_dataset(
        params['dataset'], params)

    # --- Calculate number of steps per epoch
    params["epoch_steps"] = int(float(n_d) / float(params["B"])) + 1
    # --- Calculate total number of steps
    params["max_steps"] = int(params["n_epochs"]) * params["epoch_steps"]

    if params["disc_type"] == 'mlp':
        warm_start_dir = './init_weights/' + str(params['N']) + '_' + str(
            params['H']) + '_' + params["dataset"]
    elif params["disc_type"] == 'convnet':
        warm_start_dir = './init_weights/conv_' + params["dataset"]
    elif params["disc_type"] == 'vgg':
        warm_start_dir = './init_weights/vgg_' + params["dataset"]
    else:
        # BUG FIX: previously an unrecognised disc_type crashed later with a
        # NameError on warm_start_dir; fail fast with a clear message.
        raise ValueError("Unknown disc_type: %s" % params["disc_type"])

    config = dict(save_checkpoints_steps=10 * params["epoch_steps"],
                  keep_checkpoint_max=1,
                  save_summary_steps=params["epoch_steps"],
                  model_dir=warm_start_dir)

    # --------------------------- Init Model ----------------------------------
    # --- Run for 1 step to dump out the model to the warm_start_dir if it
    # --- doesn't already exist
    print("--------- Initialising model ------------")
    tf.estimator.train_and_evaluate(*set_up_estimator(
        GNIs, params, 1, train_input_fn, eval_input_fn, config))

    config['model_dir'] = './checkpoints/' + params["run_name"]
    if not os.path.exists(config["model_dir"]):
        os.makedirs(config["model_dir"])
    with open(config["model_dir"] + '/params.json', 'w+') as fp:
        json.dump(params, fp)

    # BUG FIX: the original referenced the undefined name
    # `GradientNoiseInjectLR2` (NameError at runtime); `GNIs` is the estimator
    # imported at the top of this file and used for the initialisation run.
    estimator, train_spec, eval_spec = set_up_estimator(
        GNIs,
        params,
        params["max_steps"],
        train_input_fn,
        eval_input_fn,
        config,
        warm_start_dir=warm_start_dir)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

    # --- We force the graph to finalize because of some memory leaks we had.
    tf.compat.v1.get_default_graph().finalize()
    # NOTE(review): train_and_evaluate is deliberately kept invoked a second
    # time here, as in the original code — confirm the repetition is intended.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


if __name__ == "__main__":
    tf.compat.v1.app.run()
| [
"numpy.random.seed",
"tensorflow.estimator.TrainSpec",
"tensorflow.get_default_graph",
"tensorflow.compat.v1.app.run",
"absl.flags.DEFINE_bool",
"os.path.exists",
"src_tf2.data_processing.utils.load_dataset",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_float",
"json.dump",
"tensorflow.estima... | [((423, 454), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['(0)'], {}), '(0)\n', (451, 454), True, 'import tensorflow as tf\n'), ((455, 472), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (469, 472), True, 'import numpy as np\n'), ((474, 551), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""loss"""'], {'default': '"""mse"""', 'help': '"""loss, cross_entropy or mse"""'}), "('loss', default='mse', help='loss, cross_entropy or mse')\n", (493, 551), False, 'from absl import flags\n'), ((552, 672), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""'], {'default': '"""house_prices"""', 'help': '"""Dataset, choice \'mnist\', \'fmnist\', \'cifar10\',\'svhn\'."""'}), '(\'dataset\', default=\'house_prices\', help=\n "Dataset, choice \'mnist\', \'fmnist\', \'cifar10\',\'svhn\'.")\n', (571, 672), False, 'from absl import flags\n'), ((681, 773), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_epochs"""'], {'default': '(100)', 'help': '"""Number of training epochs to run."""'}), "('n_epochs', default=100, help=\n 'Number of training epochs to run.')\n", (701, 773), False, 'from absl import flags\n'), ((811, 890), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_samples"""'], {'default': '(1)', 'help': '"""Number of samples to draw."""'}), "('n_samples', default=1, help='Number of samples to draw.')\n", (831, 890), False, 'from absl import flags\n'), ((891, 977), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.001)', 'help': '"""Initial learning rate."""'}), "('learning_rate', default=0.001, help=\n 'Initial learning rate.')\n", (909, 977), False, 'from absl import flags\n'), ((1011, 1084), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""L2"""'], {'default': '(0.0)', 'help': '"""L2 penalisation on weights."""'}), "('L2', default=0.0, help='L2 penalisation on weights.')\n", (1029, 1084), False, 'from absl import 
flags\n'), ((1085, 1182), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""N"""'], {'default': '(2)', 'help': '"""Number of hidden layers in discriminant network"""'}), "('N', default=2, help=\n 'Number of hidden layers in discriminant network')\n", (1105, 1182), False, 'from absl import flags\n'), ((1220, 1361), 'absl.flags.DEFINE_multi_integer', 'flags.DEFINE_multi_integer', (['"""non_targeted_layers"""'], {'default': '[]', 'help': '"""Layers for which we do not add GNIs. Layer 0 refers to data layer."""'}), "('non_targeted_layers', default=[], help=\n 'Layers for which we do not add GNIs. Layer 0 refers to data layer.')\n", (1246, 1361), False, 'from absl import flags\n'), ((1370, 1467), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""H"""'], {'default': '(512)', 'help': '"""Size of hidden layers in discriminant network"""'}), "('H', default=512, help=\n 'Size of hidden layers in discriminant network')\n", (1390, 1467), False, 'from absl import flags\n'), ((1505, 1596), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""dropout"""'], {'default': '(True)', 'help': '"""Dropout for hidden layers AND input"""'}), "('dropout', default=True, help=\n 'Dropout for hidden layers AND input')\n", (1522, 1596), False, 'from absl import flags\n'), ((1628, 1687), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""var"""'], {'default': '(1.0)', 'help': '"""GNI variance"""'}), "('var', default=1.0, help='GNI variance')\n", (1646, 1687), False, 'from absl import flags\n'), ((1688, 1795), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""activation"""'], {'default': '"""linear"""', 'help': '"""Activation function for all hidden layers."""'}), "('activation', default='linear', help=\n 'Activation function for all hidden layers.')\n", (1707, 1795), False, 'from absl import flags\n'), ((1831, 1935), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""noise_type"""'], {'default': 'None', 'help': '"""Noise type for model, input, gradient, 
None"""'}), "('noise_type', default=None, help=\n 'Noise type for model, input, gradient, None')\n", (1850, 1935), False, 'from absl import flags\n'), ((1971, 2070), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""noise_mode"""'], {'default': '"""add"""', 'help': '"""Noise node for model, add, mult, None"""'}), "('noise_mode', default='add', help=\n 'Noise node for model, add, mult, None')\n", (1990, 2070), False, 'from absl import flags\n'), ((2106, 2176), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""noise_dist"""'], {'default': '"""Normal"""', 'help': '"""Noise dist"""'}), "('noise_dist', default='Normal', help='Noise dist')\n", (2125, 2176), False, 'from absl import flags\n'), ((2177, 2243), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""run_name"""'], {'default': '"""run"""', 'help': '"""name of run"""'}), "('run_name', default='run', help='name of run')\n", (2196, 2243), False, 'from absl import flags\n'), ((2244, 2302), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""B"""'], {'default': '(512)', 'help': '"""Batch size."""'}), "('B', default=512, help='Batch size.')\n", (2264, 2302), False, 'from absl import flags\n'), ((2477, 2599), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""calc_hessian"""'], {'default': '(False)', 'help': '"""If true, calculates the trace of the hessian for each layer."""'}), "('calc_hessian', default=False, help=\n 'If true, calculates the trace of the hessian for each layer.')\n", (2494, 2599), False, 'from absl import flags\n'), ((2608, 2714), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""input_inject"""'], {'default': '(False)', 'help': '"""If true, only injects noise into data layer."""'}), "('input_inject', default=False, help=\n 'If true, only injects noise into data layer.')\n", (2625, 2714), False, 'from absl import flags\n'), ((2746, 2855), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""debug"""'], {'default': '""""""', 'help': '"""If tensorboard, connects to 
tensorboard debug. Else CLI"""'}), "('debug', default='', help=\n 'If tensorboard, connects to tensorboard debug. Else CLI')\n", (2765, 2855), False, 'from absl import flags\n'), ((2864, 2969), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""disc_type"""'], {'default': '"""mlp"""', 'help': '"""type of discriminator to use, convnet or mlp"""'}), "('disc_type', default='mlp', help=\n 'type of discriminator to use, convnet or mlp')\n", (2883, 2969), False, 'from absl import flags\n'), ((3695, 3769), 'tensorflow.estimator.TrainSpec', 'tf.estimator.TrainSpec', ([], {'input_fn': 'train_input_fn', 'max_steps': 'steps', 'hooks': '[]'}), '(input_fn=train_input_fn, max_steps=steps, hooks=[])\n', (3717, 3769), True, 'import tensorflow as tf\n'), ((3867, 3966), 'tensorflow.estimator.EvalSpec', 'tf.estimator.EvalSpec', ([], {'input_fn': 'eval_input_fn', 'steps': 'None', 'start_delay_secs': '(0)', 'throttle_secs': '(1)'}), '(input_fn=eval_input_fn, steps=None, start_delay_secs=\n 0, throttle_secs=1)\n', (3888, 3966), True, 'import tensorflow as tf\n'), ((4304, 4343), 'src_tf2.data_processing.utils.load_dataset', 'load_dataset', (["params['dataset']", 'params'], {}), "(params['dataset'], params)\n", (4316, 4343), False, 'from src_tf2.data_processing.utils import load_dataset\n'), ((6040, 6105), 'tensorflow.estimator.train_and_evaluate', 'tf.estimator.train_and_evaluate', (['estimator', 'train_spec', 'eval_spec'], {}), '(estimator, train_spec, eval_spec)\n', (6071, 6105), True, 'import tensorflow as tf\n'), ((6238, 6303), 'tensorflow.estimator.train_and_evaluate', 'tf.estimator.train_and_evaluate', (['estimator', 'train_spec', 'eval_spec'], {}), '(estimator, train_spec, eval_spec)\n', (6269, 6303), True, 'import tensorflow as tf\n'), ((6337, 6359), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (6357, 6359), True, 'import tensorflow as tf\n'), ((5625, 5660), 'os.path.exists', 'os.path.exists', (["config['model_dir']"], {}), 
"(config['model_dir'])\n", (5639, 5660), False, 'import os\n'), ((5670, 5702), 'os.makedirs', 'os.makedirs', (["config['model_dir']"], {}), "(config['model_dir'])\n", (5681, 5702), False, 'import os\n'), ((5776, 5797), 'json.dump', 'json.dump', (['params', 'fp'], {}), '(params, fp)\n', (5785, 5797), False, 'import json\n'), ((2365, 2397), 'os.getenv', 'os.getenv', (['"""TEST_TMPDIR"""', '"""/tmp"""'], {}), "('TEST_TMPDIR', '/tmp')\n", (2374, 2397), False, 'import os\n'), ((3413, 3445), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {}), '(**config)\n', (3435, 3445), True, 'import tensorflow as tf\n'), ((3601, 3623), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3621, 3623), True, 'import tensorflow as tf\n'), ((6189, 6221), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (6219, 6221), True, 'import tensorflow as tf\n')] |
from typing import Dict, List
import numpy as np
from xain.types import FederatedDataset, Partition
# Per-partition statistics: {"total": <example count>, "per_label": <count per label>}.
PartitionStat = Dict[str, List[int]]
class DSStats:
    """Per-partition label statistics for a federated dataset."""

    def __init__(self, name: str, ds: "FederatedDataset"):
        self.name = name
        self.ds = ds

    def __repr__(self) -> str:
        """Render a table of example counts per label for every shard."""
        topic = "number_of_examples_per_label_per_shard"
        shard_stats = self.all()[topic]
        pieces = ["\nname: {}\n".format(self.name), "{}\n".format(topic)]
        for shard_key, entry in shard_stats.items():
            counts = "\t".join(str(c).rjust(4) for c in entry["per_label"])
            pieces.append("partition: {}\ttotal: {}\tper_label: {}\n".format(
                shard_key, entry["total"], counts))
        pieces.append("=" * 120 + "\n")
        return "".join(pieces)

    def all(self) -> "Dict[str, Dict[str, PartitionStat]]":
        """Collect every available statistic keyed by topic name."""
        return {
            "number_of_examples_per_label_per_shard":
                self.number_of_examples_per_label_per_shard()
        }

    def number_of_examples_per_label_per_shard(self) -> "Dict[str, PartitionStat]":
        """Count examples per label for each train partition plus val/test."""
        partitions, xy_val, xy_test = self.ds
        # NOTE(review): the pad width uses the natural log (not log10) of the
        # partition count, as in the original; for single-digit counts the
        # resulting zero-padding is identical either way.
        pad = int(np.log(len(partitions)))
        labels = np.unique(np.concatenate([y for _, y in partitions], axis=0))
        stats = {
            str(i).zfill(pad): self.number_of_examples_per_label(
                xy=part, possible_labels=labels)
            for i, part in enumerate(partitions)
        }
        stats["val"] = self.number_of_examples_per_label(
            xy=xy_val, possible_labels=labels)
        stats["test"] = self.number_of_examples_per_label(
            xy=xy_test, possible_labels=labels)
        return stats

    @staticmethod
    def number_of_examples_per_label(
        xy: "Partition", possible_labels: "List"
    ) -> "PartitionStat":
        """Count how often each of ``possible_labels`` occurs in partition ``xy``."""
        x, y = xy
        label_order = list(possible_labels)
        assert x.shape[0] == y.shape[0], "Number of examples and labels don't match"
        counts = [0] * len(label_order)
        for label, count in zip(*np.unique(y, return_counts=True)):
            counts[label_order.index(label)] = count
        return {"total": x.shape[0], "per_label": counts}
| [
"numpy.concatenate",
"numpy.unique"
] | [((2336, 2368), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (2345, 2368), True, 'import numpy as np\n'), ((1439, 1465), 'numpy.concatenate', 'np.concatenate', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (1453, 1465), True, 'import numpy as np\n')] |
import sys
import numpy
from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy
from PyQt5.QtGui import QIntValidator, QDoubleValidator
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui, congruence
from oasys.widgets.exchange import DataExchangeObject
# from orangecontrib.xoppy.util.xoppy_exchange import RadiationDataExchangeObject #as DataExchangeObject
from orangecontrib.xoppy.widgets.gui.ow_xoppy_widget import XoppyWidget
from orangecontrib.xoppy.util.xoppy_xraylib_util import reflectivity_fresnel
import scipy.constants as codata
import xraylib
class OWpower3D(XoppyWidget):
    """XOPPY widget computing the power absorbed and transmitted by a chain of
    optical elements, as a function of energy and the two spatial coordinates."""

    name = "POWER3D"
    id = "orange.widgets.datapower3D"
    description = "Power (vs Energy and spatial coordinates) Absorbed and Transmitted by Optical Elements"
    icon = "icons/xoppy_power3d.png"
    priority = 3
    category = ""
    keywords = ["xoppy", "power3D"]

    inputs = [{"name": "ExchangeData",
               "type": DataExchangeObject,
               "handler": "acceptExchangeData"}]
    # [("ExchangeData", DataExchangeObject, "acceptExchangeData")]
    # inputs = [("xoppy_data", DataExchangeObject, "acceptExchangeData")]

    # Combo index for the number of active optical elements (items '0'..'5').
    NELEMENTS = Setting(1)

    # Per-element settings, six per element: chemical formula (FOR), kind
    # (FLAG: 0=Filter, 1=Mirror), filter thickness [mm] (THI), mirror angle
    # [mrad] (ANG), roughness [A] (ROU) and density [g/cm^3] (DEN, where
    # "?" means the density is resolved elsewhere).
    EL1_FOR = Setting("Be")
    EL1_FLAG = Setting(0)
    EL1_THI = Setting(0.5)
    EL1_ANG = Setting(3.0)
    EL1_ROU = Setting(0.0)
    EL1_DEN = Setting("?")

    EL2_FOR = Setting("Rh")
    EL2_FLAG = Setting(1)
    EL2_THI = Setting(0.5)
    EL2_ANG = Setting(3.0)
    EL2_ROU = Setting(0.0)
    EL2_DEN = Setting("?")

    EL3_FOR = Setting("Al")
    EL3_FLAG = Setting(0)
    EL3_THI = Setting(0.5)
    EL3_ANG = Setting(3.0)
    EL3_ROU = Setting(0.0)
    EL3_DEN = Setting("?")

    EL4_FOR = Setting("B")
    EL4_FLAG = Setting(0)
    EL4_THI = Setting(0.5)
    EL4_ANG = Setting(3.0)
    EL4_ROU = Setting(0.0)
    EL4_DEN = Setting("?")

    EL5_FOR = Setting("Pt")
    EL5_FLAG = Setting(1)
    EL5_THI = Setting(0.5)
    EL5_ANG = Setting(3.0)
    EL5_ROU = Setting(0.0)
    EL5_DEN = Setting("?")

    # Which data set to plot (see the PLOT_SETS combo in build_gui).
    PLOT_SETS = Setting(1)

    # File dump selector; the corresponding widget is commented out in build_gui.
    FILE_DUMP = 0
def build_gui(self):
self.leftWidgetPart.setSizePolicy(QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding))
self.leftWidgetPart.setMaximumWidth(self.CONTROL_AREA_WIDTH + 20)
self.leftWidgetPart.updateGeometry()
box = oasysgui.widgetBox(self.controlArea, self.name + " Input Parameters", orientation="vertical", width=self.CONTROL_AREA_WIDTH-10)
idx = -1
#widget index 10
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "NELEMENTS",
label=self.unitLabels()[idx], addSpace=False,
items=['0','1', '2', '3', '4', '5'],
valueType=int, orientation="horizontal", callback=self.set_NELEMENTS, labelWidth=330)
self.show_at(self.unitFlags()[idx], box1)
#widget index 11
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL1_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 12
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL1_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 13
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 14
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 15
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 16
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL1_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 17
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL2_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 18
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL2_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 19
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 20
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 21
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 22
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL2_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 23
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL3_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 24
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL3_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 25
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 26
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 27
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 28
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL3_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 29
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL4_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 30
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL4_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 31
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 32
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 33
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 34
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL4_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 35
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
oasysgui.lineEdit(box1, self, "EL5_FOR",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 36
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "EL5_FLAG",
label=self.unitLabels()[idx], addSpace=False,
items=['Filter', 'Mirror'],
valueType=int, orientation="horizontal", callback=self.set_EL_FLAG, labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 37
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_THI",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 38
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_ANG",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 39
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_ROU",
label=self.unitLabels()[idx], addSpace=False,
valueType=float, validator=QDoubleValidator(), orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 40
idx += 1
box1 = gui.widgetBox(box)
oasysgui.lineEdit(box1, self, "EL5_DEN",
label=self.unitLabels()[idx], addSpace=False, orientation="horizontal", labelWidth=250)
self.show_at(self.unitFlags()[idx], box1)
#widget index 41
idx += 1
box1 = gui.widgetBox(box)
gui.separator(box1, height=7)
gui.comboBox(box1, self, "PLOT_SETS",
label=self.unitLabels()[idx], addSpace=False,
items=['Input beam',
'Beam transmitted after last element',
'Absorption by ALL elements',
'Absorption by LAST element'],
valueType=int, orientation="horizontal", labelWidth=250, callback=self.replot_results)
self.show_at(self.unitFlags()[idx], box1)
#widget index 42
# idx += 1
# box1 = gui.widgetBox(box)
# gui.separator(box1, height=7)
#
# gui.comboBox(box1, self, "FILE_DUMP",
# label=self.unitLabels()[idx], addSpace=False,
# items=['No', 'Yes (power.spec)'],
# valueType=int, orientation="horizontal", labelWidth=250)
# self.show_at(self.unitFlags()[idx], box1)
    def set_NELEMENTS(self):
        """Callback for the NELEMENTS combo: re-initialise the result tabs."""
        self.initializeTabs()

    def set_EL_FLAG(self):
        """Callback for any element-kind combo: re-initialise the result tabs."""
        self.initializeTabs()
def unitLabels(self):
return ['Number of elements:',
'1st oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'2nd oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'3rd oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'4th oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
'5th oe formula','kind:','Filter thick[mm]','Mirror angle[mrad]','Roughness[A]','Density [g/cm^3]',
"Plot","Dump file"]
def unitFlags(self):
return ['True',
'self.NELEMENTS >= 1+0',' self.NELEMENTS >= 1+0','self.EL1_FLAG == 0 and self.NELEMENTS >= 1+0','self.EL1_FLAG != 0 and self.NELEMENTS >= 1+0','self.EL1_FLAG != 0 and self.NELEMENTS >= 1+0',' self.NELEMENTS >= 1+0',
'self.NELEMENTS >= 1+1',' self.NELEMENTS >= 1+1','self.EL2_FLAG == 0 and self.NELEMENTS >= 1+1','self.EL2_FLAG != 0 and self.NELEMENTS >= 1+1','self.EL2_FLAG != 0 and self.NELEMENTS >= 1+1',' self.NELEMENTS >= 1+1',
'self.NELEMENTS >= 1+2',' self.NELEMENTS >= 1+2','self.EL3_FLAG == 0 and self.NELEMENTS >= 1+2','self.EL3_FLAG != 0 and self.NELEMENTS >= 1+2','self.EL3_FLAG != 0 and self.NELEMENTS >= 1+2',' self.NELEMENTS >= 1+2',
'self.NELEMENTS >= 1+3',' self.NELEMENTS >= 1+3','self.EL4_FLAG == 0 and self.NELEMENTS >= 1+3','self.EL4_FLAG != 0 and self.NELEMENTS >= 1+3','self.EL4_FLAG != 0 and self.NELEMENTS >= 1+3',' self.NELEMENTS >= 1+3',
'self.NELEMENTS >= 1+4',' self.NELEMENTS >= 1+4','self.EL5_FLAG == 0 and self.NELEMENTS >= 1+4','self.EL5_FLAG != 0 and self.NELEMENTS >= 1+4','self.EL5_FLAG != 0 and self.NELEMENTS >= 1+4',' self.NELEMENTS >= 1+4',
'True','True']
    def get_help_name(self):
        """Identifier of the help topic for this widget."""
        return 'power3d'
def acceptExchangeData(self, exchangeData):
try:
if not exchangeData is None:
if exchangeData.get_program_name() == "XOPPY":
if exchangeData.get_widget_name() =="UNDULATOR_RADIATION" :
pass
elif exchangeData.get_widget_name() =="POWER3D" :
pass
else:
raise Exception("Xoppy Input beam not recognized")
self.input_beam = exchangeData
self.output_beam = None
self.process_showers()
self.compute()
except Exception as exception:
QMessageBox.critical(self, "Error",
str(exception),
QMessageBox.Ok)
def check_fields(self):
if self.NELEMENTS >= 1:
self.EL1_FOR = congruence.checkEmptyString(self.EL1_FOR, "1st oe formula")
if self.EL1_FLAG == 0: # filter
self.EL1_THI = congruence.checkStrictlyPositiveNumber(self.EL1_THI, "1st oe filter thickness")
elif self.EL1_FLAG == 1: # mirror
self.EL1_ANG = congruence.checkStrictlyPositiveNumber(self.EL1_ANG, "1st oe mirror angle")
self.EL1_ROU = congruence.checkPositiveNumber(self.EL1_ROU, "1st oe mirror roughness")
if not self.EL1_DEN.strip() == "?":
self.EL1_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL1_DEN, "1st oe density")), "1st oe density"))
if self.NELEMENTS >= 2:
self.EL2_FOR = congruence.checkEmptyString(self.EL2_FOR, "2nd oe formula")
if self.EL2_FLAG == 0: # filter
self.EL2_THI = congruence.checkStrictlyPositiveNumber(self.EL2_THI, "2nd oe filter thickness")
elif self.EL2_FLAG == 1: # mirror
self.EL2_ANG = congruence.checkStrictlyPositiveNumber(self.EL2_ANG, "2nd oe mirror angle")
self.EL2_ROU = congruence.checkPositiveNumber(self.EL2_ROU, "2nd oe mirror roughness")
if not self.EL2_DEN.strip() == "?":
self.EL2_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL2_DEN, "2nd oe density")), "2nd oe density"))
if self.NELEMENTS >= 3:
self.EL3_FOR = congruence.checkEmptyString(self.EL3_FOR, "3rd oe formula")
if self.EL3_FLAG == 0: # filter
self.EL3_THI = congruence.checkStrictlyPositiveNumber(self.EL3_THI, "3rd oe filter thickness")
elif self.EL3_FLAG == 1: # mirror
self.EL3_ANG = congruence.checkStrictlyPositiveNumber(self.EL3_ANG, "3rd oe mirror angle")
self.EL3_ROU = congruence.checkPositiveNumber(self.EL3_ROU, "3rd oe mirror roughness")
if not self.EL3_DEN.strip() == "?":
self.EL3_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL3_DEN, "3rd oe density")), "3rd oe density"))
if self.NELEMENTS >= 4:
self.EL4_FOR = congruence.checkEmptyString(self.EL4_FOR, "4th oe formula")
if self.EL4_FLAG == 0: # filter
self.EL4_THI = congruence.checkStrictlyPositiveNumber(self.EL4_THI, "4th oe filter thickness")
elif self.EL4_FLAG == 1: # mirror
self.EL4_ANG = congruence.checkStrictlyPositiveNumber(self.EL4_ANG, "4th oe mirror angle")
self.EL4_ROU = congruence.checkPositiveNumber(self.EL4_ROU, "4th oe mirror roughness")
if not self.EL4_DEN.strip() == "?":
self.EL4_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL4_DEN, "4th oe density")), "4th oe density"))
if self.NELEMENTS >= 5:
self.EL5_FOR = congruence.checkEmptyString(self.EL5_FOR, "5th oe formula")
if self.EL5_FLAG == 0: # filter
self.EL5_THI = congruence.checkStrictlyPositiveNumber(self.EL5_THI, "5th oe filter thickness")
elif self.EL5_FLAG == 1: # mirror
self.EL5_ANG = congruence.checkStrictlyPositiveNumber(self.EL5_ANG, "5th oe mirror angle")
self.EL5_ROU = congruence.checkPositiveNumber(self.EL5_ROU, "5th oe mirror roughness")
if not self.EL5_DEN.strip() == "?":
self.EL5_DEN = str(congruence.checkStrictlyPositiveNumber(float(congruence.checkNumber(self.EL5_DEN, "5th oe density")), "5th oe density"))
def do_xoppy_calculation(self):
return self.xoppy_calc_power3D()
# TODO THIS TO SEND DATA
def extract_data_from_xoppy_output(self, calculation_output):
[p, e, h, v] = self.input_beam.get_content("xoppy_data")
data_to_send = DataExchangeObject("XOPPY", self.get_data_exchange_widget_name())
data_to_send.add_content("xoppy_data", [p*calculation_output.prod(axis=0), e, h, v])
data_to_send.add_content("xoppy_transmittivity", calculation_output)
data_to_send.add_content("xoppy_code", "power3")
self.output_beam = data_to_send
return data_to_send
# def extract_data_from_xoppy_output(self, calculation_output):
# return calculation_output
def get_data_exchange_widget_name(self):
return "POWER3D"
def getKind(self, oe_n):
if oe_n == 1:
return self.EL1_FLAG
elif oe_n == 2:
return self.EL2_FLAG
elif oe_n == 3:
return self.EL3_FLAG
elif oe_n == 4:
return self.EL4_FLAG
elif oe_n == 5:
return self.EL5_FLAG
def getTitles(self):
return ['Transmittance vs X,Y,E','Transmittance vs E',
'Spectral Power Density vs E,X,Y','Power Density vs X,Y','Spectral Power vs E']
def replot_results(self):
if self.output_beam is None:
pass
else:
self.plot_results(self.output_beam, progressBarValue=80)
def plot_results(self, calculated_data, progressBarValue=80):
current_index = self.tabs.currentIndex()
if not self.view_type == 0:
if not calculated_data is None:
self.initializeTabs() # added by srio to avoid overlapping graphs
self.view_type_combo.setEnabled(False)
p,e,h,v = self.input_beam.get_content("xoppy_data")
code = self.input_beam.get_content("xoppy_code")
p_spectral_power = p * codata.e * 1e3
p_to_plot = p_spectral_power
transmittivity = calculated_data.get_content("xoppy_transmittivity")
transmittivity_total = transmittivity.prod(axis=0)
if self.PLOT_SETS == 0: # source
p_to_plot = p_spectral_power
pre_title = "Input beam"
elif self.PLOT_SETS == 1:
p_to_plot = p_spectral_power * transmittivity_total
pre_title = "Beam transmitted after LAST element"
elif self.PLOT_SETS == 2:
p_to_plot = p_spectral_power * ( numpy.ones_like(transmittivity_total) - transmittivity_total)
pre_title = "Absorption by ALL elements"
elif self.PLOT_SETS == 3:
transmittivity_before_last_element = transmittivity_total / transmittivity[-1,:,:,:]
p_to_plot = p_spectral_power * (transmittivity_before_last_element - transmittivity_total)
pre_title = "Absorption by the LAST element"
# plot transmittance stack
try:
self.plot_data3D(transmittivity_total, e, h, v, 0, 0,
xtitle='H [mm]',
ytitle='V [mm]',
title='Code '+code+'; Flux [photons/s/0.1%bw/mm^2]',)
self.tabs.setCurrentIndex(0)
except Exception as e:
self.view_type_combo.setEnabled(True)
raise Exception("Data not plottable: bad content\n" + str(e))
# plot transmittance spectrum
try:
self.plot_data1D(e,transmittivity_total.sum(axis=2).sum(axis=1)/h.size/v.size, 1, 0,
xtitle='Photon Energy [eV]',
ytitle= 'Tramsmittance',
title='Transmittance',)
# self.tabs.setCurrentIndex(2)
except Exception as e:
self.view_type_combo.setEnabled(True)
raise Exception("Data not plottable: bad content\n" + str(e))
# plot result s E,X,Y
try:
self.plot_data3D(p_to_plot, e, h, v, 2, 0,
xtitle='H [mm]',
ytitle='V [mm]',
title=pre_title+' Spectral power density[W/eV/mm^2]',)
self.tabs.setCurrentIndex(0)
except Exception as e:
self.view_type_combo.setEnabled(True)
raise Exception("Data not plottable: bad content\n" + str(e))
# plot result vs X,Y
try:
if len(e) > 1:
energy_step = e[1]-e[0]
else:
energy_step = 1.0
self.plot_data2D(p_to_plot.sum(axis=0)*energy_step, h, v, 3, 0,
xtitle='H [mm]',
ytitle='V [mm]',
title=pre_title+' Power density [W/mm^2]',)
# self.tabs.setCurrentIndex(1)
except Exception as e:
self.view_type_combo.setEnabled(True)
raise Exception("Data not plottable: bad content\n" + str(e))
# plot result vs E
try:
self.plot_data1D(e,p_to_plot.sum(axis=2).sum(axis=1)*(h[1]-h[0])*(v[1]-v[0]), 4, 0,
xtitle='Photon Energy [eV]',
ytitle= 'Spectral power [W/eV]',
title=pre_title+' Spectral power',)
# self.tabs.setCurrentIndex(2)
except Exception as e:
self.view_type_combo.setEnabled(True)
raise Exception("Data not plottable: bad content\n" + str(e))
self.view_type_combo.setEnabled(True)
try:
self.tabs.setCurrentIndex(current_index)
except:
pass
else:
raise Exception("Empty Data")
    def xoppy_calc_power3D(self):
        """Compute the per-element transmittance stack for the input beam.

        For the source plus each configured optical element, a 3D array of
        transmittance values with the same (energy, H, V) shape as the input
        power array ``p`` is filled in.  Filters (flag == 0) use the xraylib
        total cross section (Beer-Lambert attenuation); mirrors (flag == 1)
        use the Fresnel reflectivity built from the complex refractive index
        (s-polarization component ``rs`` is applied).  A power-balance summary
        is printed and the full stack is returned (index 0 is the source,
        i.e. all ones).
        """
        #
        # prepare input for xpower_calc
        # Note that the input for xpower_calc accepts any number of elements.
        #
        # Gather the per-element widget settings into parallel sequences.
        substance = [self.EL1_FOR,self.EL2_FOR,self.EL3_FOR,self.EL4_FOR,self.EL5_FOR]
        thick = numpy.array( (self.EL1_THI,self.EL2_THI,self.EL3_THI,self.EL4_THI,self.EL5_THI))
        angle = numpy.array( (self.EL1_ANG,self.EL2_ANG,self.EL3_ANG,self.EL4_ANG,self.EL5_ANG))
        dens = [self.EL1_DEN,self.EL2_DEN,self.EL3_DEN,self.EL4_DEN,self.EL5_DEN]
        roughness = numpy.array( (self.EL1_ROU,self.EL2_ROU,self.EL3_ROU,self.EL4_ROU,self.EL5_ROU))
        flags = numpy.array( (self.EL1_FLAG,self.EL2_FLAG,self.EL3_FLAG,self.EL4_FLAG,self.EL5_FLAG))
        # Keep only the configured elements.  NOTE(review): the slices take
        # NELEMENTS+1 entries while the compute loops below use
        # range(self.NELEMENTS) — confirm whether NELEMENTS is a 0-based
        # combo-box index or a 1-based element count.
        substance = substance[0:self.NELEMENTS+1]
        thick = thick[0:self.NELEMENTS+1]
        angle = angle[0:self.NELEMENTS+1]
        dens = dens[0:self.NELEMENTS+1]
        roughness = roughness[0:self.NELEMENTS+1]
        flags = flags[0:self.NELEMENTS+1]
        # p: power density (energy, H, V); e: photon energies; h, v: grids.
        p,e,h,v = self.input_beam.get_content("xoppy_data")
        nelem_including_source = self.NELEMENTS + 1
        energies = e
        # initialize results
        # note that element of zero index corresponds to source!!!
        transmittance = numpy.zeros((nelem_including_source,p.shape[0],p.shape[1],p.shape[2]))
        for i in range(nelem_including_source):
            transmittance[i] = numpy.ones_like(p)
        #
        # get undefined densities
        #
        for i in range(nelem_including_source):
            try:
                rho = float(dens[i])
            except:
                # Density string is not numeric (e.g. "?"): look the bulk
                # density up from the chemical formula via xraylib.
                rho = xraylib.ElementDensity(xraylib.SymbolToAtomicNumber(substance[i]))
                print("Density for %s: %g g/cm3"%(substance[i],rho))
            dens[i] = rho
        #info oe
        # Build a human-readable summary of every optical element.
        txt = ""
        for i in range(self.NELEMENTS):
            if flags[i] == 0:
                txt += ' ***** oe '+str(i+1)+' [Filter] *************\n'
                txt += ' Material: %s\n'%(substance[i])
                txt += ' Density [g/cm^3]: %f \n'%(dens[i])
                txt += ' thickness [mm] : %f \n'%(thick[i])
            else:
                txt += ' ***** oe '+str(i+1)+' [Mirror] *************\n'
                txt += ' Material: %s\n'%(substance[i])
                txt += ' Density [g/cm^3]: %f \n'%(dens[i])
                txt += ' grazing angle [mrad]: %f \n'%(angle[i])
                txt += ' roughness [A]: %f \n'%(roughness[i])
        # Fill the transmittance stack, one optical element at a time.
        for i in range(self.NELEMENTS):
            if flags[i] == 0: # filter
                for j,energy in enumerate(energies):
                    tmp = xraylib.CS_Total_CP(substance[i],energy/1000.0)
                    # pay attention to the element index...
                    # Beer-Lambert: thickness is divided by 10 (mm -> cm to
                    # match xraylib's cm^2/g cross sections).
                    transmittance[i+1,j,:,:] = numpy.exp(-tmp*dens[i]*(thick[i]/10.0))
            if flags[i] == 1: # mirror
                # Refractive index n = 1 - delta + i*beta, per energy point.
                tmp = numpy.zeros(energies.size)
                for j,energy in enumerate(energies):
                    tmp[j] = xraylib.Refractive_Index_Re(substance[i],energy/1000.0,dens[i])
                delta = 1.0 - tmp
                beta = numpy.zeros(energies.size)
                for j,energy in enumerate(energies):
                    beta[j] = xraylib.Refractive_Index_Im(substance[i],energy/1000.0,dens[i])
                # Only the s-polarization reflectivity rs is applied below.
                (rs,rp,runp) = reflectivity_fresnel(refraction_index_beta=beta,refraction_index_delta=delta,\
                                            grazing_angle_mrad=angle[i],roughness_rms_A=roughness[i],\
                                            photon_energy_ev=energies)
                for j,energy in enumerate(energies):
                    transmittance[i+1,j,:,:] = rs[j]
        txt += "\n\n\n"
        # Integration element dE*dH*dV times codata.e*1e3 (same scaling used
        # when plotting spectral power) to convert sums of p into watts.
        # Assumes uniform e/h/v grids — TODO confirm.
        integration_constante = (e[1] - e[0]) * (h[1] - h[0]) * (v[1] - v[0]) * codata.e * 1e3
        p_cumulated = p.copy()
        power_cumulated = p_cumulated.sum()*integration_constante
        txt += ' Input beam power: %f W\n'%(power_cumulated)
        # Propagate the beam through each element and report the power budget.
        for i in range(self.NELEMENTS):
            p_cumulated *= transmittance[i+1]
            power_transmitted = (p_cumulated).sum()*integration_constante
            txt += ' Beam power after optical element %d: %6.3f W (absorbed: %6.3f W)\n'%\
                   (i+1,power_transmitted,power_cumulated-power_transmitted)
            power_cumulated = power_transmitted
        print(txt)
        return transmittance
if __name__ == "__main__":
    # Manual smoke test: synthesize undulator-radiation exchange data and
    # feed it to the widget running standalone in a Qt event loop.
    from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation
    from oasys.widgets.exchange import DataExchangeObject
    e, h, v, p, code = xoppy_calc_undulator_radiation(
        ELECTRONENERGY=6.04, ELECTRONENERGYSPREAD=0.001, ELECTRONCURRENT=0.2,
        ELECTRONBEAMSIZEH=0.000395, ELECTRONBEAMSIZEV=9.9e-06,
        ELECTRONBEAMDIVERGENCEH=1.05e-05, ELECTRONBEAMDIVERGENCEV=3.9e-06,
        PERIODID=0.018, NPERIODS=222, KV=1.68, DISTANCE=30.0,
        SETRESONANCE=0, HARMONICNUMBER=1,
        GAPH=0.001, GAPV=0.001,
        HSLITPOINTS=41, VSLITPOINTS=41, METHOD=2,
        PHOTONENERGYMIN=7000, PHOTONENERGYMAX=8100, PHOTONENERGYPOINTS=20,
        USEEMITTANCES=1)
    # Wrap the raw arrays in the exchange format the widget expects.
    exchange_data = DataExchangeObject("XOPPY", "POWER3D")
    exchange_data.add_content("xoppy_data", [p, e, h, v])
    exchange_data.add_content("xoppy_code", code)
    application = QApplication(sys.argv)
    widget = OWpower3D()
    widget.acceptExchangeData(exchange_data)
    widget.show()
    application.exec()
    widget.saveSettings()
| [
"PyQt5.QtWidgets.QSizePolicy",
"orangewidget.settings.Setting",
"numpy.exp",
"oasys.widgets.gui.widgetBox",
"PyQt5.QtWidgets.QApplication",
"xraylib.Refractive_Index_Im",
"xraylib.Refractive_Index_Re",
"numpy.ones_like",
"PyQt5.QtGui.QDoubleValidator",
"oasys.widgets.congruence.checkNumber",
"xr... | [((1232, 1242), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (1239, 1242), False, 'from orangewidget.settings import Setting\n'), ((1257, 1270), 'orangewidget.settings.Setting', 'Setting', (['"""Be"""'], {}), "('Be')\n", (1264, 1270), False, 'from orangewidget.settings import Setting\n'), ((1286, 1296), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1293, 1296), False, 'from orangewidget.settings import Setting\n'), ((1311, 1323), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1318, 1323), False, 'from orangewidget.settings import Setting\n'), ((1338, 1350), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1345, 1350), False, 'from orangewidget.settings import Setting\n'), ((1365, 1377), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1372, 1377), False, 'from orangewidget.settings import Setting\n'), ((1392, 1404), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1399, 1404), False, 'from orangewidget.settings import Setting\n'), ((1419, 1432), 'orangewidget.settings.Setting', 'Setting', (['"""Rh"""'], {}), "('Rh')\n", (1426, 1432), False, 'from orangewidget.settings import Setting\n'), ((1448, 1458), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (1455, 1458), False, 'from orangewidget.settings import Setting\n'), ((1473, 1485), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1480, 1485), False, 'from orangewidget.settings import Setting\n'), ((1500, 1512), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1507, 1512), False, 'from orangewidget.settings import Setting\n'), ((1527, 1539), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1534, 1539), False, 'from orangewidget.settings import Setting\n'), ((1554, 1566), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1561, 1566), False, 'from 
orangewidget.settings import Setting\n'), ((1581, 1594), 'orangewidget.settings.Setting', 'Setting', (['"""Al"""'], {}), "('Al')\n", (1588, 1594), False, 'from orangewidget.settings import Setting\n'), ((1610, 1620), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1617, 1620), False, 'from orangewidget.settings import Setting\n'), ((1635, 1647), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1642, 1647), False, 'from orangewidget.settings import Setting\n'), ((1662, 1674), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1669, 1674), False, 'from orangewidget.settings import Setting\n'), ((1689, 1701), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1696, 1701), False, 'from orangewidget.settings import Setting\n'), ((1716, 1728), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1723, 1728), False, 'from orangewidget.settings import Setting\n'), ((1743, 1755), 'orangewidget.settings.Setting', 'Setting', (['"""B"""'], {}), "('B')\n", (1750, 1755), False, 'from orangewidget.settings import Setting\n'), ((1771, 1781), 'orangewidget.settings.Setting', 'Setting', (['(0)'], {}), '(0)\n', (1778, 1781), False, 'from orangewidget.settings import Setting\n'), ((1796, 1808), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1803, 1808), False, 'from orangewidget.settings import Setting\n'), ((1823, 1835), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1830, 1835), False, 'from orangewidget.settings import Setting\n'), ((1850, 1862), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (1857, 1862), False, 'from orangewidget.settings import Setting\n'), ((1877, 1889), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (1884, 1889), False, 'from orangewidget.settings import Setting\n'), ((1904, 1917), 'orangewidget.settings.Setting', 'Setting', (['"""Pt"""'], {}), 
"('Pt')\n", (1911, 1917), False, 'from orangewidget.settings import Setting\n'), ((1933, 1943), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (1940, 1943), False, 'from orangewidget.settings import Setting\n'), ((1958, 1970), 'orangewidget.settings.Setting', 'Setting', (['(0.5)'], {}), '(0.5)\n', (1965, 1970), False, 'from orangewidget.settings import Setting\n'), ((1985, 1997), 'orangewidget.settings.Setting', 'Setting', (['(3.0)'], {}), '(3.0)\n', (1992, 1997), False, 'from orangewidget.settings import Setting\n'), ((2012, 2024), 'orangewidget.settings.Setting', 'Setting', (['(0.0)'], {}), '(0.0)\n', (2019, 2024), False, 'from orangewidget.settings import Setting\n'), ((2039, 2051), 'orangewidget.settings.Setting', 'Setting', (['"""?"""'], {}), "('?')\n", (2046, 2051), False, 'from orangewidget.settings import Setting\n'), ((2068, 2078), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (2075, 2078), False, 'from orangewidget.settings import Setting\n'), ((32464, 32949), 'orangecontrib.xoppy.util.xoppy_undulators.xoppy_calc_undulator_radiation', 'xoppy_calc_undulator_radiation', ([], {'ELECTRONENERGY': '(6.04)', 'ELECTRONENERGYSPREAD': '(0.001)', 'ELECTRONCURRENT': '(0.2)', 'ELECTRONBEAMSIZEH': '(0.000395)', 'ELECTRONBEAMSIZEV': '(9.9e-06)', 'ELECTRONBEAMDIVERGENCEH': '(1.05e-05)', 'ELECTRONBEAMDIVERGENCEV': '(3.9e-06)', 'PERIODID': '(0.018)', 'NPERIODS': '(222)', 'KV': '(1.68)', 'DISTANCE': '(30.0)', 'SETRESONANCE': '(0)', 'HARMONICNUMBER': '(1)', 'GAPH': '(0.001)', 'GAPV': '(0.001)', 'HSLITPOINTS': '(41)', 'VSLITPOINTS': '(41)', 'METHOD': '(2)', 'PHOTONENERGYMIN': '(7000)', 'PHOTONENERGYMAX': '(8100)', 'PHOTONENERGYPOINTS': '(20)', 'USEEMITTANCES': '(1)'}), '(ELECTRONENERGY=6.04, ELECTRONENERGYSPREAD=\n 0.001, ELECTRONCURRENT=0.2, ELECTRONBEAMSIZEH=0.000395,\n ELECTRONBEAMSIZEV=9.9e-06, ELECTRONBEAMDIVERGENCEH=1.05e-05,\n ELECTRONBEAMDIVERGENCEV=3.9e-06, PERIODID=0.018, NPERIODS=222, KV=1.68,\n DISTANCE=30.0, 
SETRESONANCE=0, HARMONICNUMBER=1, GAPH=0.001, GAPV=0.001,\n HSLITPOINTS=41, VSLITPOINTS=41, METHOD=2, PHOTONENERGYMIN=7000,\n PHOTONENERGYMAX=8100, PHOTONENERGYPOINTS=20, USEEMITTANCES=1)\n', (32494, 32949), False, 'from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_radiation\n'), ((33321, 33359), 'oasys.widgets.exchange.DataExchangeObject', 'DataExchangeObject', (['"""XOPPY"""', '"""POWER3D"""'], {}), "('XOPPY', 'POWER3D')\n", (33339, 33359), False, 'from oasys.widgets.exchange import DataExchangeObject\n'), ((33486, 33508), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (33498, 33508), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n'), ((2373, 2506), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['self.controlArea', "(self.name + ' Input Parameters')"], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 10)'}), "(self.controlArea, self.name + ' Input Parameters',\n orientation='vertical', width=self.CONTROL_AREA_WIDTH - 10)\n", (2391, 2506), True, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((2580, 2598), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2593, 2598), False, 'from orangewidget import gui\n'), ((2985, 3003), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (2998, 3003), False, 'from orangewidget import gui\n'), ((3013, 3042), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (3026, 3042), False, 'from orangewidget import gui\n'), ((3321, 3339), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3334, 3339), False, 'from orangewidget import gui\n'), ((3724, 3742), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (3737, 3742), False, 'from orangewidget import gui\n'), ((4088, 4106), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4101, 4106), False, 
'from orangewidget import gui\n'), ((4452, 4470), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4465, 4470), False, 'from orangewidget import gui\n'), ((4816, 4834), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (4829, 4834), False, 'from orangewidget import gui\n'), ((5103, 5121), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5116, 5121), False, 'from orangewidget import gui\n'), ((5131, 5160), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (5144, 5160), False, 'from orangewidget import gui\n'), ((5439, 5457), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5452, 5457), False, 'from orangewidget import gui\n'), ((5842, 5860), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (5855, 5860), False, 'from orangewidget import gui\n'), ((6206, 6224), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (6219, 6224), False, 'from orangewidget import gui\n'), ((6570, 6588), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (6583, 6588), False, 'from orangewidget import gui\n'), ((6934, 6952), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (6947, 6952), False, 'from orangewidget import gui\n'), ((7231, 7249), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (7244, 7249), False, 'from orangewidget import gui\n'), ((7259, 7288), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (7272, 7288), False, 'from orangewidget import gui\n'), ((7567, 7585), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (7580, 7585), False, 'from orangewidget import gui\n'), ((7970, 7988), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (7983, 7988), False, 'from orangewidget import gui\n'), ((8334, 8352), 
'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (8347, 8352), False, 'from orangewidget import gui\n'), ((8698, 8716), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (8711, 8716), False, 'from orangewidget import gui\n'), ((9062, 9080), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (9075, 9080), False, 'from orangewidget import gui\n'), ((9359, 9377), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (9372, 9377), False, 'from orangewidget import gui\n'), ((9387, 9416), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (9400, 9416), False, 'from orangewidget import gui\n'), ((9695, 9713), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (9708, 9713), False, 'from orangewidget import gui\n'), ((10098, 10116), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (10111, 10116), False, 'from orangewidget import gui\n'), ((10462, 10480), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (10475, 10480), False, 'from orangewidget import gui\n'), ((10826, 10844), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (10839, 10844), False, 'from orangewidget import gui\n'), ((11190, 11208), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (11203, 11208), False, 'from orangewidget import gui\n'), ((11487, 11505), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (11500, 11505), False, 'from orangewidget import gui\n'), ((11514, 11543), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (11527, 11543), False, 'from orangewidget import gui\n'), ((11822, 11840), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (11835, 11840), False, 'from orangewidget import gui\n'), ((12225, 12243), 'orangewidget.gui.widgetBox', 
'gui.widgetBox', (['box'], {}), '(box)\n', (12238, 12243), False, 'from orangewidget import gui\n'), ((12589, 12607), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (12602, 12607), False, 'from orangewidget import gui\n'), ((12953, 12971), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (12966, 12971), False, 'from orangewidget import gui\n'), ((13317, 13335), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (13330, 13335), False, 'from orangewidget import gui\n'), ((13603, 13621), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['box'], {}), '(box)\n', (13616, 13621), False, 'from orangewidget import gui\n'), ((13630, 13659), 'orangewidget.gui.separator', 'gui.separator', (['box1'], {'height': '(7)'}), '(box1, height=7)\n', (13643, 13659), False, 'from orangewidget import gui\n'), ((28002, 28090), 'numpy.array', 'numpy.array', (['(self.EL1_THI, self.EL2_THI, self.EL3_THI, self.EL4_THI, self.EL5_THI)'], {}), '((self.EL1_THI, self.EL2_THI, self.EL3_THI, self.EL4_THI, self.\n EL5_THI))\n', (28013, 28090), False, 'import numpy\n'), ((28103, 28191), 'numpy.array', 'numpy.array', (['(self.EL1_ANG, self.EL2_ANG, self.EL3_ANG, self.EL4_ANG, self.EL5_ANG)'], {}), '((self.EL1_ANG, self.EL2_ANG, self.EL3_ANG, self.EL4_ANG, self.\n EL5_ANG))\n', (28114, 28191), False, 'import numpy\n'), ((28291, 28379), 'numpy.array', 'numpy.array', (['(self.EL1_ROU, self.EL2_ROU, self.EL3_ROU, self.EL4_ROU, self.EL5_ROU)'], {}), '((self.EL1_ROU, self.EL2_ROU, self.EL3_ROU, self.EL4_ROU, self.\n EL5_ROU))\n', (28302, 28379), False, 'import numpy\n'), ((28392, 28484), 'numpy.array', 'numpy.array', (['(self.EL1_FLAG, self.EL2_FLAG, self.EL3_FLAG, self.EL4_FLAG, self.EL5_FLAG)'], {}), '((self.EL1_FLAG, self.EL2_FLAG, self.EL3_FLAG, self.EL4_FLAG,\n self.EL5_FLAG))\n', (28403, 28484), False, 'import numpy\n'), ((29005, 29078), 'numpy.zeros', 'numpy.zeros', (['(nelem_including_source, p.shape[0], p.shape[1], p.shape[2])'], 
{}), '((nelem_including_source, p.shape[0], p.shape[1], p.shape[2]))\n', (29016, 29078), False, 'import numpy\n'), ((2166, 2237), 'PyQt5.QtWidgets.QSizePolicy', 'QSizePolicy', (['QSizePolicy.MinimumExpanding', 'QSizePolicy.MinimumExpanding'], {}), '(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n', (2177, 2237), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QSizePolicy\n'), ((17739, 17798), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL1_FOR', '"""1st oe formula"""'], {}), "(self.EL1_FOR, '1st oe formula')\n", (17766, 17798), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((18476, 18535), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL2_FOR', '"""2nd oe formula"""'], {}), "(self.EL2_FOR, '2nd oe formula')\n", (18503, 18535), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19213, 19272), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL3_FOR', '"""3rd oe formula"""'], {}), "(self.EL3_FOR, '3rd oe formula')\n", (19240, 19272), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19950, 20009), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL4_FOR', '"""4th oe formula"""'], {}), "(self.EL4_FOR, '4th oe formula')\n", (19977, 20009), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20688, 20747), 'oasys.widgets.congruence.checkEmptyString', 'congruence.checkEmptyString', (['self.EL5_FOR', '"""5th oe formula"""'], {}), "(self.EL5_FOR, '5th oe formula')\n", (20715, 20747), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((29155, 29173), 'numpy.ones_like', 'numpy.ones_like', (['p'], {}), '(p)\n', (29170, 29173), False, 'import numpy\n'), ((3907, 3925), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (3923, 3925), False, 'from PyQt5.QtGui import QIntValidator, 
QDoubleValidator\n'), ((4271, 4289), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (4287, 4289), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((4635, 4653), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (4651, 4653), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((6025, 6043), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (6041, 6043), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((6389, 6407), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (6405, 6407), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((6753, 6771), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (6769, 6771), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((8153, 8171), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (8169, 8171), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((8517, 8535), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (8533, 8535), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((8881, 8899), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (8897, 8899), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((10281, 10299), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (10297, 10299), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((10645, 10663), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (10661, 10663), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((11009, 11027), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (11025, 11027), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((12408, 12426), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (12424, 12426), False, 'from 
PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((12772, 12790), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (12788, 12790), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((13136, 13154), 'PyQt5.QtGui.QDoubleValidator', 'QDoubleValidator', ([], {}), '()\n', (13152, 13154), False, 'from PyQt5.QtGui import QIntValidator, QDoubleValidator\n'), ((17875, 17954), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL1_THI', '"""1st oe filter thickness"""'], {}), "(self.EL1_THI, '1st oe filter thickness')\n", (17913, 17954), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((18612, 18691), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL2_THI', '"""2nd oe filter thickness"""'], {}), "(self.EL2_THI, '2nd oe filter thickness')\n", (18650, 18691), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19349, 19428), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL3_THI', '"""3rd oe filter thickness"""'], {}), "(self.EL3_THI, '3rd oe filter thickness')\n", (19387, 19428), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20086, 20165), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL4_THI', '"""4th oe filter thickness"""'], {}), "(self.EL4_THI, '4th oe filter thickness')\n", (20124, 20165), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20824, 20903), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL5_THI', '"""5th oe filter thickness"""'], {}), "(self.EL5_THI, '5th oe filter thickness')\n", (20862, 20903), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((30697, 30723), 'numpy.zeros', 'numpy.zeros', (['energies.size'], {}), 
'(energies.size)\n', (30708, 30723), False, 'import numpy\n'), ((30928, 30954), 'numpy.zeros', 'numpy.zeros', (['energies.size'], {}), '(energies.size)\n', (30939, 30954), False, 'import numpy\n'), ((31134, 31307), 'orangecontrib.xoppy.util.xoppy_xraylib_util.reflectivity_fresnel', 'reflectivity_fresnel', ([], {'refraction_index_beta': 'beta', 'refraction_index_delta': 'delta', 'grazing_angle_mrad': 'angle[i]', 'roughness_rms_A': 'roughness[i]', 'photon_energy_ev': 'energies'}), '(refraction_index_beta=beta, refraction_index_delta=\n delta, grazing_angle_mrad=angle[i], roughness_rms_A=roughness[i],\n photon_energy_ev=energies)\n', (31154, 31307), False, 'from orangecontrib.xoppy.util.xoppy_xraylib_util import reflectivity_fresnel\n'), ((18032, 18107), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL1_ANG', '"""1st oe mirror angle"""'], {}), "(self.EL1_ANG, '1st oe mirror angle')\n", (18070, 18107), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((18139, 18210), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL1_ROU', '"""1st oe mirror roughness"""'], {}), "(self.EL1_ROU, '1st oe mirror roughness')\n", (18169, 18210), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((18769, 18844), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL2_ANG', '"""2nd oe mirror angle"""'], {}), "(self.EL2_ANG, '2nd oe mirror angle')\n", (18807, 18844), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((18876, 18947), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL2_ROU', '"""2nd oe mirror roughness"""'], {}), "(self.EL2_ROU, '2nd oe mirror roughness')\n", (18906, 18947), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19506, 19581), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 
'congruence.checkStrictlyPositiveNumber', (['self.EL3_ANG', '"""3rd oe mirror angle"""'], {}), "(self.EL3_ANG, '3rd oe mirror angle')\n", (19544, 19581), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19613, 19684), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL3_ROU', '"""3rd oe mirror roughness"""'], {}), "(self.EL3_ROU, '3rd oe mirror roughness')\n", (19643, 19684), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20243, 20318), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL4_ANG', '"""4th oe mirror angle"""'], {}), "(self.EL4_ANG, '4th oe mirror angle')\n", (20281, 20318), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20350, 20421), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL4_ROU', '"""4th oe mirror roughness"""'], {}), "(self.EL4_ROU, '4th oe mirror roughness')\n", (20380, 20421), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20981, 21056), 'oasys.widgets.congruence.checkStrictlyPositiveNumber', 'congruence.checkStrictlyPositiveNumber', (['self.EL5_ANG', '"""5th oe mirror angle"""'], {}), "(self.EL5_ANG, '5th oe mirror angle')\n", (21019, 21056), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((21088, 21159), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.EL5_ROU', '"""5th oe mirror roughness"""'], {}), "(self.EL5_ROU, '5th oe mirror roughness')\n", (21118, 21159), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((30438, 30488), 'xraylib.CS_Total_CP', 'xraylib.CS_Total_CP', (['substance[i]', '(energy / 1000.0)'], {}), '(substance[i], energy / 1000.0)\n', (30457, 30488), False, 'import xraylib\n'), ((30594, 30639), 'numpy.exp', 'numpy.exp', (['(-tmp * dens[i] * (thick[i] / 10.0))'], {}), '(-tmp * dens[i] * (thick[i] / 10.0))\n', (30603, 
30639), False, 'import numpy\n'), ((30806, 30873), 'xraylib.Refractive_Index_Re', 'xraylib.Refractive_Index_Re', (['substance[i]', '(energy / 1000.0)', 'dens[i]'], {}), '(substance[i], energy / 1000.0, dens[i])\n', (30833, 30873), False, 'import xraylib\n'), ((31038, 31105), 'xraylib.Refractive_Index_Im', 'xraylib.Refractive_Index_Im', (['substance[i]', '(energy / 1000.0)', 'dens[i]'], {}), '(substance[i], energy / 1000.0, dens[i])\n', (31065, 31105), False, 'import xraylib\n'), ((29396, 29438), 'xraylib.SymbolToAtomicNumber', 'xraylib.SymbolToAtomicNumber', (['substance[i]'], {}), '(substance[i])\n', (29424, 29438), False, 'import xraylib\n'), ((18340, 18394), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL1_DEN', '"""1st oe density"""'], {}), "(self.EL1_DEN, '1st oe density')\n", (18362, 18394), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19077, 19131), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL2_DEN', '"""2nd oe density"""'], {}), "(self.EL2_DEN, '2nd oe density')\n", (19099, 19131), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((19814, 19868), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL3_DEN', '"""3rd oe density"""'], {}), "(self.EL3_DEN, '3rd oe density')\n", (19836, 19868), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((20552, 20606), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL4_DEN', '"""4th oe density"""'], {}), "(self.EL4_DEN, '4th oe density')\n", (20574, 20606), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((21289, 21343), 'oasys.widgets.congruence.checkNumber', 'congruence.checkNumber', (['self.EL5_DEN', '"""5th oe density"""'], {}), "(self.EL5_DEN, '5th oe density')\n", (21311, 21343), False, 'from oasys.widgets import gui as oasysgui, congruence\n'), ((23987, 24024), 'numpy.ones_like', 'numpy.ones_like', (['transmittivity_total'], {}), 
'(transmittivity_total)\n', (24002, 24024), False, 'import numpy\n')] |
#!/usr/bin/env python3
import os
import json
import csv
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
from PIL import Image as PilImage
import numpy as np
import tf
from panoptic_mapping_msgs.msg import DetectronLabel, DetectronLabels
class DetectronPlayer(object):
    """Replays stored Detectron predictions in sync with incoming id images.

    For every id image received on ``~id_image_in`` the node looks up the
    matching image id (via ``timestamps.csv``) and publishes the stored
    prediction image, the Detectron labels, an optional depth image, and the
    stored camera pose as a TF transform.
    """
    def __init__(self):
        """ Initialize ros node and read params """
        # params
        self.data_path = rospy.get_param(
            '~data_path', '/home/lukas/Documents/Datasets/flat_dataset/run1')
        self.global_frame_name = rospy.get_param('~global_frame_name', 'world')
        # ROS
        self.img_pub = rospy.Publisher("~predicted_image",
                                       Image,
                                       queue_size=10)
        self.depth_pub = rospy.Publisher("~depth_image", Image, queue_size=10)
        self.label_pub = rospy.Publisher("~labels",
                                         DetectronLabels,
                                         queue_size=10)
        self.img_sub = rospy.Subscriber("~id_image_in",
                                        Image,
                                        self.img_callback,
                                        queue_size=10)
        self.tf_broadcaster = tf.TransformBroadcaster()
        # setup
        self.cv_bridge = CvBridge()
        # Build the timestamp -> image id lookup from timestamps.csv.
        # NOTE(review): rospy.logfatal only logs; execution continues and the
        # open() below raises FileNotFoundError if the file is missing.
        stamps_file = os.path.join(self.data_path, 'timestamps.csv')
        self.stamp_to_id = {}
        if not os.path.isfile(stamps_file):
            rospy.logfatal("No timestamp file '%s' found." % stamps_file)
        with open(stamps_file, 'r') as read_obj:
            csv_reader = csv.reader(read_obj)
            for row in csv_reader:
                # Skip the CSV header row.
                if row[0] == "ImageID":
                    continue
                # Map timestamp string (column 1) to image id (column 0).
                self.stamp_to_id[str(row[1])] = str(row[0])
    def img_callback(self, id_img):
        """Publish the stored prediction, labels, depth and pose for id_img.

        Silently skips the message (with a warning) when any of the required
        files for its timestamp is missing.
        """
        # Verify lookups for required datasets.
        # Timestamp key: seconds concatenated with zero-padded nanoseconds.
        timestamp = str(
            id_img.header.stamp.secs) + "%09d" % id_img.header.stamp.nsecs
        if timestamp not in self.stamp_to_id:
            rospy.logwarn(
                "No prediction for message with timestamp '%s' found,"
                " skipping image." % timestamp)
            return
        prediction_file = os.path.join(
            self.data_path, self.stamp_to_id[timestamp] + "_predicted.png")
        if not os.path.isfile(prediction_file):
            rospy.logwarn("Could not find file '%s', skipping image." %
                          prediction_file)
            return
        labels_file = os.path.join(
            self.data_path, self.stamp_to_id[timestamp] + "_labels.json")
        if not os.path.isfile(labels_file):
            rospy.logwarn("Could not find file '%s', skipping image." %
                          labels_file)
            return
        # Load and publish image.
        # Only the first channel is published (single-channel id image).
        cv_img = cv2.imread(prediction_file)
        img_msg = self.cv_bridge.cv2_to_imgmsg(cv_img[:, :, 0], "8UC1")
        img_msg.header.stamp = id_img.header.stamp
        img_msg.header.frame_id = id_img.header.frame_id
        self.img_pub.publish(img_msg)
        # Load and publish labels.
        label_msg = DetectronLabels()
        label_msg.header.stamp = img_msg.header.stamp
        with open(labels_file) as json_file:
            data = json.load(json_file)
            for d in data:
                # Backfill optional fields so message construction never fails.
                if 'instance_id' not in d:
                    d['instance_id'] = 0
                if 'score' not in d:
                    d['score'] = 0
                label = DetectronLabel()
                label.id = d['id']
                label.instance_id = d['instance_id']
                label.is_thing = d['isthing']
                label.category_id = d['category_id']
                label.score = d['score']
                label_msg.labels.append(label)
        self.label_pub.publish(label_msg)
        # Load and publish depth image. These are optional.
        depth_file = os.path.join(self.data_path,
                                  self.stamp_to_id[timestamp] + "_depth.tiff")
        if os.path.isfile(depth_file):
            cv_img = PilImage.open(depth_file)
            img_msg = self.cv_bridge.cv2_to_imgmsg(np.array(cv_img), "32FC1")
            img_msg.header.stamp = id_img.header.stamp
            # img_msg.header.frame_id = id_img.header.frame_id
            # NOTE(review): frame id is hard-coded to match the transform
            # broadcast below -- presumably a debugging choice; confirm.
            img_msg.header.frame_id = "test_frame"
            self.depth_pub.publish(img_msg)
        # Load and publish transform.
        pose_file = os.path.join(self.data_path,
                                 self.stamp_to_id[timestamp] + "_pose.txt")
        if os.path.isfile(pose_file):
            # Pose file holds 16 whitespace-separated floats (row-major 4x4).
            pose_data = [float(x) for x in open(pose_file, 'r').read().split()]
            transform = np.eye(4)
            for row in range(4):
                for col in range(4):
                    transform[row, col] = pose_data[row * 4 + col]
            rotation = tf.transformations.quaternion_from_matrix(transform)
            self.tf_broadcaster.sendTransform(
                (transform[0, 3], transform[1, 3], transform[2, 3]), rotation,
                id_img.header.stamp, "test_frame", self.global_frame_name)
if __name__ == '__main__':
    # Start the node and hand control to the ROS event loop; all work happens
    # inside DetectronPlayer.img_callback.
    rospy.init_node('detectron_player', anonymous=True)
    detectron_player = DetectronPlayer()
    rospy.spin()
| [
"rospy.Subscriber",
"csv.reader",
"os.path.isfile",
"os.path.join",
"rospy.logwarn",
"panoptic_mapping_msgs.msg.DetectronLabels",
"rospy.init_node",
"tf.transformations.quaternion_from_matrix",
"rospy.logfatal",
"panoptic_mapping_msgs.msg.DetectronLabel",
"cv_bridge.CvBridge",
"json.load",
"... | [((5190, 5241), 'rospy.init_node', 'rospy.init_node', (['"""detectron_player"""'], {'anonymous': '(True)'}), "('detectron_player', anonymous=True)\n", (5205, 5241), False, 'import rospy\n'), ((5287, 5299), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5297, 5299), False, 'import rospy\n'), ((433, 518), 'rospy.get_param', 'rospy.get_param', (['"""~data_path"""', '"""/home/lukas/Documents/Datasets/flat_dataset/run1"""'], {}), "('~data_path',\n '/home/lukas/Documents/Datasets/flat_dataset/run1')\n", (448, 518), False, 'import rospy\n'), ((561, 607), 'rospy.get_param', 'rospy.get_param', (['"""~global_frame_name"""', '"""world"""'], {}), "('~global_frame_name', 'world')\n", (576, 607), False, 'import rospy\n'), ((646, 703), 'rospy.Publisher', 'rospy.Publisher', (['"""~predicted_image"""', 'Image'], {'queue_size': '(10)'}), "('~predicted_image', Image, queue_size=10)\n", (661, 703), False, 'import rospy\n'), ((807, 860), 'rospy.Publisher', 'rospy.Publisher', (['"""~depth_image"""', 'Image'], {'queue_size': '(10)'}), "('~depth_image', Image, queue_size=10)\n", (822, 860), False, 'import rospy\n'), ((886, 944), 'rospy.Publisher', 'rospy.Publisher', (['"""~labels"""', 'DetectronLabels'], {'queue_size': '(10)'}), "('~labels', DetectronLabels, queue_size=10)\n", (901, 944), False, 'import rospy\n'), ((1050, 1123), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~id_image_in"""', 'Image', 'self.img_callback'], {'queue_size': '(10)'}), "('~id_image_in', Image, self.img_callback, queue_size=10)\n", (1066, 1123), False, 'import rospy\n'), ((1274, 1299), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (1297, 1299), False, 'import tf\n'), ((1342, 1352), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1350, 1352), False, 'from cv_bridge import CvBridge\n'), ((1375, 1421), 'os.path.join', 'os.path.join', (['self.data_path', '"""timestamps.csv"""'], {}), "(self.data_path, 'timestamps.csv')\n", (1387, 1421), False, 'import os\n'), ((2252, 2328), 
'os.path.join', 'os.path.join', (['self.data_path', "(self.stamp_to_id[timestamp] + '_predicted.png')"], {}), "(self.data_path, self.stamp_to_id[timestamp] + '_predicted.png')\n", (2264, 2328), False, 'import os\n'), ((2546, 2620), 'os.path.join', 'os.path.join', (['self.data_path', "(self.stamp_to_id[timestamp] + '_labels.json')"], {}), "(self.data_path, self.stamp_to_id[timestamp] + '_labels.json')\n", (2558, 2620), False, 'import os\n'), ((2860, 2887), 'cv2.imread', 'cv2.imread', (['prediction_file'], {}), '(prediction_file)\n', (2870, 2887), False, 'import cv2\n'), ((3162, 3179), 'panoptic_mapping_msgs.msg.DetectronLabels', 'DetectronLabels', ([], {}), '()\n', (3177, 3179), False, 'from panoptic_mapping_msgs.msg import DetectronLabel, DetectronLabels\n'), ((3942, 4015), 'os.path.join', 'os.path.join', (['self.data_path', "(self.stamp_to_id[timestamp] + '_depth.tiff')"], {}), "(self.data_path, self.stamp_to_id[timestamp] + '_depth.tiff')\n", (3954, 4015), False, 'import os\n'), ((4061, 4087), 'os.path.isfile', 'os.path.isfile', (['depth_file'], {}), '(depth_file)\n', (4075, 4087), False, 'import os\n'), ((4486, 4557), 'os.path.join', 'os.path.join', (['self.data_path', "(self.stamp_to_id[timestamp] + '_pose.txt')"], {}), "(self.data_path, self.stamp_to_id[timestamp] + '_pose.txt')\n", (4498, 4557), False, 'import os\n'), ((4602, 4627), 'os.path.isfile', 'os.path.isfile', (['pose_file'], {}), '(pose_file)\n', (4616, 4627), False, 'import os\n'), ((1467, 1494), 'os.path.isfile', 'os.path.isfile', (['stamps_file'], {}), '(stamps_file)\n', (1481, 1494), False, 'import os\n'), ((1508, 1569), 'rospy.logfatal', 'rospy.logfatal', (['("No timestamp file \'%s\' found." % stamps_file)'], {}), '("No timestamp file \'%s\' found." 
% stamps_file)\n', (1522, 1569), False, 'import rospy\n'), ((1644, 1664), 'csv.reader', 'csv.reader', (['read_obj'], {}), '(read_obj)\n', (1654, 1664), False, 'import csv\n'), ((2073, 2179), 'rospy.logwarn', 'rospy.logwarn', (['("No prediction for message with timestamp \'%s\' found, skipping image." %\n timestamp)'], {}), '(\n "No prediction for message with timestamp \'%s\' found, skipping image." %\n timestamp)\n', (2086, 2179), False, 'import rospy\n'), ((2357, 2388), 'os.path.isfile', 'os.path.isfile', (['prediction_file'], {}), '(prediction_file)\n', (2371, 2388), False, 'import os\n'), ((2402, 2478), 'rospy.logwarn', 'rospy.logwarn', (['("Could not find file \'%s\', skipping image." % prediction_file)'], {}), '("Could not find file \'%s\', skipping image." % prediction_file)\n', (2415, 2478), False, 'import rospy\n'), ((2649, 2676), 'os.path.isfile', 'os.path.isfile', (['labels_file'], {}), '(labels_file)\n', (2663, 2676), False, 'import os\n'), ((2690, 2762), 'rospy.logwarn', 'rospy.logwarn', (['("Could not find file \'%s\', skipping image." % labels_file)'], {}), '("Could not find file \'%s\', skipping image." 
% labels_file)\n', (2703, 2762), False, 'import rospy\n'), ((3298, 3318), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3307, 3318), False, 'import json\n'), ((4110, 4135), 'PIL.Image.open', 'PilImage.open', (['depth_file'], {}), '(depth_file)\n', (4123, 4135), True, 'from PIL import Image as PilImage\n'), ((4733, 4742), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4739, 4742), True, 'import numpy as np\n'), ((4903, 4955), 'tf.transformations.quaternion_from_matrix', 'tf.transformations.quaternion_from_matrix', (['transform'], {}), '(transform)\n', (4944, 4955), False, 'import tf\n'), ((3526, 3542), 'panoptic_mapping_msgs.msg.DetectronLabel', 'DetectronLabel', ([], {}), '()\n', (3540, 3542), False, 'from panoptic_mapping_msgs.msg import DetectronLabel, DetectronLabels\n'), ((4187, 4203), 'numpy.array', 'np.array', (['cv_img'], {}), '(cv_img)\n', (4195, 4203), True, 'import numpy as np\n')] |
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
Computation of purity score with sklearn.
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.metrics import accuracy_score
import numpy as np
def purity_score(y_true, y_pred):
    """Purity score
    To compute purity, each cluster is assigned to the class which is most frequent
    in the cluster [1], and then the accuracy of this assignment is measured by counting
    the number of correctly assigned documents and dividing by the number of documents.

    Args:
        y_true(np.ndarray): n*1 matrix Ground truth labels
        y_pred(np.ndarray): n*1 matrix Predicted clusters

    Returns:
        float: Purity score in [0, 1]

    References:
        [1] https://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-clustering-1.html
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # Matrix which will hold the majority-voted labels.
    y_labeled_voted = np.zeros(y_true.shape)
    for cluster in np.unique(y_pred):
        members = y_true[y_pred == cluster]
        # Majority vote: the most frequent true label within this cluster.
        # BUGFIX: the previous implementation assigned the *index* of the
        # winning histogram bin instead of the winning label itself, which
        # produced wrong scores whenever labels were not exactly 0..k-1.
        values, counts = np.unique(members, return_counts=True)
        y_labeled_voted[y_pred == cluster] = values[np.argmax(counts)]
    # Fraction of correctly assigned samples; for 1-D label arrays this is
    # identical to sklearn.metrics.accuracy_score(y_true, y_labeled_voted).
    return float(np.mean(y_labeled_voted == y_true))
"numpy.argmax",
"sklearn.metrics.accuracy_score",
"numpy.zeros",
"numpy.histogram",
"numpy.max",
"numpy.unique"
] | [((1940, 1962), 'numpy.zeros', 'np.zeros', (['y_true.shape'], {}), '(y_true.shape)\n', (1948, 1962), True, 'import numpy as np\n'), ((1976, 1993), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (1985, 1993), True, 'import numpy as np\n'), ((2261, 2278), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (2270, 2278), True, 'import numpy as np\n'), ((2499, 2538), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_labeled_voted'], {}), '(y_true, y_labeled_voted)\n', (2513, 2538), False, 'from sklearn.metrics import accuracy_score\n'), ((2298, 2348), 'numpy.histogram', 'np.histogram', (['y_true[y_pred == cluster]'], {'bins': 'bins'}), '(y_true[y_pred == cluster], bins=bins)\n', (2310, 2348), True, 'import numpy as np\n'), ((2417, 2432), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (2426, 2432), True, 'import numpy as np\n'), ((2213, 2227), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (2219, 2227), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
import model
# -------------------- create dataset:
# NOTE(review): the printed sample count is hard-coded; keep it in sync with
# n_samples below.
print("Created dataset: 10000 samples")
# sample variables
n_samples = 10000
def noise(n_samples):
    """Return ``n_samples`` Gaussian noise draws with mean 0.0 and std 0.1."""
    return np.random.normal(0.0, 0.1, size=n_samples)
# Confounder A, then X and Y which both depend on A (plus fresh noise).
A = noise(n_samples)
X = np.exp(-0.5 * A * A) * np.sin(2 * A) + noise(n_samples)
Y = 0.5 * np.exp(-0.5 * A * A) * np.sin(2 * A) + 0.1 * X + 0.02 * noise(n_samples)
# Center each variable and rescale it.
# NOTE(review): this divides by np.var, not np.std -- confirm that this
# (rather than standard z-scoring) is intended.
data_for_A = (A - np.mean(A))/np.var(A)
data_for_X = (X - np.mean(X))/np.var(X)
data_for_Y = (Y - np.mean(Y))/np.var(Y)
# 80% of the sample are used for training, and 20% for testing
# (random Bernoulli split, so the exact sizes vary slightly).
rnd = np.random.uniform(0,1,n_samples)
train_idx = np.array(np.where(rnd < 0.8)).flatten()
test_idx = np.array(np.where(rnd >= 0.8)).flatten()
train_A = data_for_A[train_idx]
train_X = data_for_X[train_idx]
train_Y = data_for_Y[train_idx]
test_A = data_for_A[test_idx]
test_X = data_for_X[test_idx]
test_Y = data_for_Y[test_idx]
# Model inputs are (X, A) pairs stacked as rows of shape [n, 2].
data_train = np.vstack((data_for_X[train_idx], data_for_A[train_idx])).T
data_test = np.vstack((data_for_X[test_idx], data_for_A[test_idx])).T
# -------------------- create first model:
print("Train model without HSCIC:")
# define optimizer, loss function, and weights for losses
optimizer = keras.optimizers.Adam(learning_rate=0.001)
loss_function = tf.keras.losses.MeanSquaredError()
# fit the first model over training examples (no HSCIC loss)
autoencoder = model.Model(model_loss_weight=1.0,
                   kl_loss_weight=1.0,
                   hscic_loss_weight=0.0)
autoencoder.compile(optimizer=optimizer,
              loss=loss_function,
              run_eagerly=True)
autoencoder.fit(x = data_train,
          y = train_Y,
          batch_size=125,
          epochs = 5)
y_predict = autoencoder.predict(data_test)
# fit the second model over training examples (with HSCIC loss)
# NOTE(review): the same Adam optimizer instance is reused for the second
# model; its slot state was created for the first model's variables --
# confirm Keras handles this as intended or create a fresh optimizer.
print("Train model with HSCIC:")
autoencoder = model.Model(model_loss_weight=0.0001,
                   kl_loss_weight=0.0001,
                   hscic_loss_weight=1.0)
autoencoder.compile(optimizer=optimizer,
              loss=loss_function,
              run_eagerly=True)
autoencoder.fit(x = data_train,
          y = train_Y,
          batch_size=125,
          epochs = 5)
y_predict_hscic = autoencoder.predict(data_test)
# ----------- plot stuff
# Three scatter plots of the test (A, X) points, colored by the true Y and
# by each model's predictions.
import matplotlib.pyplot as plt
plt.figure(1)
plt.scatter(test_A, test_X, c = test_Y, alpha = 1.0, cmap = 'Spectral')
plt.xlabel('A')
plt.ylabel('X')
plt.suptitle('Original Dataset')
plt.figure(2)
plt.scatter(test_A, test_X, c = y_predict, alpha = 1.0, cmap = 'Spectral')
plt.xlabel('A')
plt.ylabel('X')
plt.suptitle('Model Predictions without HSCIC')
plt.figure(3)
plt.scatter(test_A, test_X, c = y_predict_hscic, alpha = 1.0, cmap = 'Spectral')
plt.xlabel('A')
plt.ylabel('X')
plt.suptitle('Model Predictions with HSCIC')
plt.show()
| [
"numpy.random.uniform",
"tensorflow.keras.losses.MeanSquaredError",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.scatter",
"model.Model",
"matplotlib.pyplot.figure",
"numpy.sin",
"tensorflow.keras.optimizers.Adam",
"numpy.exp",
"numpy.random.normal",
"numpy.mean",... | [((632, 666), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'n_samples'], {}), '(0, 1, n_samples)\n', (649, 666), True, 'import numpy as np\n'), ((1254, 1296), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1275, 1296), False, 'from tensorflow import keras\n'), ((1313, 1347), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (1345, 1347), True, 'import tensorflow as tf\n'), ((1424, 1501), 'model.Model', 'model.Model', ([], {'model_loss_weight': '(1.0)', 'kl_loss_weight': '(1.0)', 'hscic_loss_weight': '(0.0)'}), '(model_loss_weight=1.0, kl_loss_weight=1.0, hscic_loss_weight=0.0)\n', (1435, 1501), False, 'import model\n'), ((1950, 2037), 'model.Model', 'model.Model', ([], {'model_loss_weight': '(0.0001)', 'kl_loss_weight': '(0.0001)', 'hscic_loss_weight': '(1.0)'}), '(model_loss_weight=0.0001, kl_loss_weight=0.0001,\n hscic_loss_weight=1.0)\n', (1961, 2037), False, 'import model\n'), ((2434, 2447), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2444, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2513), 'matplotlib.pyplot.scatter', 'plt.scatter', (['test_A', 'test_X'], {'c': 'test_Y', 'alpha': '(1.0)', 'cmap': '"""Spectral"""'}), "(test_A, test_X, c=test_Y, alpha=1.0, cmap='Spectral')\n", (2459, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""A"""'], {}), "('A')\n", (2530, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2536, 2551), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X"""'], {}), "('X')\n", (2546, 2551), True, 'import matplotlib.pyplot as plt\n'), ((2552, 2584), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Original Dataset"""'], {}), "('Original Dataset')\n", (2564, 2584), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2598), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), 
'(2)\n', (2595, 2598), True, 'import matplotlib.pyplot as plt\n'), ((2599, 2667), 'matplotlib.pyplot.scatter', 'plt.scatter', (['test_A', 'test_X'], {'c': 'y_predict', 'alpha': '(1.0)', 'cmap': '"""Spectral"""'}), "(test_A, test_X, c=y_predict, alpha=1.0, cmap='Spectral')\n", (2610, 2667), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2689), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""A"""'], {}), "('A')\n", (2684, 2689), True, 'import matplotlib.pyplot as plt\n'), ((2690, 2705), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X"""'], {}), "('X')\n", (2700, 2705), True, 'import matplotlib.pyplot as plt\n'), ((2706, 2753), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Model Predictions without HSCIC"""'], {}), "('Model Predictions without HSCIC')\n", (2718, 2753), True, 'import matplotlib.pyplot as plt\n'), ((2754, 2767), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (2764, 2767), True, 'import matplotlib.pyplot as plt\n'), ((2768, 2842), 'matplotlib.pyplot.scatter', 'plt.scatter', (['test_A', 'test_X'], {'c': 'y_predict_hscic', 'alpha': '(1.0)', 'cmap': '"""Spectral"""'}), "(test_A, test_X, c=y_predict_hscic, alpha=1.0, cmap='Spectral')\n", (2779, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2864), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""A"""'], {}), "('A')\n", (2859, 2864), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2880), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X"""'], {}), "('X')\n", (2875, 2880), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2925), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Model Predictions with HSCIC"""'], {}), "('Model Predictions with HSCIC')\n", (2893, 2925), True, 'import matplotlib.pyplot as plt\n'), ((2926, 2936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2934, 2936), True, 'import matplotlib.pyplot as plt\n'), ((233, 275), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)'], {'size': 'n_samples'}), '(0.0, 0.1, 
size=n_samples)\n', (249, 275), True, 'import numpy as np\n'), ((472, 481), 'numpy.var', 'np.var', (['A'], {}), '(A)\n', (478, 481), True, 'import numpy as np\n'), ((512, 521), 'numpy.var', 'np.var', (['X'], {}), '(X)\n', (518, 521), True, 'import numpy as np\n'), ((552, 561), 'numpy.var', 'np.var', (['Y'], {}), '(Y)\n', (558, 561), True, 'import numpy as np\n'), ((972, 1029), 'numpy.vstack', 'np.vstack', (['(data_for_X[train_idx], data_for_A[train_idx])'], {}), '((data_for_X[train_idx], data_for_A[train_idx]))\n', (981, 1029), True, 'import numpy as np\n'), ((1045, 1100), 'numpy.vstack', 'np.vstack', (['(data_for_X[test_idx], data_for_A[test_idx])'], {}), '((data_for_X[test_idx], data_for_A[test_idx]))\n', (1054, 1100), True, 'import numpy as np\n'), ((302, 322), 'numpy.exp', 'np.exp', (['(-0.5 * A * A)'], {}), '(-0.5 * A * A)\n', (308, 322), True, 'import numpy as np\n'), ((325, 338), 'numpy.sin', 'np.sin', (['(2 * A)'], {}), '(2 * A)\n', (331, 338), True, 'import numpy as np\n'), ((460, 470), 'numpy.mean', 'np.mean', (['A'], {}), '(A)\n', (467, 470), True, 'import numpy as np\n'), ((500, 510), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (507, 510), True, 'import numpy as np\n'), ((540, 550), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (547, 550), True, 'import numpy as np\n'), ((391, 404), 'numpy.sin', 'np.sin', (['(2 * A)'], {}), '(2 * A)\n', (397, 404), True, 'import numpy as np\n'), ((686, 705), 'numpy.where', 'np.where', (['(rnd < 0.8)'], {}), '(rnd < 0.8)\n', (694, 705), True, 'import numpy as np\n'), ((739, 759), 'numpy.where', 'np.where', (['(rnd >= 0.8)'], {}), '(rnd >= 0.8)\n', (747, 759), True, 'import numpy as np\n'), ((368, 388), 'numpy.exp', 'np.exp', (['(-0.5 * A * A)'], {}), '(-0.5 * A * A)\n', (374, 388), True, 'import numpy as np\n')] |
import tensorflow as tf
if tf.__version__ == '1.5.0':
import keras
from keras.engine import Layer
from tensorflow import sparse_tensor_to_dense as to_dense
else:
from tensorflow import keras
from tensorflow.keras.layers import Layer
from tensorflow.sparse import to_dense
import numpy as np
from .utils import apply_box_deltas_graph
from .utils import clip_boxes_graph
from .utils import norm_boxes_graph
#NOTE: None of this will get exported to CoreML. This is only useful for python inference, and for CoreML to determine
#input and output shapes.
def refine_detections_graph(rois,
                            classifications,
                            window,
                            BBOX_STD_DEV,
                            DETECTION_MIN_CONFIDENCE,
                            DETECTION_MAX_INSTANCES,
                            DETECTION_NMS_THRESHOLD):
    """Refine classified proposals, filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] proposal boxes in normalized coordinates.
        classifications: [N, 6] per-ROI results packed as
            columns 0-3: box deltas (dy, dx, log(dh), log(dw)) for the top
            class, column 4: top class id, column 5: top class score.
        window: (y1, x1, y2, x2) in normalized coordinates. The part of the
            image that contains the image excluding the padding.
        BBOX_STD_DEV: scaling applied to the box deltas before refinement.
        DETECTION_MIN_CONFIDENCE: minimum class score to keep a detection;
            a falsy value disables the confidence filter.
        DETECTION_MAX_INSTANCES: maximum number of detections returned; the
            output is zero-padded up to this count.
        DETECTION_NMS_THRESHOLD: IoU threshold for the per-class NMS.

    Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
    coordinates are normalized.
    """
    # Class IDs per ROI
    class_ids = tf.to_int32(classifications[:,4])
    # Class-specific bounding box deltas
    deltas_specific = classifications[:,0:4]
    # Class probability of the top class of each ROI
    class_scores = classifications[:,5]
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # TODO: Filter out boxes with zero area
    # Filter out background boxes (class id 0 is background)
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= DETECTION_MIN_CONFIDENCE)[:, 0]
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=DETECTION_MAX_INSTANCES,
                iou_threshold=DETECTION_NMS_THRESHOLD)
        # Map indices back into the pre-filter index space
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([DETECTION_MAX_INSTANCES])
        return class_keep
    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,dtype=tf.int64, parallel_iterations=1)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = to_dense(keep)[0]
    # Keep top detections (highest scores first)
    roi_count = DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return tf.reshape(detections, (-1,6))
class DetectionLayer(Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Called with ``[rois, classifications]`` and optionally a third input,
    the normalized image bounding box ``(y1, x1, y2, x2)``. When the third
    input is omitted, the full image ``[0, 0, 1, 1]`` is used as the window.

    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """
    def __init__(self,
                 max_detections,
                 bounding_box_std_dev,
                 detection_min_confidence,
                 detection_nms_threshold,
                 image_shape,
                 **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        self.max_detections = max_detections
        self.bounding_box_std_dev = bounding_box_std_dev
        self.detection_min_confidence = detection_min_confidence
        self.detection_nms_threshold = detection_nms_threshold
        self.image_shape = image_shape
        # A detection cap is mandatory: the output tensor is padded/truncated
        # to exactly this many rows per sample.
        assert max_detections is not None
    def get_config(self):
        """Return the config dict so Keras can serialize/re-create the layer."""
        config = super(DetectionLayer, self).get_config()
        config['max_detections'] = self.max_detections
        config['bounding_box_std_dev'] = self.bounding_box_std_dev
        config['detection_min_confidence'] = self.detection_min_confidence
        config['detection_nms_threshold'] = self.detection_nms_threshold
        config['image_shape'] = self.image_shape
        return config
    def call(self, inputs):
        def refine_detections(inputs):
            # Refine one sample: (rois, classifications[, window]).
            rois = inputs[0]
            classifications = inputs[1]
            if len(inputs) > 2:
                image_bounding_box = inputs[2]
            else:
                # Default window: the whole image in normalized coordinates.
                image_bounding_box = tf.convert_to_tensor(np.array([0, 0, 1, 1]),
                                                          dtype=tf.float32)
            return refine_detections_graph(rois,
                                           classifications,
                                           image_bounding_box,
                                           np.array(self.bounding_box_std_dev),
                                           self.detection_min_confidence,
                                           self.max_detections,
                                           self.detection_nms_threshold)
        # Map the per-sample refinement over the batch dimension.
        detections = keras.layers.Lambda(lambda x: tf.map_fn(refine_detections, x, dtype=tf.float32))(inputs)
        return detections
    def compute_output_shape(self, input_shape):
        assert isinstance(input_shape, list)
        # BUGFIX: the layer accepts either two or three inputs (the image
        # bounding box is optional, see call()), so take the first shape by
        # index instead of unpacking exactly two entries, which raised a
        # ValueError whenever three inputs were supplied.
        roi_shape = input_shape[0]
        return (roi_shape[0], self.max_detections, 6)
| [
"tensorflow.sparse.to_dense",
"tensorflow.gather",
"tensorflow.nn.top_k",
"tensorflow.pad",
"tensorflow.reshape",
"tensorflow.to_int32",
"tensorflow.shape",
"tensorflow.equal",
"numpy.array",
"tensorflow.where",
"tensorflow.map_fn",
"tensorflow.unique",
"tensorflow.expand_dims"
] | [((1542, 1576), 'tensorflow.to_int32', 'tf.to_int32', (['classifications[:, 4]'], {}), '(classifications[:, 4])\n', (1553, 1576), True, 'import tensorflow as tf\n'), ((2546, 2572), 'tensorflow.gather', 'tf.gather', (['class_ids', 'keep'], {}), '(class_ids, keep)\n', (2555, 2572), True, 'import tensorflow as tf\n'), ((2594, 2623), 'tensorflow.gather', 'tf.gather', (['class_scores', 'keep'], {}), '(class_scores, keep)\n', (2603, 2623), True, 'import tensorflow as tf\n'), ((2643, 2672), 'tensorflow.gather', 'tf.gather', (['refined_rois', 'keep'], {}), '(refined_rois, keep)\n', (2652, 2672), True, 'import tensorflow as tf\n'), ((3744, 3836), 'tensorflow.map_fn', 'tf.map_fn', (['nms_keep_map', 'unique_pre_nms_class_ids'], {'dtype': 'tf.int64', 'parallel_iterations': '(1)'}), '(nms_keep_map, unique_pre_nms_class_ids, dtype=tf.int64,\n parallel_iterations=1)\n', (3753, 3836), True, 'import tensorflow as tf\n'), ((3907, 3933), 'tensorflow.reshape', 'tf.reshape', (['nms_keep', '[-1]'], {}), '(nms_keep, [-1])\n', (3917, 3933), True, 'import tensorflow as tf\n'), ((4301, 4330), 'tensorflow.gather', 'tf.gather', (['class_scores', 'keep'], {}), '(class_scores, keep)\n', (4310, 4330), True, 'import tensorflow as tf\n'), ((4484, 4508), 'tensorflow.gather', 'tf.gather', (['keep', 'top_ids'], {}), '(keep, top_ids)\n', (4493, 4508), True, 'import tensorflow as tf\n'), ((4954, 5004), 'tensorflow.pad', 'tf.pad', (['detections', '[(0, gap), (0, 0)]', '"""CONSTANT"""'], {}), "(detections, [(0, gap), (0, 0)], 'CONSTANT')\n", (4960, 5004), True, 'import tensorflow as tf\n'), ((5016, 5047), 'tensorflow.reshape', 'tf.reshape', (['detections', '(-1, 6)'], {}), '(detections, (-1, 6))\n', (5026, 5047), True, 'import tensorflow as tf\n'), ((2122, 2145), 'tensorflow.where', 'tf.where', (['(class_ids > 0)'], {}), '(class_ids > 0)\n', (2130, 2145), True, 'import tensorflow as tf\n'), ((2706, 2734), 'tensorflow.unique', 'tf.unique', (['pre_nms_class_ids'], {}), '(pre_nms_class_ids)\n', (2715, 
2734), True, 'import tensorflow as tf\n'), ((3467, 3534), 'tensorflow.pad', 'tf.pad', (['class_keep', '[(0, gap)]'], {'mode': '"""CONSTANT"""', 'constant_values': '(-1)'}), "(class_keep, [(0, gap)], mode='CONSTANT', constant_values=-1)\n", (3473, 3534), True, 'import tensorflow as tf\n'), ((4092, 4115), 'tensorflow.expand_dims', 'tf.expand_dims', (['keep', '(0)'], {}), '(keep, 0)\n', (4106, 4115), True, 'import tensorflow as tf\n'), ((4153, 4180), 'tensorflow.expand_dims', 'tf.expand_dims', (['nms_keep', '(0)'], {}), '(nms_keep, 0)\n', (4167, 4180), True, 'import tensorflow as tf\n'), ((4193, 4207), 'tensorflow.sparse.to_dense', 'to_dense', (['keep'], {}), '(keep)\n', (4201, 4207), False, 'from tensorflow.sparse import to_dense\n'), ((4414, 4469), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['class_scores_keep'], {'k': 'num_keep', 'sorted': '(True)'}), '(class_scores_keep, k=num_keep, sorted=True)\n', (4425, 4469), True, 'import tensorflow as tf\n'), ((2243, 2293), 'tensorflow.where', 'tf.where', (['(class_scores >= DETECTION_MIN_CONFIDENCE)'], {}), '(class_scores >= DETECTION_MIN_CONFIDENCE)\n', (2251, 2293), True, 'import tensorflow as tf\n'), ((2340, 2363), 'tensorflow.expand_dims', 'tf.expand_dims', (['keep', '(0)'], {}), '(keep, 0)\n', (2354, 2363), True, 'import tensorflow as tf\n'), ((2405, 2433), 'tensorflow.expand_dims', 'tf.expand_dims', (['conf_keep', '(0)'], {}), '(conf_keep, 0)\n', (2419, 2433), True, 'import tensorflow as tf\n'), ((2450, 2464), 'tensorflow.sparse.to_dense', 'to_dense', (['keep'], {}), '(keep)\n', (2458, 2464), False, 'from tensorflow.sparse import to_dense\n'), ((3043, 3071), 'tensorflow.gather', 'tf.gather', (['pre_nms_rois', 'ixs'], {}), '(pre_nms_rois, ixs)\n', (3052, 3071), True, 'import tensorflow as tf\n'), ((3089, 3119), 'tensorflow.gather', 'tf.gather', (['pre_nms_scores', 'ixs'], {}), '(pre_nms_scores, ixs)\n', (3098, 3119), True, 'import tensorflow as tf\n'), ((3292, 3318), 'tensorflow.gather', 'tf.gather', (['ixs', 
'class_keep'], {}), '(ixs, class_keep)\n', (3301, 3318), True, 'import tensorflow as tf\n'), ((3969, 3992), 'tensorflow.where', 'tf.where', (['(nms_keep > -1)'], {}), '(nms_keep > -1)\n', (3977, 3992), True, 'import tensorflow as tf\n'), ((4357, 4384), 'tensorflow.shape', 'tf.shape', (['class_scores_keep'], {}), '(class_scores_keep)\n', (4365, 4384), True, 'import tensorflow as tf\n'), ((4644, 4673), 'tensorflow.gather', 'tf.gather', (['refined_rois', 'keep'], {}), '(refined_rois, keep)\n', (4653, 4673), True, 'import tensorflow as tf\n'), ((4913, 4933), 'tensorflow.shape', 'tf.shape', (['detections'], {}), '(detections)\n', (4921, 4933), True, 'import tensorflow as tf\n'), ((2911, 2948), 'tensorflow.equal', 'tf.equal', (['pre_nms_class_ids', 'class_id'], {}), '(pre_nms_class_ids, class_id)\n', (2919, 2948), True, 'import tensorflow as tf\n'), ((3422, 3442), 'tensorflow.shape', 'tf.shape', (['class_keep'], {}), '(class_keep)\n', (3430, 3442), True, 'import tensorflow as tf\n'), ((4749, 4778), 'tensorflow.gather', 'tf.gather', (['class_scores', 'keep'], {}), '(class_scores, keep)\n', (4758, 4778), True, 'import tensorflow as tf\n'), ((6955, 6990), 'numpy.array', 'np.array', (['self.bounding_box_std_dev'], {}), '(self.bounding_box_std_dev)\n', (6963, 6990), True, 'import numpy as np\n'), ((4695, 4721), 'tensorflow.gather', 'tf.gather', (['class_ids', 'keep'], {}), '(class_ids, keep)\n', (4704, 4721), True, 'import tensorflow as tf\n'), ((6639, 6661), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6647, 6661), True, 'import numpy as np\n'), ((7255, 7304), 'tensorflow.map_fn', 'tf.map_fn', (['refine_detections', 'x'], {'dtype': 'tf.float32'}), '(refine_detections, x, dtype=tf.float32)\n', (7264, 7304), True, 'import tensorflow as tf\n')] |
"""Test animations."""
# pylint: disable=wrong-import-position
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from celluloid import Camera
def test_single():
    """Test plt.figure()"""
    num_frames = 10
    camera = Camera(plt.figure())
    for _ in range(num_frames):
        plt.plot(range(5))
        plt.plot(-np.arange(5))
        frame_artists = camera.snap()
        assert len(frame_artists) == 2
    # pylint: disable=protected-access
    total_artists = sum(len(photo) for photo in camera._photos)
    assert total_artists == 2 * num_frames
    animation = camera.animate()
    assert len(list(animation.frame_seq)) == num_frames
def test_two_axes():
    """Test subplots."""
    fig, (ax_top, ax_bottom) = plt.subplots(2)
    camera = Camera(fig)
    # First frame: flat zero lines on both axes.
    ax_top.plot(np.zeros(100))
    ax_bottom.plot(np.zeros(100))
    assert len(camera.snap()) == 2
    # Second frame: flat one lines on both axes.
    ax_top.plot(np.ones(100))
    ax_bottom.plot(np.ones(100))
    camera.snap()
    # pylint: disable=protected-access
    assert sum(len(photo) for photo in camera._photos) == 4
    animation = camera.animate()
    assert len(list(animation.frame_seq)) == 2
def test_legends():
    """Test subplots."""
    camera = Camera(plt.figure())
    labels = ('hello', 'world')
    for label in labels:
        plt.legend(plt.plot(range(5)), [label])
        assert len(camera.snap()) == 2
    # pylint: disable=protected-access
    # Each captured frame keeps its own legend text.
    for frame_idx, label in enumerate(labels):
        assert camera._photos[frame_idx][1].texts[0]._text == label
    # pylint: disable=protected-access
    assert sum(len(photo) for photo in camera._photos) == 4
    animation = camera.animate()
    assert len(list(animation.frame_seq)) == 2
def test_images():
    """Test subplots."""
    camera = Camera(plt.figure())
    for image in (np.ones((5, 5)), np.zeros((5, 5))):
        plt.imshow(image)
        assert len(camera.snap()) == 1
    # pylint: disable=protected-access
    assert sum(len(photo) for photo in camera._photos) == 2
    animation = camera.animate()
    assert len(list(animation.frame_seq)) == 2
| [
"celluloid.Camera",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.subplots"
] | [((100, 121), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (114, 121), False, 'import matplotlib\n'), ((248, 260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (258, 260), True, 'from matplotlib import pyplot as plt\n'), ((274, 285), 'celluloid.Camera', 'Camera', (['fig'], {}), '(fig)\n', (280, 285), False, 'from celluloid import Camera\n'), ((668, 683), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (680, 683), True, 'from matplotlib import pyplot as plt\n'), ((697, 708), 'celluloid.Camera', 'Camera', (['fig'], {}), '(fig)\n', (703, 708), False, 'from celluloid import Camera\n'), ((726, 739), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (734, 739), True, 'import numpy as np\n'), ((758, 771), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (766, 771), True, 'import numpy as np\n'), ((848, 860), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (855, 860), True, 'import numpy as np\n'), ((879, 891), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (886, 891), True, 'import numpy as np\n'), ((1151, 1163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1161, 1163), True, 'from matplotlib import pyplot as plt\n'), ((1758, 1770), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1768, 1770), True, 'from matplotlib import pyplot as plt\n'), ((1788, 1803), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1795, 1803), True, 'import numpy as np\n'), ((1878, 1894), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (1886, 1894), True, 'import numpy as np\n'), ((356, 368), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (365, 368), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
import numpy as np
class G29Control():
    """Bridges Logitech G29 wheel/pedal joystick messages to base velocity commands."""

    def __init__(self):
        # Mapping: 0 -> 450 degrees of wheel rotation maps onto 0 -> 1.
        ratio_constant = 7.854  # orientation:steering = 1:1
        self.steering_ratio = ratio_constant / 2.0  # orientation:steering = 1:2
        # ROS wiring: publish velocity commands to the mobile_base controller
        # and subscribe to joystick input on "G29/joy".
        self.g29_pub = rospy.Publisher('/base_controller/cmd_vel', Twist, queue_size=1)
        self.joy_subscriber = rospy.Subscriber("G29/joy", Joy, self.g29_callback)

    def g29_callback(self, data):
        """Translate one Joy message into a Twist and publish it."""
        wheel = data.axes[0]
        throttle = data.axes[2]
        brake = data.axes[3]
        cmd = Twist()
        cmd.linear.x = (throttle - brake) / 2.0  # net pedal input in [-1, 1]
        steer = self.steering_ratio * wheel
        if cmd.linear.x < 0:
            steer = -steer  # flip direction when going backwards
        cmd.angular.z = np.clip(steer, -1.0, 1.0)  # limit to [-1, 1]
        self.g29_pub.publish(cmd)
if __name__ == "__main__":
    # Start the ROS node that bridges G29 joystick input to base velocity.
    rospy.init_node('g29_control_node')
    g29_control = G29Control()
    # Idle at 60 Hz until ROS shuts down; subscriber callbacks fire in between.
    r = rospy.Rate(60)
    while not rospy.is_shutdown():
        r.sleep()
| [
"rospy.Subscriber",
"rospy.Publisher",
"geometry_msgs.msg.Twist",
"rospy.Rate",
"numpy.clip",
"rospy.is_shutdown",
"rospy.init_node"
] | [((1296, 1331), 'rospy.init_node', 'rospy.init_node', (['"""g29_control_node"""'], {}), "('g29_control_node')\n", (1311, 1331), False, 'import rospy\n'), ((1372, 1386), 'rospy.Rate', 'rospy.Rate', (['(60)'], {}), '(60)\n', (1382, 1386), False, 'import rospy\n'), ((453, 517), 'rospy.Publisher', 'rospy.Publisher', (['"""/base_controller/cmd_vel"""', 'Twist'], {'queue_size': '(1)'}), "('/base_controller/cmd_vel', Twist, queue_size=1)\n", (468, 517), False, 'import rospy\n'), ((602, 653), 'rospy.Subscriber', 'rospy.Subscriber', (['"""G29/joy"""', 'Joy', 'self.g29_callback'], {}), "('G29/joy', Joy, self.g29_callback)\n", (618, 653), False, 'import rospy\n'), ((847, 854), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (852, 854), False, 'from geometry_msgs.msg import Twist\n'), ((1135, 1170), 'numpy.clip', 'np.clip', (['twist.angular.z', '(-1.0)', '(1.0)'], {}), '(twist.angular.z, -1.0, 1.0)\n', (1142, 1170), True, 'import numpy as np\n'), ((1401, 1420), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1418, 1420), False, 'import rospy\n')] |
"""
A module for testing/debugging call routines
"""
import logging
import os
import traceback
import numpy as np
from datetime import datetime
# Module-level logger; emit INFO and above.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def print_errors(func):
    """Decorator that logs the full traceback of any exception raised by func.

    The exception is re-raised unchanged, so callers still observe it; this
    only guarantees the traceback also lands in the module logger.
    """
    def new_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # BUG FIX: traceback.print_exc() writes to stderr and returns
            # None, so the old code logged the string "None".  format_exc()
            # returns the traceback text, which is what the logger needs.
            logger.error(traceback.format_exc())
            # Bare `raise` preserves the original traceback exactly.
            raise
    return new_func
def print_arr_info(state):
    """Log the shape and memory order (Fortran vs C) of each array in state."""
    for varname, arr in state.items():
        shape, fortran = arr.shape, np.isfortran(arr)
        logger.info(f"{varname}: shape[{shape}] isfortran[{fortran}]")
def print_location_ping(state):
    # Marker used to verify a call routine actually reached this point;
    # `state` is accepted for signature compatibility but is unused.
    logger.info("Ping reached!")
def dump_state(state):
    """Dump all arrays in ``state`` to a compressed .npz file.

    The output directory comes from the STATE_DUMP_PATH environment variable
    (default: current directory).  The filename encodes a timestamp plus the
    tile rank read from ``state["rank"]``; if no rank is present the dump is
    skipped with a log message.
    """
    DUMP_PATH = str(os.environ.get("STATE_DUMP_PATH", "."))
    # BUG FIX: dict.get() never raises KeyError -- it returns None -- so the
    # old `try: state.get("rank") / except KeyError` guard was dead code and
    # a missing "rank" crashed later on None.squeeze().  Index the dict so
    # KeyError actually fires when "rank" is absent.
    try:
        rank = state["rank"]
    except KeyError:
        logger.info("Could not save state. No rank included in state.")
        return
    time_str = datetime.now().strftime("%Y%m%d.%H%M%S")
    filename = f"state_dump.{time_str}.tile{int(rank.squeeze()[0])}.npz"
    outfile = os.path.join(DUMP_PATH, filename)
    logger.info(f"Dumping state to {outfile}")
    np.savez(outfile, **state)
| [
"traceback.print_exc",
"numpy.isfortran",
"datetime.datetime.now",
"os.environ.get",
"numpy.savez",
"os.path.join",
"logging.getLogger"
] | [((154, 181), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'import logging\n'), ((1050, 1083), 'os.path.join', 'os.path.join', (['DUMP_PATH', 'filename'], {}), '(DUMP_PATH, filename)\n', (1062, 1083), False, 'import os\n'), ((1135, 1161), 'numpy.savez', 'np.savez', (['outfile'], {}), '(outfile, **state)\n', (1143, 1161), True, 'import numpy as np\n'), ((715, 753), 'os.environ.get', 'os.environ.get', (['"""STATE_DUMP_PATH"""', '"""."""'], {}), "('STATE_DUMP_PATH', '.')\n", (729, 753), False, 'import os\n'), ((922, 936), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (934, 936), False, 'from datetime import datetime\n'), ((383, 404), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (402, 404), False, 'import traceback\n'), ((579, 596), 'numpy.isfortran', 'np.isfortran', (['arr'], {}), '(arr)\n', (591, 596), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # What's this TensorFlow business?
#
# You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.
#
# For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, TensorFlow (or PyTorch, if you choose to work with that notebook).
# #### What is it?
# TensorFlow is a system for executing computational graphs over Tensor objects, with native support for performing backpropagation for its Variables. In it, we work with Tensors which are n-dimensional arrays analogous to the numpy ndarray.
#
# #### Why?
#
# * Our code will now run on GPUs! Much faster training. Writing your own modules to run on GPUs is beyond the scope of this class, unfortunately.
# * We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand.
# * We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :)
# * We want you to be exposed to the sort of deep learning code you might run into in academia or industry.
# ## How will I learn TensorFlow?
#
# TensorFlow has many excellent tutorials available, including those from [Google themselves](https://www.tensorflow.org/get_started/get_started).
#
# Otherwise, this notebook will walk you through much of what you need to do to train models in TensorFlow. See the end of the notebook for some links to helpful tutorials if you want to learn more or need further clarification on topics that aren't fully explained here.
#
# **NOTE: This notebook is meant to teach you the latest version of Tensorflow 2.0. Most examples on the web today are still in 1.x, so be careful not to confuse the two when looking up documentation**.
#
# ## Install Tensorflow 2.0
# Tensorflow 2.0 is still not in a fully 100% stable release, but it's still usable and more intuitive than TF 1.x. Please make sure you have it installed before moving on in this notebook! Here are some steps to get started:
#
# 1. Have the latest version of Anaconda installed on your machine.
# 2. Create a new conda environment starting from Python 3.7. In this setup example, we'll call it `tf_20_env`.
# 3. Run the command: `source activate tf_20_env`
# 4. Then pip install TF 2.0 as described here: https://www.tensorflow.org/install/pip
#
# A guide on creating Anaconda enviornments: https://uoa-eresearch.github.io/eresearch-cookbook/recipe/2014/11/20/conda/
#
# This will give you a new environment to play in TF 2.0. Generally, if you plan to also use TensorFlow in your other projects, you might also want to keep a separate Conda environment or virtualenv in Python 3.7 that has Tensorflow 1.9, so you can switch back and forth at will.
# # Table of Contents
#
# This notebook has 5 parts. We will walk through TensorFlow at **three different levels of abstraction**, which should help you better understand it and prepare you for working on your project.
#
# 1. Part I, Preparation: load the CIFAR-10 dataset.
# 2. Part II, Barebone TensorFlow: **Abstraction Level 1**, we will work directly with low-level TensorFlow graphs.
# 3. Part III, Keras Model API: **Abstraction Level 2**, we will use `tf.keras.Model` to define arbitrary neural network architecture.
# 4. Part IV, Keras Sequential + Functional API: **Abstraction Level 3**, we will use `tf.keras.Sequential` to define a linear feed-forward network very conveniently, and then explore the functional libraries for building unique and uncommon models that require more flexibility.
# 5. Part V, CIFAR-10 open-ended challenge: please implement your own network to get as high accuracy as possible on CIFAR-10. You can experiment with any layer, optimizer, hyperparameters or other advanced features.
#
# We will discuss Keras in more detail later in the notebook.
#
# Here is a table of comparison:
#
# | API | Flexibility | Convenience |
# |---------------|-------------|-------------|
# | Barebone | High | Low |
# | `tf.keras.Model` | High | Medium |
# | `tf.keras.Sequential` | Low | High |
# # Part I: Preparation
#
# First, we load the CIFAR-10 dataset. This might take a few minutes to download the first time you run it, but after that the files should be cached on disk and loading should be faster.
#
# In previous parts of the assignment we used CS231N-specific code to download and read the CIFAR-10 dataset; however the `tf.keras.datasets` package in TensorFlow provides prebuilt utility functions for loading many common datasets.
#
# For the purposes of this assignment we will still write our own code to preprocess the data and iterate through it in minibatches. The `tf.data` package in TensorFlow provides tools for automating this process, but working with this package adds extra complication and is beyond the scope of this notebook. However using `tf.data` can be much more efficient than the simple approach used in this notebook, so you should consider using it for your project.
# In[1]:
import os
import tensorflow as tf
import numpy as np
import math
import timeit
import matplotlib.pyplot as plt
#config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#get_ipython().run_line_magic('matplotlib', 'inline')
# Opt in to on-demand GPU memory growth so TensorFlow does not reserve all
# device memory up front (replaces the TF1 ConfigProto allow_growth flag).
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)
# In[2]:
def load_cifar10(num_training=49000, num_validation=1000, num_test=10000):
    """
    Download (or read from the local cache) the CIFAR-10 dataset, carve out
    train / validation / test splits, and standardize the pixels using the
    training-set statistics.  Same preprocessing as for the SVM, condensed
    into a single function.
    """
    # Raw data as float32 images and flat int32 label vectors.
    (X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
    X_train = np.asarray(X_train, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32).flatten()
    X_test = np.asarray(X_test, dtype=np.float32)
    y_test = np.asarray(y_test, dtype=np.int32).flatten()

    # Carve the validation split off the end of the training portion.
    X_val = X_train[num_training:num_training + num_validation]
    y_val = y_train[num_training:num_training + num_validation]
    X_train = X_train[:num_training]
    y_train = y_train[:num_training]
    X_test = X_test[:num_test]
    y_test = y_test[:num_test]

    # Normalize: subtract the mean pixel and divide by the std, both computed
    # over the training set only.
    mean_pixel = X_train.mean(axis=(0, 1, 2), keepdims=True)
    std_pixel = X_train.std(axis=(0, 1, 2), keepdims=True)
    X_train = (X_train - mean_pixel) / std_pixel
    X_val = (X_val - mean_pixel) / std_pixel
    X_test = (X_test - mean_pixel) / std_pixel

    return X_train, y_train, X_val, y_val, X_test, y_test
# If there are errors with SSL downloading involving self-signed certificates,
# it may be that your Python version was recently installed on the current machine.
# See: https://github.com/tensorflow/tensorflow/issues/10779
# To fix, run the command: /Applications/Python\ 3.7/Install\ Certificates.command
# ...replacing paths as necessary.
# Invoke the above function to get our data.
# (num, height, width) axis tuple -- appears unused in this chunk; kept as-is.
NHW = (0, 1, 2)
X_train, y_train, X_val, y_val, X_test, y_test = load_cifar10()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape, y_train.dtype)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# In[3]:
class Dataset(object):
    def __init__(self, X, y, batch_size, shuffle=False):
        """
        Construct a Dataset object to iterate over data X and labels y

        Inputs:
        - X: Numpy array of data, of any shape
        - y: Numpy array of labels, of any shape but with y.shape[0] == X.shape[0]
        - batch_size: Integer giving number of elements per minibatch
        - shuffle: (optional) Boolean, whether to shuffle the data on each epoch
        """
        assert X.shape[0] == y.shape[0], 'Got different numbers of data and labels'
        self.X, self.y = X, y
        self.batch_size, self.shuffle = batch_size, shuffle

    def __iter__(self):
        """Yield (X_batch, y_batch) minibatches; reshuffles on each epoch if enabled."""
        N, B = self.X.shape[0], self.batch_size
        idxs = np.arange(N)
        if self.shuffle:
            np.random.shuffle(idxs)
        # BUG FIX: the shuffled index order must actually be used when
        # slicing.  The original sliced self.X/self.y directly, so
        # shuffle=True silently served the data in its original order.
        return iter((self.X[idxs[i:i+B]], self.y[idxs[i:i+B]])
                    for i in range(0, N, B))
# Wrap the splits in minibatch iterators; only the training set is shuffled.
train_dset = Dataset(X_train, y_train, batch_size=64, shuffle=True)
val_dset = Dataset(X_val, y_val, batch_size=64, shuffle=False)
test_dset = Dataset(X_test, y_test, batch_size=64)
# In[4]:
# We can iterate through a dataset like this:
# (smoke test only -- stop after a handful of minibatches)
for t, (x, y) in enumerate(train_dset):
    print(t, x.shape, y.shape)
    if t > 5: break
# You can optionally **use GPU by setting the flag to True below**. It's not necessary to use a GPU for this assignment; if you are working on Google Cloud then we recommend that you do not use a GPU, as it will be significantly more expensive.
# In[5]:
# Set up some global variables
USE_GPU = True
if USE_GPU:
    device = '/device:GPU:0'  # TensorFlow device string for the first GPU
else:
    device = '/cpu:0'
# Constant to control how often we print when training models
print_every = 100
print('Using device: ', device)
# # Part II: Barebones TensorFlow
# TensorFlow ships with various high-level APIs which make it very convenient to define and train neural networks; we will cover some of these constructs in Part III and Part IV of this notebook. In this section we will start by building a model with basic TensorFlow constructs to help you better understand what's going on under the hood of the higher-level APIs.
#
# **"Barebones Tensorflow" is important to understanding the building blocks of TensorFlow, but much of it involves concepts from TensorFlow 1.x.** We will be working with legacy modules such as `tf.Variable`.
#
# Therefore, please read and understand the differences between legacy (1.x) TF and the new (2.0) TF.
#
# ### Historical background on TensorFlow 1.x
#
# TensorFlow 1.x is primarily a framework for working with **static computational graphs**. Nodes in the computational graph are Tensors which will hold n-dimensional arrays when the graph is run; edges in the graph represent functions that will operate on Tensors when the graph is run to actually perform useful computation.
#
# Before Tensorflow 2.0, we had to configure the graph into two phases. There are plenty of tutorials online that explain this two-step process. The process generally looks like the following for TF 1.x:
# 1. **Build a computational graph that describes the computation that you want to perform**. This stage doesn't actually perform any computation; it just builds up a symbolic representation of your computation. This stage will typically define one or more `placeholder` objects that represent inputs to the computational graph.
# 2. **Run the computational graph many times.** Each time the graph is run (e.g. for one gradient descent step) you will specify which parts of the graph you want to compute, and pass a `feed_dict` dictionary that will give concrete values to any `placeholder`s in the graph.
#
# ### The new paradigm in Tensorflow 2.0
# Now, with Tensorflow 2.0, we can simply adopt a functional form that is more Pythonic and similar in spirit to PyTorch and direct Numpy operation. Instead of the 2-step paradigm with computation graphs, making it (among other things) easier to debug TF code. You can read more details at https://www.tensorflow.org/guide/eager.
#
# The main difference between the TF 1.x and 2.0 approach is that the 2.0 approach doesn't make use of `tf.Session`, `tf.run`, `placeholder`, `feed_dict`. To get more details of what's different between the two version and how to convert between the two, check out the official migration guide: https://www.tensorflow.org/alpha/guide/migration_guide
#
# Later, in the rest of this notebook we'll focus on this new, simpler approach.
# ### TensorFlow warmup: Flatten Function
#
# We can see this in action by defining a simple `flatten` function that will reshape image data for use in a fully-connected network.
#
# In TensorFlow, data for convolutional feature maps is typically stored in a Tensor of shape N x H x W x C where:
#
# - N is the number of datapoints (minibatch size)
# - H is the height of the feature map
# - W is the width of the feature map
# - C is the number of channels in the feature map
#
# This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we use fully connected affine layers to process the image, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "flatten" operation to collapse the `H x W x C` values per representation into a single long vector.
#
# Notice the `tf.reshape` call has the target shape as `(N, -1)`, meaning it will reshape/keep the first dimension to be N, and then infer as necessary what the second dimension is in the output, so we can collapse the remaining dimensions from the input properly.
#
# **NOTE**: TensorFlow and PyTorch differ on the default Tensor layout; TensorFlow uses N x H x W x C but PyTorch uses N x C x H x W.
# In[6]:
def flatten(x):
    """
    Collapse all non-batch dimensions of a Tensor into one.

    Input:
    - TensorFlow Tensor of shape (N, D1, ..., DM)
    Output:
    - TensorFlow Tensor of shape (N, D1 * ... * DM)
    """
    batch_size = tf.shape(x)[0]
    # -1 lets TensorFlow infer the flattened feature dimension.
    return tf.reshape(x, (batch_size, -1))
# In[7]:
def test_flatten():
    """Sanity-check flatten() on a concrete (2, 3, 4) numpy input."""
    x_np = np.arange(24).reshape((2, 3, 4))
    print('x_np:\n', x_np, '\n')
    flattened = flatten(x_np)
    print('x_flat_np:\n', flattened, '\n')

test_flatten()
# ### Barebones TensorFlow: Define a Two-Layer Network
# We will now implement our first neural network with TensorFlow: a fully-connected ReLU network with two hidden layers and no biases on the CIFAR10 dataset. For now we will use only low-level TensorFlow operators to define the network; later we will see how to use the higher-level abstractions provided by `tf.keras` to simplify the process.
#
# We will define the forward pass of the network in the function `two_layer_fc`; this will accept TensorFlow Tensors for the inputs and weights of the network, and return a TensorFlow Tensor for the scores.
#
# After defining the network architecture in the `two_layer_fc` function, we will test the implementation by checking the shape of the output.
#
# **It's important that you read and understand this implementation.**
# In[8]:
def two_layer_fc(x, params):
    """
    Forward pass of a fully-connected ReLU network with one hidden layer and
    no biases.  TensorFlow takes care of computing the gradients for us.

    Inputs:
    - x: TensorFlow Tensor of shape (N, d1, ..., dM) giving a minibatch of
      input data, where d1 * ... * dM = D.
    - params: A list [w1, w2] of TensorFlow Tensors giving weights for the
      network, where w1 has shape (D, H) and w2 has shape (H, C).

    Returns:
    - scores: A TensorFlow Tensor of shape (N, C) giving classification scores
      for the input data x.
    """
    w1, w2 = params
    flat = flatten(x)                        # (N, D)
    hidden = tf.nn.relu(tf.matmul(flat, w1))  # (N, H)
    return tf.matmul(hidden, w2)            # (N, C)
# In[9]:
def two_layer_fc_test():
    """Shape-check two_layer_fc on a batch of zeros."""
    hidden_layer_size = 42
    # tf.device pins these Tensors (and the ops on them) to a CPU or GPU.
    with tf.device(device):
        x = tf.zeros((64, 32, 32, 3))
        weights = [
            tf.zeros((32 * 32 * 3, hidden_layer_size)),
            tf.zeros((hidden_layer_size, 10)),
        ]
        # Forward pass of the network.
        scores = two_layer_fc(x, weights)
        print(scores.shape)

two_layer_fc_test()
# ### Barebones TensorFlow: Three-Layer ConvNet
# Here you will complete the implementation of the function `three_layer_convnet` which will perform the forward pass of a three-layer convolutional network. The network should have the following architecture:
#
# 1. A convolutional layer (with bias) with `channel_1` filters, each with shape `KW1 x KH1`, and zero-padding of two
# 2. ReLU nonlinearity
# 3. A convolutional layer (with bias) with `channel_2` filters, each with shape `KW2 x KH2`, and zero-padding of one
# 4. ReLU nonlinearity
# 5. Fully-connected layer with bias, producing scores for `C` classes.
#
# **HINT**: For convolutions: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/nn/conv2d; be careful with padding!
#
# **HINT**: For biases: https://www.tensorflow.org/performance/xla/broadcasting
# In[10]:
def three_layer_convnet(x, params):
    """
    Forward pass of a three-layer ConvNet:
    conv (pad 2) -> ReLU -> conv (pad 1) -> ReLU -> fully-connected.

    Inputs:
    - x: A TensorFlow Tensor of shape (N, H, W, 3) giving a minibatch of images
    - params: A list of TensorFlow Tensors giving the weights and biases:
      - conv_w1: shape (KH1, KW1, 3, channel_1), first conv weights
      - conv_b1: shape (channel_1,), first conv biases
      - conv_w2: shape (KH2, KW2, channel_1, channel_2), second conv weights
      - conv_b2: shape (channel_2,), second conv biases
      - fc_w: weights for the fully-connected layer
      - fc_b: biases for the fully-connected layer

    Returns:
    - scores: A TensorFlow Tensor of shape (N, C) of classification scores.
    """
    conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params

    # First conv layer with explicit zero-padding of two on height and width.
    out = tf.nn.conv2d(input=x, filters=conv_w1, strides=1,
                       padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
                       data_format='NHWC', name="Prvi")
    out = tf.nn.relu(tf.nn.bias_add(out, conv_b1))

    # Second conv layer with zero-padding of one.
    out = tf.nn.conv2d(input=out, filters=conv_w2, strides=1,
                       padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
                       data_format='NHWC', name="Drugi")
    out = tf.nn.relu(tf.nn.bias_add(out, conv_b2))

    # Fully-connected layer over the flattened activations.
    scores = tf.nn.bias_add(tf.matmul(flatten(out), fc_w), fc_b)
    return scores
# After defining the forward pass of the three-layer ConvNet above, run the following cell to test your implementation. Like the two-layer network, we run the graph on a batch of zeros just to make sure the function doesn't crash, and produces outputs of the correct shape.
#
# When you run this function, `scores_np` should have shape `(64, 10)`.
# In[11]:
def three_layer_convnet_test():
    """Shape-check three_layer_convnet on a batch of zeros."""
    with tf.device(device):
        x = tf.zeros((64, 32, 32, 3))
        params = [
            tf.zeros((5, 5, 3, 6)),       # conv_w1
            tf.zeros((6,)),               # conv_b1
            tf.zeros((3, 3, 6, 9)),       # conv_w2
            tf.zeros((9,)),               # conv_b2
            tf.zeros((32 * 32 * 9, 10)),  # fc_w
            tf.zeros((10,)),              # fc_b
        ]
        scores = three_layer_convnet(x, params)

        # Inputs to convolutional layers are 4-dimensional arrays with shape
        # [batch_size, height, width, channels]
        print('scores_np has shape: ', scores.shape)

three_layer_convnet_test()
# ### Barebones TensorFlow: Training Step
#
# We now define the `training_step` function performs a single training step. This will take three basic steps:
#
# 1. Compute the loss
# 2. Compute the gradient of the loss with respect to all network weights
# 3. Make a weight update step using (stochastic) gradient descent.
#
#
# We need to use a few new TensorFlow functions to do all of this:
# - For computing the cross-entropy loss we'll use `tf.nn.sparse_softmax_cross_entropy_with_logits`: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits
#
# - For averaging the loss across a minibatch of data we'll use `tf.reduce_mean`:
# https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/reduce_mean
#
# - For computing gradients of the loss with respect to the weights we'll use `tf.GradientTape` (useful for Eager execution): https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/GradientTape
#
# - We'll mutate the weight values stored in a TensorFlow Tensor using `tf.assign_sub` ("sub" is for subtraction): https://www.tensorflow.org/api_docs/python/tf/assign_sub
#
# In[12]:
def training_step(model_fn, x, y, params, learning_rate):
    """One SGD step: forward pass, mean cross-entropy loss, gradient, update.

    Returns the scalar loss for this minibatch.
    """
    with tf.GradientTape() as tape:
        logits = model_fn(x, params)  # Forward pass of the model
        per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
        total_loss = tf.reduce_mean(per_example_loss)

    gradients = tape.gradient(total_loss, params)

    # Vanilla gradient descent, mutating each weight in place: w <- w - lr * dw.
    for param, gradient in zip(params, gradients):
        param.assign_sub(learning_rate * gradient)

    return total_loss
# In[13]:
def train_part2(model_fn, init_fn, learning_rate):
    """
    Train a model on CIFAR-10 with vanilla SGD.

    Inputs:
    - model_fn: A Python function performing the model's forward pass with
      signature scores = model_fn(x, params), where x is a Tensor holding a
      minibatch of images, params is a list of weight Tensors, and scores is
      a Tensor of shape (N, C) of class scores.
    - init_fn: A Python function with signature params = init_fn() returning
      the list of (randomly initialized) weight Tensors for the model.
    - learning_rate: Python float giving the learning rate to use for SGD.
    """
    params = init_fn()  # Initialize the model parameters
    for iteration, (x_np, y_np) in enumerate(train_dset):
        # One SGD step on this minibatch.
        loss = training_step(model_fn, x_np, y_np, params, learning_rate)
        # Periodically report the loss and the validation accuracy.
        if iteration % print_every == 0:
            print('Iteration %d, loss = %.4f' % (iteration, loss))
            check_accuracy(val_dset, x_np, model_fn, params)
# In[14]:
def check_accuracy(dset, x, model_fn, params):
    """
    Check accuracy of a classification model, e.g. for validation.

    Inputs:
    - dset: A Dataset object against which to check accuracy
    - x: A TensorFlow placeholder Tensor where input images should be fed
    - model_fn: the Model we will be calling to make predictions on x
    - params: parameters for the model_fn to work with

    Returns: Nothing, but prints the accuracy of the model
    """
    num_correct = 0
    num_samples = 0
    for x_batch, y_batch in dset:
        # Predicted class is the argmax over the score vector for each sample.
        predictions = model_fn(x_batch, params).numpy().argmax(axis=1)
        num_samples += x_batch.shape[0]
        num_correct += (predictions == y_batch).sum()
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc))
# ### Barebones TensorFlow: Initialization
# We'll use the following utility method to initialize the weight matrices for our models using Kaiming's normalization method.
#
# [1] He et al, *Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
# *, ICCV 2015, https://arxiv.org/abs/1502.01852
# In[15]:
def create_matrix_with_kaiming_normal(shape):
    """
    Sample a weight tensor using Kaiming (He) normal initialization [1].

    Inputs:
    - shape: tuple of 2 ints (fan_in, fan_out) for a fully-connected layer,
      or 4 ints (KH, KW, C_in, C_out) for a convolutional layer.

    Returns: A TensorFlow Tensor of the given shape drawn from N(0, 2 / fan_in).

    Raises:
    - ValueError: if shape is not length 2 or 4. (The original code fell
      through with `fan_in` unbound, producing a confusing UnboundLocalError.)
    """
    if len(shape) == 2:
        fan_in = shape[0]
    elif len(shape) == 4:
        # For conv kernels, fan_in = receptive-field size * input channels.
        fan_in = np.prod(shape[:3])
    else:
        raise ValueError('shape must have length 2 or 4, got %d' % len(shape))
    return tf.keras.backend.random_normal(shape) * np.sqrt(2.0 / fan_in)
# ### Barebones TensorFlow: Train a Two-Layer Network
# We are finally ready to use all of the pieces defined above to train a two-layer fully-connected network on CIFAR-10.
#
# We just need to define a function to initialize the weights of the model, and call `train_part2`.
#
# Defining the weights of the network introduces another important piece of TensorFlow API: `tf.Variable`. A TensorFlow Variable is a Tensor whose value is stored in the graph and persists across runs of the computational graph; however unlike constants defined with `tf.zeros` or `tf.random_normal`, the values of a Variable can be mutated as the graph runs; these mutations will persist across graph runs. Learnable parameters of the network are usually stored in Variables.
#
# You don't need to tune any hyperparameters, but you should achieve validation accuracies above 40% after one epoch of training.
# In[16]:
def two_layer_fc_init():
    """
    Initialize the weights of a two-layer network, for use with the
    two_layer_network function defined above.

    Inputs: None

    Returns: A list of:
    - w1: tf.Variable with Kaiming-initialized weights for the first layer
    - w2: tf.Variable with Kaiming-initialized weights for the second layer
    """
    input_dim, hidden_dim, num_classes = 3 * 32 * 32, 4000, 10
    w1 = tf.Variable(create_matrix_with_kaiming_normal((input_dim, hidden_dim)))
    w2 = tf.Variable(create_matrix_with_kaiming_normal((hidden_dim, num_classes)))
    return [w1, w2]
# Train the two-layer fully-connected network from Part II with plain SGD.
learning_rate = 1e-2
train_part2(two_layer_fc, two_layer_fc_init, learning_rate)
# ### Barebones TensorFlow: Train a three-layer ConvNet
# We will now use TensorFlow to train a three-layer ConvNet on CIFAR-10.
#
# You need to implement the `three_layer_convnet_init` function. Recall that the architecture of the network is:
#
# 1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding 2
# 2. ReLU
# 3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding 1
# 4. ReLU
# 5. Fully-connected layer (with bias) to compute scores for 10 classes
#
# You don't need to do any hyperparameter tuning, but you should see validation accuracies above 43% after one epoch of training.
# In[17]:
def three_layer_convnet_init():
    """
    Initialize the weights of a Three-Layer ConvNet, for use with the
    three_layer_convnet function defined above.

    Inputs: None

    Returns a list containing:
    - conv_w1: tf.Variable, (5, 5, 3, 32) weights for the first conv layer
    - conv_b1: tf.Variable, (32,) biases for the first conv layer
    - conv_w2: tf.Variable, (3, 3, 32, 16) weights for the second conv layer
    - conv_b2: tf.Variable, (16,) biases for the second conv layer
    - fc_w: tf.Variable, (32 * 32 * 16, 10) weights for the FC layer
    - fc_b: tf.Variable, (10,) biases for the FC layer
    """
    def kaiming_var(shape):
        # Weights use Kaiming-normal init, wrapped in a Variable so they train.
        return tf.Variable(create_matrix_with_kaiming_normal(shape))

    def zero_bias(n):
        # Biases start at zero.
        return tf.Variable(tf.zeros((n,)))

    return [
        kaiming_var((5, 5, 3, 32)), zero_bias(32),
        kaiming_var((3, 3, 32, 16)), zero_bias(16),
        # Zero-padding keeps spatial size at 32x32, so the FC input is 32*32*16.
        kaiming_var((32 * 32 * 16, 10)), zero_bias(10),
    ]
# Train the three-layer ConvNet from Part II with plain SGD.
learning_rate = 3e-3
train_part2(three_layer_convnet, three_layer_convnet_init, learning_rate)
# # Part III: Keras Model Subclassing API
#
# Implementing a neural network using the low-level TensorFlow API is a good way to understand how TensorFlow works, but it's a little inconvenient - we had to manually keep track of all Tensors holding learnable parameters. This was fine for a small network, but could quickly become unweildy for a large complex model.
#
# Fortunately TensorFlow 2.0 provides higher-level APIs such as `tf.keras` which make it easy to build models out of modular, object-oriented layers. Further, TensorFlow 2.0 uses eager execution that evaluates operations immediately, without explicitly constructing any computational graphs. This makes it easy to write and debug models, and reduces the boilerplate code.
#
# In this part of the notebook we will define neural network models using the `tf.keras.Model` API. To implement your own model, you need to do the following:
#
# 1. Define a new class which subclasses `tf.keras.Model`. Give your class an intuitive name that describes it, like `TwoLayerFC` or `ThreeLayerConvNet`.
# 2. In the initializer `__init__()` for your new class, define all the layers you need as class attributes. The `tf.keras.layers` package provides many common neural-network layers, like `tf.keras.layers.Dense` for fully-connected layers and `tf.keras.layers.Conv2D` for convolutional layers. Under the hood, these layers will construct `Variable` Tensors for any learnable parameters. **Warning**: Don't forget to call `super(YourModelName, self).__init__()` as the first line in your initializer!
# 3. Implement the `call()` method for your class; this implements the forward pass of your model, and defines the *connectivity* of your network. Layers defined in `__init__()` implement `__call__()` so they can be used as function objects that transform input Tensors into output Tensors. Don't define any new layers in `call()`; any layers you want to use in the forward pass should be defined in `__init__()`.
#
# After you define your `tf.keras.Model` subclass, you can instantiate it and use it like the model functions from Part II.
#
# ### Keras Model Subclassing API: Two-Layer Network
#
# Here is a concrete example of using the `tf.keras.Model` API to define a two-layer network. There are a few new bits of API to be aware of here:
#
# We use an `Initializer` object to set up the initial values of the learnable parameters of the layers; in particular `tf.initializers.VarianceScaling` gives behavior similar to the Kaiming initialization method we used in Part II. You can read more about it here: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/initializers/VarianceScaling
#
# We construct `tf.keras.layers.Dense` objects to represent the two fully-connected layers of the model. In addition to multiplying their input by a weight matrix and adding a bias vector, these layer can also apply a nonlinearity for you. For the first layer we specify a ReLU activation function by passing `activation='relu'` to the constructor; the second layer uses softmax activation function. Finally, we use `tf.keras.layers.Flatten` to flatten the output from the previous fully-connected layer.
# In[18]:
class TwoLayerFC(tf.keras.Model):
    """Two-layer fully-connected net: flatten -> ReLU Dense -> softmax Dense."""

    def __init__(self, hidden_size, num_classes):
        super(TwoLayerFC, self).__init__()
        # VarianceScaling(scale=2.0) mimics the Kaiming init used in Part II.
        init = tf.initializers.VarianceScaling(scale=2.0)
        self.flatten = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(
            hidden_size, activation='relu', kernel_initializer=init)
        self.fc2 = tf.keras.layers.Dense(
            num_classes, activation='softmax', kernel_initializer=init)

    def call(self, x, training=False):
        # Flatten the images, then apply the two dense layers in sequence.
        return self.fc2(self.fc1(self.flatten(x)))
def test_TwoLayerFC():
    """ A small unit test to exercise the TwoLayerFC model above. """
    input_size, hidden_size, num_classes = 50, 42, 10
    x = tf.zeros((64, input_size))
    model = TwoLayerFC(hidden_size, num_classes)
    with tf.device(device):
        # Forward a dummy batch of 64 zero vectors; expect (64, 10) scores.
        scores = model(x)
        print(scores.shape)
test_TwoLayerFC()
# ### Keras Model Subclassing API: Three-Layer ConvNet
# Now it's your turn to implement a three-layer ConvNet using the `tf.keras.Model` API. Your model should have the same architecture used in Part II:
#
# 1. Convolutional layer with 5 x 5 kernels, with zero-padding of 2
# 2. ReLU nonlinearity
# 3. Convolutional layer with 3 x 3 kernels, with zero-padding of 1
# 4. ReLU nonlinearity
# 5. Fully-connected layer to give class scores
# 6. Softmax nonlinearity
#
# You should initialize the weights of your network using the same initialization method as was used in the two-layer network above.
#
# **Hint**: Refer to the documentation for `tf.keras.layers.Conv2D` and `tf.keras.layers.Dense`:
#
# +
# https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Dense
# In[19]:
class ThreeLayerConvNet(tf.keras.Model):
    """Conv(5x5, ReLU) -> Conv(3x3, ReLU) -> Flatten -> softmax Dense."""

    def __init__(self, channel_1, channel_2, num_classes):
        super(ThreeLayerConvNet, self).__init__()
        init = tf.initializers.VarianceScaling(scale=2.0)
        # padding='same' reproduces the Part II zero-padding of 2 for the
        # 5x5 kernel and 1 for the 3x3 kernel.
        self.conv1 = tf.keras.layers.Conv2D(
            filters=channel_1, kernel_size=(5, 5), strides=1, padding='same',
            activation='relu', use_bias=True,
            kernel_initializer=init, bias_initializer='zeros')
        self.conv2 = tf.keras.layers.Conv2D(
            filters=channel_2, kernel_size=(3, 3), strides=1, padding='same',
            activation='relu', use_bias=True,
            kernel_initializer=init, bias_initializer='zeros')
        self.flatten = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(
            num_classes, activation='softmax', kernel_initializer=init)

    def call(self, x, training=False):
        # Two conv stages, then flatten and project to class scores.
        return self.fc1(self.flatten(self.conv2(self.conv1(x))))
# Once you complete the implementation of the `ThreeLayerConvNet` above you can run the following to ensure that your implementation does not crash and produces outputs of the expected shape.
# In[20]:
def test_ThreeLayerConvNet():
    # Smoke test: forward a dummy batch and print the output shape.
    channel_1, channel_2, num_classes = 12, 8, 10
    model = ThreeLayerConvNet(channel_1, channel_2, num_classes)
    with tf.device(device):
        # NOTE(review): this batch is NCHW (64, 3, 32, 32) but Conv2D defaults
        # to channels_last; it still runs ('same' padding preserves shapes) but
        # the channel axis is treated as width -- confirm intent.
        x = tf.zeros((64, 3, 32, 32))
        scores = model(x)
        print(scores.shape)
test_ThreeLayerConvNet()
# ### Keras Model Subclassing API: Eager Training
#
# While keras models have a builtin training loop (using the `model.fit`), sometimes you need more customization. Here's an example, of a training loop implemented with eager execution.
#
# In particular, notice `tf.GradientTape`. Automatic differentiation is used in the backend for implementing backpropagation in frameworks like TensorFlow. During eager execution, `tf.GradientTape` is used to trace operations for computing gradients later. A particular `tf.GradientTape` can only compute one gradient; subsequent calls to tape will throw a runtime error.
#
# TensorFlow 2.0 ships with easy-to-use built-in metrics under `tf.keras.metrics` module. Each metric is an object, and we can use `update_state()` to add observations and `reset_state()` to clear all observations. We can get the current result of a metric by calling `result()` on the metric object.
# In[21]:
def train_part34(model_init_fn, optimizer_init_fn, num_epochs=1, is_training=False):
    """
    Simple training loop for use with models defined using tf.keras. It trains
    a model for one epoch on the CIFAR-10 training set and periodically checks
    accuracy on the CIFAR-10 validation set.

    Inputs:
    - model_init_fn: A function that takes no parameters; when called it
      constructs the model we want to train: model = model_init_fn()
    - optimizer_init_fn: A function which takes no parameters; when called it
      constructs the Optimizer object we will use to optimize the model:
      optimizer = optimizer_init_fn()
    - num_epochs: The number of epochs to train for
    - is_training: Forwarded to the model's call() so layers such as
      BatchNorm/Dropout behave in training mode.

    Returns: Nothing, but prints progress during training
    """
    with tf.device(device):
        # Compute the loss like we did in Part II
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
        model = model_init_fn()
        optimizer = optimizer_init_fn()
        # Streaming metrics: train_* accumulate per epoch, val_* per validation pass.
        train_loss = tf.keras.metrics.Mean(name='train_loss')
        train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
        val_loss = tf.keras.metrics.Mean(name='val_loss')
        val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')
        t = 0  # global iteration counter across all epochs
        for epoch in range(num_epochs):
            # Reset the metrics - https://www.tensorflow.org/alpha/guide/migration_guide#new-style_metrics
            train_loss.reset_states()
            train_accuracy.reset_states()
            for x_np, y_np in train_dset:
                with tf.GradientTape() as tape:
                    # Use the model function to build the forward pass.
                    scores = model(x_np, training=is_training)
                    loss = loss_fn(y_np, scores)
                    gradients = tape.gradient(loss, model.trainable_variables)
                    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                    # Update the metrics
                    train_loss.update_state(loss)
                    train_accuracy.update_state(y_np, scores)
                    if t % print_every == 0:
                        # Periodic full pass over the validation set.
                        val_loss.reset_states()
                        val_accuracy.reset_states()
                        for test_x, test_y in val_dset:
                            # During validation at end of epoch, training set to False
                            prediction = model(test_x, training=False)
                            t_loss = loss_fn(test_y, prediction)
                            val_loss.update_state(t_loss)
                            val_accuracy.update_state(test_y, prediction)
                        template = 'Iteration {}, Epoch {}, Loss: {}, Accuracy: {}, Val Loss: {}, Val Accuracy: {}'
                        print (template.format(t, epoch+1,
                                               train_loss.result(),
                                               train_accuracy.result()*100,
                                               val_loss.result(),
                                               val_accuracy.result()*100))
                    t += 1
# ### Keras Model Subclassing API: Train a Two-Layer Network
# We can now use the tools defined above to train a two-layer network on CIFAR-10. We define the `model_init_fn` and `optimizer_init_fn` that construct the model and optimizer respectively when called. Here we want to train the model using stochastic gradient descent with no momentum, so we construct a `tf.keras.optimizers.SGD` function; you can [read about it here](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/optimizers/SGD).
#
# You don't need to tune any hyperparameters here, but you should achieve validation accuracies above 40% after one epoch of training.
# In[22]:
# Hyperparameters for the subclassed two-layer Keras model.
hidden_size, num_classes = 4000, 10
learning_rate = 1e-2
def model_init_fn():
    # Construct a fresh two-layer model.
    return TwoLayerFC(hidden_size, num_classes)
def optimizer_init_fn():
    # Plain SGD, no momentum.
    return tf.keras.optimizers.SGD(learning_rate=learning_rate)
train_part34(model_init_fn, optimizer_init_fn)
# ### Keras Model Subclassing API: Train a Three-Layer ConvNet
# Here you should use the tools we've defined above to train a three-layer ConvNet on CIFAR-10. Your ConvNet should use 32 filters in the first convolutional layer and 16 filters in the second layer.
#
# To train the model you should use gradient descent with Nesterov momentum 0.9.
#
# **HINT**: https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/optimizers/SGD
#
# You don't need to perform any hyperparameter tuning, but you should achieve validation accuracies above 50% after training for one epoch.
# In[23]:
# Hyperparameters for the subclassed three-layer ConvNet.
learning_rate = 3e-3
channel_1, channel_2, num_classes = 32, 16, 10

def model_init_fn():
    """Construct the three-layer ConvNet defined in Part III."""
    return ThreeLayerConvNet(channel_1, channel_2, num_classes)
def optimizer_init_fn():
    """SGD with Nesterov momentum 0.9, as required for this part."""
    return tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=0.9, nesterov=True)
# Train the subclassed ConvNet for one epoch with the Nesterov-SGD optimizer.
train_part34(model_init_fn, optimizer_init_fn)
# # Part IV: Keras Sequential API
# In Part III we introduced the `tf.keras.Model` API, which allows you to define models with any number of learnable layers and with arbitrary connectivity between layers.
#
# However for many models you don't need such flexibility - a lot of models can be expressed as a sequential stack of layers, with the output of each layer fed to the next layer as input. If your model fits this pattern, then there is an even easier way to define your model: using `tf.keras.Sequential`. You don't need to write any custom classes; you simply call the `tf.keras.Sequential` constructor with a list containing a sequence of layer objects.
#
# One complication with `tf.keras.Sequential` is that you must define the shape of the input to the model by passing a value to the `input_shape` of the first layer in your model.
#
# ### Keras Sequential API: Two-Layer Network
# In this subsection, we will rewrite the two-layer fully-connected network using `tf.keras.Sequential`, and train it using the training loop defined above.
#
# You don't need to perform any hyperparameter tuning here, but you should see validation accuracies above 40% after training for one epoch.
# In[24]:
learning_rate = 1e-2

def model_init_fn():
    """Build the two-layer FC network with tf.keras.Sequential."""
    input_shape = (32, 32, 3)
    hidden_layer_size, num_classes = 4000, 10
    init = tf.initializers.VarianceScaling(scale=2.0)
    # Sequential requires the input shape on its first layer.
    return tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=input_shape),
        tf.keras.layers.Dense(hidden_layer_size, activation='relu',
                              kernel_initializer=init),
        tf.keras.layers.Dense(num_classes, activation='softmax',
                              kernel_initializer=init),
    ])

def optimizer_init_fn():
    """Plain SGD, no momentum."""
    return tf.keras.optimizers.SGD(learning_rate=learning_rate)

train_part34(model_init_fn, optimizer_init_fn)
# ### Abstracting Away the Training Loop
# In the previous examples, we used a customised training loop to train models (e.g. `train_part34`). Writing your own training loop is only required if you need more flexibility and control during training your model. Alternately, you can also use built-in APIs like `tf.keras.Model.fit()` and `tf.keras.Model.evaluate` to train and evaluate a model. Also remember to configure your model for training by calling `tf.keras.Model.compile.
#
# You don't need to perform any hyperparameter tuning here, but you should see validation and test accuracies above 42% after training for one epoch.
# In[25]:
# Same two-layer model, but trained via Keras' built-in compile/fit/evaluate.
model = model_init_fn()
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate),
              loss='sparse_categorical_crossentropy',
              metrics=[tf.keras.metrics.sparse_categorical_accuracy])
model.fit(X_train, y_train, batch_size=64, epochs=1, validation_data=(X_val, y_val))
model.evaluate(X_test, y_test)
# ### Keras Sequential API: Three-Layer ConvNet
# Here you should use `tf.keras.Sequential` to reimplement the same three-layer ConvNet architecture used in Part II and Part III. As a reminder, your model should have the following architecture:
#
# 1. Convolutional layer with 32 5x5 kernels, using zero padding of 2
# 2. ReLU nonlinearity
# 3. Convolutional layer with 16 3x3 kernels, using zero padding of 1
# 4. ReLU nonlinearity
# 5. Fully-connected layer giving class scores
# 6. Softmax nonlinearity
#
# You should initialize the weights of the model using a `tf.initializers.VarianceScaling` as above.
#
# You should train the model using Nesterov momentum 0.9.
#
# You don't need to perform any hyperparameter search, but you should achieve accuracy above 45% after training for one epoch.
# In[26]:
def model_init_fn():
    """Build the three-layer ConvNet with tf.keras.Sequential.

    Architecture: Conv(32, 5x5, ReLU) -> Conv(16, 3x3, ReLU) -> Flatten ->
    softmax Dense(10). padding='same' matches zero-padding of 2 and 1.
    """
    channel_1, channel_2, num_classes = 32, 16, 10
    init = tf.initializers.VarianceScaling(scale=2.0)
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=channel_1, kernel_size=(5, 5),
                               strides=1, padding='same', activation='relu',
                               use_bias=True, kernel_initializer=init,
                               bias_initializer='zeros'),
        tf.keras.layers.Conv2D(filters=channel_2, kernel_size=(3, 3),
                               strides=1, padding='same', activation='relu',
                               use_bias=True, kernel_initializer=init,
                               bias_initializer='zeros'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(num_classes, activation='softmax',
                              kernel_initializer=init),
    ])
learning_rate = 5e-4

def optimizer_init_fn():
    """SGD with Nesterov momentum 0.9."""
    return tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=0.9, nesterov=True)
train_part34(model_init_fn, optimizer_init_fn)
# We will also train this model with the built-in training loop APIs provided by TensorFlow.
# In[27]:
model = model_init_fn()
# NOTE(review): optimizer='sgd' uses Keras' default SGD (no Nesterov momentum),
# not the optimizer_init_fn defined above -- confirm this is intentional.
model.compile(optimizer='sgd',
              loss='sparse_categorical_crossentropy',
              metrics=[tf.keras.metrics.sparse_categorical_accuracy])
model.fit(X_train, y_train, batch_size=64, epochs=1, validation_data=(X_val, y_val))
model.evaluate(X_test, y_test)
# ## Part IV: Functional API
# ### Demonstration with a Two-Layer Network
#
# In the previous section, we saw how we can use `tf.keras.Sequential` to stack layers to quickly build simple models. But this comes at the cost of losing flexibility.
#
# Often we will have to write complex models that have non-sequential data flows: a layer can have **multiple inputs and/or outputs**, such as stacking the output of 2 previous layers together to feed as input to a third! (Some examples are residual connections and dense blocks.)
#
# In such cases, we can use Keras functional API to write models with complex topologies such as:
#
# 1. Multi-input models
# 2. Multi-output models
# 3. Models with shared layers (the same layer called several times)
# 4. Models with non-sequential data flows (e.g. residual connections)
#
# Writing a model with Functional API requires us to create a `tf.keras.Model` instance and explicitly write input tensors and output tensors for this model.
# In[28]:
def two_layer_fc_functional(input_shape, hidden_size, num_classes):
    """Build the two-layer FC network with the Keras functional API.

    Inputs:
    - input_shape: shape of one input example (excluding batch dimension)
    - hidden_size: number of units in the hidden ReLU layer
    - num_classes: number of output classes

    Returns: a tf.keras.Model mapping inputs to softmax class scores.
    """
    init = tf.initializers.VarianceScaling(scale=2.0)
    inputs = tf.keras.Input(shape=input_shape)
    x = tf.keras.layers.Flatten()(inputs)
    x = tf.keras.layers.Dense(hidden_size, activation='relu',
                              kernel_initializer=init)(x)
    scores = tf.keras.layers.Dense(num_classes, activation='softmax',
                                   kernel_initializer=init)(x)
    # Instantiate the model given its input and output tensors.
    return tf.keras.Model(inputs=inputs, outputs=scores)
def test_two_layer_fc_functional():
    """ A small unit test to exercise the TwoLayerFC model above. """
    input_size, hidden_size, num_classes = 50, 42, 10
    input_shape = (50,)
    x = tf.zeros((64, input_size))
    model = two_layer_fc_functional(input_shape, hidden_size, num_classes)
    with tf.device(device):
        # Expect scores of shape (64, 10).
        scores = model(x)
        print(scores.shape)
test_two_layer_fc_functional()
# ### Keras Functional API: Train a Two-Layer Network
# You can now train this two-layer network constructed using the functional API.
#
# You don't need to perform any hyperparameter tuning here, but you should see validation accuracies above 40% after training for one epoch.
# In[29]:
# Train the functional-API two-layer network with plain SGD.
input_shape = (32, 32, 3)
hidden_size, num_classes = 4000, 10
learning_rate = 1e-2
def model_init_fn():
    # Fresh functional-API model per run.
    return two_layer_fc_functional(input_shape, hidden_size, num_classes)
def optimizer_init_fn():
    return tf.keras.optimizers.SGD(learning_rate=learning_rate)
train_part34(model_init_fn, optimizer_init_fn)
# # Part V: CIFAR-10 open-ended challenge
#
# In this section you can experiment with whatever ConvNet architecture you'd like on CIFAR-10.
#
# You should experiment with architectures, hyperparameters, loss functions, regularization, or anything else you can think of to train a model that achieves **at least 70%** accuracy on the **validation** set within 10 epochs. You can use the built-in train function, the `train_part34` function from above, or implement your own training loop.
#
# Describe what you did at the end of the notebook.
#
# ### Some things you can try:
# - **Filter size**: Above we used 5x5 and 3x3; is this optimal?
# - **Number of filters**: Above we used 16 and 32 filters. Would more or fewer do better?
# - **Pooling**: We didn't use any pooling above. Would this improve the model?
# - **Normalization**: Would your model be improved with batch normalization, layer normalization, group normalization, or some other normalization strategy?
# - **Network architecture**: The ConvNet above has only three layers of trainable parameters. Would a deeper model do better?
# - **Global average pooling**: Instead of flattening after the final convolutional layer, would global average pooling do better? This strategy is used for example in Google's Inception network and in Residual Networks.
# - **Regularization**: Would some kind of regularization improve performance? Maybe weight decay or dropout?
#
# ### NOTE: Batch Normalization / Dropout
# If you are using Batch Normalization and Dropout, remember to pass `is_training=True` if you use the `train_part34()` function. BatchNorm and Dropout layers have different behaviors at training and inference time. `training` is a specific keyword argument reserved for this purpose in any `tf.keras.Model`'s `call()` function. Read more about this here : https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/BatchNormalization#methods
# https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Dropout#methods
#
# ### Tips for training
# For each network architecture that you try, you should tune the learning rate and other hyperparameters. When doing this there are a couple important things to keep in mind:
#
# - If the parameters are working well, you should see improvement within a few hundred iterations
# - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.
# - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.
# - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set.
#
# ### Going above and beyond
# If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these, but don't miss the fun if you have time!
#
# - Alternative optimizers: you can try Adam, Adagrad, RMSprop, etc.
# - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.
# - Model ensembles
# - Data augmentation
# - New Architectures
# - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.
# - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.
# - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)
#
# ### Have fun and happy training!
# In[30]:
class CustomConvNet(tf.keras.Model):
    """Custom CIFAR-10 ConvNet: two Conv-BN-ReLU-Dropout stages + softmax FC.

    Note: the number of output classes is read from the module-level
    `num_classes` global at construction time (10 at the point of use here).
    """

    def __init__(self):
        super(CustomConvNet, self).__init__()
        initializer = tf.initializers.VarianceScaling(scale=2.0)
        # Stage 1: 32 5x5 filters. The original code assigned conv1/batch1/
        # relu1 TWICE, silently discarding the first set of layer objects;
        # the duplicate assignments are removed here. It also created an
        # unused MaxPool2D (self.pool) that call() never applied — dropped.
        self.conv1 = tf.keras.layers.Conv2D(
            filters=32, kernel_size=(5, 5), strides=1, padding='same',
            activation='linear', use_bias=True,
            kernel_initializer=initializer, bias_initializer='zeros')
        self.batch1 = tf.keras.layers.BatchNormalization()
        self.relu1 = tf.keras.layers.ReLU()
        self.drop1 = tf.keras.layers.Dropout(0.1)
        # Stage 2: 16 3x3 filters.
        self.conv2 = tf.keras.layers.Conv2D(
            filters=16, kernel_size=(3, 3), strides=1, padding='same',
            activation='linear', use_bias=True,
            kernel_initializer=initializer, bias_initializer='zeros')
        self.batch2 = tf.keras.layers.BatchNormalization()
        self.relu2 = tf.keras.layers.ReLU()
        self.drop2 = tf.keras.layers.Dropout(0.1)
        self.flatten = tf.keras.layers.Flatten()
        # Final classifier; softmax outputs class probabilities.
        self.fc1 = tf.keras.layers.Dense(num_classes, activation='softmax',
                                         kernel_initializer=initializer)

    def call(self, input_tensor, training=False):
        """Forward pass. `training` toggles BatchNorm/Dropout behavior."""
        x = self.conv1(input_tensor)
        x = self.batch1(x, training=training)
        x = self.relu1(x)
        x = self.drop1(x, training=training)
        x = self.conv2(x)
        x = self.batch2(x, training=training)
        x = self.relu2(x)
        x = self.drop2(x, training=training)
        x = self.flatten(x)
        return self.fc1(x)
# device = '/device:GPU:0' # Change this to a CPU/GPU as you wish!
device = '/cpu:0'  # Change this to a CPU/GPU as you wish!
# How often (in iterations) the training loop reports progress.
print_every = 700
num_epochs = 10
# NOTE: this instance is created but train_part34 below builds its own model
# via model_init_fn; `model` here appears unused — confirm before removing.
model = CustomConvNet()
def model_init_fn():
    """Factory used by the training loop to build a fresh CustomConvNet."""
    network = CustomConvNet()
    return network
def optimizer_init_fn():
    """Factory used by the training loop to build the Adam optimizer (lr=1e-3)."""
    return tf.keras.optimizers.Adam(1e-3)
# Kick off training; train_part34 is defined earlier in the file and builds
# the model/optimizer from the two factory functions above.
train_part34(model_init_fn, optimizer_init_fn, num_epochs=num_epochs, is_training=True)
# ## Describe what you did
#
# In the cell below you should write an explanation of what you did, any additional features that you implemented, and/or any graphs that you made in the process of training and evaluating your network.
# THIS OVERFITS, I'll come back later when I have a GPU.
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.backend.random_normal",
"tensorflow.reshape",
"tensorflow.keras.optimizers.SGD",
"tensorflow.matmul",
"numpy.arange",
"tensorflow.nn.conv2d",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.Sequential",
"n... | [((5761, 5812), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (5805, 5812), True, 'import tensorflow as tf\n'), ((5844, 5898), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['device', '(True)'], {}), '(device, True)\n', (5884, 5898), True, 'import tensorflow as tf\n'), ((6312, 6349), 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ([], {}), '()\n', (6347, 6349), True, 'import tensorflow as tf\n'), ((6415, 6452), 'numpy.asarray', 'np.asarray', (['X_train'], {'dtype': 'np.float32'}), '(X_train, dtype=np.float32)\n', (6425, 6452), True, 'import numpy as np\n'), ((6526, 6562), 'numpy.asarray', 'np.asarray', (['X_test'], {'dtype': 'np.float32'}), '(X_test, dtype=np.float32)\n', (6536, 6562), True, 'import numpy as np\n'), ((14156, 14178), 'tensorflow.reshape', 'tf.reshape', (['x', '(N, -1)'], {}), '(x, (N, -1))\n', (14166, 14178), True, 'import tensorflow as tf\n'), ((16432, 16448), 'tensorflow.matmul', 'tf.matmul', (['h', 'w2'], {}), '(h, w2)\n', (16441, 16448), True, 'import tensorflow as tf\n'), ((19473, 19601), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filters': 'conv_w1', 'strides': '(1)', 'padding': '[[0, 0], [2, 2], [2, 2], [0, 0]]', 'data_format': '"""NHWC"""', 'name': '"""Prvi"""'}), "(input=x, filters=conv_w1, strides=1, padding=[[0, 0], [2, 2],\n [2, 2], [0, 0]], data_format='NHWC', name='Prvi')\n", (19485, 19601), True, 'import tensorflow as tf\n'), ((19619, 19648), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['w1x1', 'conv_b1'], {}), '(w1x1, conv_b1)\n', (19633, 19648), True, 'import tensorflow as tf\n'), ((19675, 19699), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv_layer_1'], {}), '(conv_layer_1)\n', (19685, 19699), True, 'import tensorflow as tf\n'), ((19725, 19859), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'reLu1', 'filters': 'conv_w2', 
'strides': '(1)', 'padding': '[[0, 0], [1, 1], [1, 1], [0, 0]]', 'data_format': '"""NHWC"""', 'name': '"""Drugi"""'}), "(input=reLu1, filters=conv_w2, strides=1, padding=[[0, 0], [1, \n 1], [1, 1], [0, 0]], data_format='NHWC', name='Drugi')\n", (19737, 19859), True, 'import tensorflow as tf\n'), ((19876, 19905), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['w2x2', 'conv_b2'], {}), '(w2x2, conv_b2)\n', (19890, 19905), True, 'import tensorflow as tf\n'), ((19932, 19956), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv_layer_2'], {}), '(conv_layer_2)\n', (19942, 19956), True, 'import tensorflow as tf\n'), ((20016, 20045), 'tensorflow.matmul', 'tf.matmul', (['relu2flatten', 'fc_w'], {}), '(relu2flatten, fc_w)\n', (20025, 20045), True, 'import tensorflow as tf\n'), ((20059, 20085), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['w3x3', 'fc_b'], {}), '(w3x3, fc_b)\n', (20073, 20085), True, 'import tensorflow as tf\n'), ((35140, 35166), 'tensorflow.zeros', 'tf.zeros', (['(64, input_size)'], {}), '((64, input_size))\n', (35148, 35166), True, 'import tensorflow as tf\n'), ((44165, 44217), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (44188, 44217), True, 'import tensorflow as tf\n'), ((46050, 46136), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(learning_rate=learning_rate, momentum=0.9, nesterov\n =True)\n', (46073, 46136), True, 'import tensorflow as tf\n'), ((47860, 47902), 'tensorflow.initializers.VarianceScaling', 'tf.initializers.VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (47891, 47902), True, 'import tensorflow as tf\n'), ((48254, 48281), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (48273, 48281), True, 'import tensorflow as tf\n'), ((48336, 48388), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], 
{'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (48359, 48388), True, 'import tensorflow as tf\n'), ((50668, 50710), 'tensorflow.initializers.VarianceScaling', 'tf.initializers.VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (50699, 50710), True, 'import tensorflow as tf\n'), ((50723, 50912), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'channel_1', 'kernel_size': '(5, 5)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=channel_1, kernel_size=(5, 5), strides=1,\n padding='same', activation='relu', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (50745, 50912), True, 'import tensorflow as tf\n'), ((50915, 51104), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'channel_2', 'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=channel_2, kernel_size=(3, 3), strides=1,\n padding='same', activation='relu', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (50937, 51104), True, 'import tensorflow as tf\n'), ((51109, 51134), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (51132, 51134), True, 'import tensorflow as tf\n'), ((51145, 51238), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': 'initializer'}), "(num_classes, activation='softmax', kernel_initializer\n =initializer)\n", (51166, 51238), True, 'import tensorflow as tf\n'), ((51288, 51315), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (51307, 51315), True, 'import tensorflow as tf\n'), ((52038, 52124), 
'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(learning_rate=learning_rate, momentum=0.9, nesterov\n =True)\n', (52061, 52124), True, 'import tensorflow as tf\n'), ((53996, 54038), 'tensorflow.initializers.VarianceScaling', 'tf.initializers.VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (54027, 54038), True, 'import tensorflow as tf\n'), ((54052, 54085), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (54066, 54085), True, 'import tensorflow as tf\n'), ((54507, 54552), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'scores'}), '(inputs=inputs, outputs=scores)\n', (54521, 54552), True, 'import tensorflow as tf\n'), ((54768, 54794), 'tensorflow.zeros', 'tf.zeros', (['(64, input_size)'], {}), '((64, input_size))\n', (54776, 54794), True, 'import tensorflow as tf\n'), ((55507, 55559), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (55530, 55559), True, 'import tensorflow as tf\n'), ((63265, 63304), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (63289, 63304), True, 'import tensorflow as tf\n'), ((8824, 8836), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (8833, 8836), True, 'import numpy as np\n'), ((14130, 14141), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (14138, 14141), True, 'import tensorflow as tf\n'), ((16365, 16381), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (16374, 16381), True, 'import tensorflow as tf\n'), ((16775, 16792), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (16784, 16792), True, 'import tensorflow as tf\n'), ((16814, 16839), 'tensorflow.zeros', 'tf.zeros', (['(64, 32, 32, 3)'], {}), '((64, 32, 32, 3))\n', (16822, 16839), True, 'import 
tensorflow as tf\n'), ((16853, 16895), 'tensorflow.zeros', 'tf.zeros', (['(32 * 32 * 3, hidden_layer_size)'], {}), '((32 * 32 * 3, hidden_layer_size))\n', (16861, 16895), True, 'import tensorflow as tf\n'), ((16909, 16942), 'tensorflow.zeros', 'tf.zeros', (['(hidden_layer_size, 10)'], {}), '((hidden_layer_size, 10))\n', (16917, 16942), True, 'import tensorflow as tf\n'), ((20826, 20843), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (20835, 20843), True, 'import tensorflow as tf\n'), ((20857, 20882), 'tensorflow.zeros', 'tf.zeros', (['(64, 32, 32, 3)'], {}), '((64, 32, 32, 3))\n', (20865, 20882), True, 'import tensorflow as tf\n'), ((20901, 20923), 'tensorflow.zeros', 'tf.zeros', (['(5, 5, 3, 6)'], {}), '((5, 5, 3, 6))\n', (20909, 20923), True, 'import tensorflow as tf\n'), ((20942, 20956), 'tensorflow.zeros', 'tf.zeros', (['(6,)'], {}), '((6,))\n', (20950, 20956), True, 'import tensorflow as tf\n'), ((20975, 20997), 'tensorflow.zeros', 'tf.zeros', (['(3, 3, 6, 9)'], {}), '((3, 3, 6, 9))\n', (20983, 20997), True, 'import tensorflow as tf\n'), ((21016, 21030), 'tensorflow.zeros', 'tf.zeros', (['(9,)'], {}), '((9,))\n', (21024, 21030), True, 'import tensorflow as tf\n'), ((21046, 21073), 'tensorflow.zeros', 'tf.zeros', (['(32 * 32 * 9, 10)'], {}), '((32 * 32 * 9, 10))\n', (21054, 21073), True, 'import tensorflow as tf\n'), ((21089, 21104), 'tensorflow.zeros', 'tf.zeros', (['(10,)'], {}), '((10,))\n', (21097, 21104), True, 'import tensorflow as tf\n'), ((22639, 22656), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (22654, 22656), True, 'import tensorflow as tf\n'), ((22746, 22817), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'scores'}), '(labels=y, logits=scores)\n', (22792, 22817), True, 'import tensorflow as tf\n'), ((22839, 22859), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (22853, 22859), True, 'import 
tensorflow as tf\n'), ((25989, 26026), 'tensorflow.keras.backend.random_normal', 'tf.keras.backend.random_normal', (['shape'], {}), '(shape)\n', (26019, 26026), True, 'import tensorflow as tf\n'), ((26029, 26050), 'numpy.sqrt', 'np.sqrt', (['(2.0 / fan_in)'], {}), '(2.0 / fan_in)\n', (26036, 26050), True, 'import numpy as np\n'), ((30386, 30401), 'tensorflow.zeros', 'tf.zeros', (['(32,)'], {}), '((32,))\n', (30394, 30401), True, 'import tensorflow as tf\n'), ((30506, 30521), 'tensorflow.zeros', 'tf.zeros', (['(16,)'], {}), '((16,))\n', (30514, 30521), True, 'import tensorflow as tf\n'), ((30624, 30639), 'tensorflow.zeros', 'tf.zeros', (['(10,)'], {}), '((10,))\n', (30632, 30639), True, 'import tensorflow as tf\n'), ((34471, 34513), 'tensorflow.initializers.VarianceScaling', 'tf.initializers.VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (34502, 34513), True, 'import tensorflow as tf\n'), ((34533, 34623), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden_size'], {'activation': '"""relu"""', 'kernel_initializer': 'initializer'}), "(hidden_size, activation='relu', kernel_initializer=\n initializer)\n", (34554, 34623), True, 'import tensorflow as tf\n'), ((34673, 34766), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': 'initializer'}), "(num_classes, activation='softmax', kernel_initializer\n =initializer)\n", (34694, 34766), True, 'import tensorflow as tf\n'), ((34820, 34845), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (34843, 34845), True, 'import tensorflow as tf\n'), ((35225, 35242), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (35234, 35242), True, 'import tensorflow as tf\n'), ((36697, 36739), 'tensorflow.initializers.VarianceScaling', 'tf.initializers.VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (36728, 36739), True, 'import tensorflow as tf\n'), ((36770, 36959), 
'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'channel_1', 'kernel_size': '(5, 5)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=channel_1, kernel_size=(5, 5), strides=1,\n padding='same', activation='relu', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (36792, 36959), True, 'import tensorflow as tf\n'), ((36971, 37160), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'channel_2', 'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=channel_2, kernel_size=(3, 3), strides=1,\n padding='same', activation='relu', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (36993, 37160), True, 'import tensorflow as tf\n'), ((37174, 37199), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (37197, 37199), True, 'import tensorflow as tf\n'), ((37219, 37312), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': 'initializer'}), "(num_classes, activation='softmax', kernel_initializer\n =initializer)\n", (37240, 37312), True, 'import tensorflow as tf\n'), ((38958, 38975), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (38967, 38975), True, 'import tensorflow as tf\n'), ((38989, 39014), 'tensorflow.zeros', 'tf.zeros', (['(64, 3, 32, 32)'], {}), '((64, 3, 32, 32))\n', (38997, 39014), True, 'import tensorflow as tf\n'), ((40810, 40827), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (40819, 40827), True, 'import tensorflow as tf\n'), ((40898, 40945), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 
'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), '()\n', (40943, 40945), True, 'import tensorflow as tf\n'), ((41057, 41097), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), "(name='train_loss')\n", (41078, 41097), True, 'import tensorflow as tf\n'), ((41123, 41188), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), "(name='train_accuracy')\n", (41165, 41188), True, 'import tensorflow as tf\n'), ((41213, 41251), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""val_loss"""'}), "(name='val_loss')\n", (41234, 41251), True, 'import tensorflow as tf\n'), ((41275, 41338), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""val_accuracy"""'}), "(name='val_accuracy')\n", (41317, 41338), True, 'import tensorflow as tf\n'), ((47926, 47974), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape)\n', (47949, 47974), True, 'import tensorflow as tf\n'), ((47984, 48079), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden_layer_size'], {'activation': '"""relu"""', 'kernel_initializer': 'initializer'}), "(hidden_layer_size, activation='relu',\n kernel_initializer=initializer)\n", (48005, 48079), True, 'import tensorflow as tf\n'), ((48115, 48208), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': 'initializer'}), "(num_classes, activation='softmax', kernel_initializer\n =initializer)\n", (48136, 48208), True, 'import tensorflow as tf\n'), ((49135, 49187), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (49158, 49187), True, 'import tensorflow as tf\n'), ((54109, 54134), 
'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (54132, 54134), True, 'import tensorflow as tf\n'), ((54160, 54250), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden_size'], {'activation': '"""relu"""', 'kernel_initializer': 'initializer'}), "(hidden_size, activation='relu', kernel_initializer=\n initializer)\n", (54181, 54250), True, 'import tensorflow as tf\n'), ((54310, 54403), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': 'initializer'}), "(num_classes, activation='softmax', kernel_initializer\n =initializer)\n", (54331, 54403), True, 'import tensorflow as tf\n'), ((54884, 54901), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (54893, 54901), True, 'import tensorflow as tf\n'), ((59815, 59857), 'tensorflow.initializers.VarianceScaling', 'tf.initializers.VarianceScaling', ([], {'scale': '(2.0)'}), '(scale=2.0)\n', (59846, 59857), True, 'import tensorflow as tf\n'), ((59888, 60073), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5, 5)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""linear"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=32, kernel_size=(5, 5), strides=1, padding=\n 'same', activation='linear', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (59910, 60073), True, 'import tensorflow as tf\n'), ((60085, 60121), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (60119, 60121), True, 'import tensorflow as tf\n'), ((60143, 60165), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (60163, 60165), True, 'import tensorflow as tf\n'), ((60187, 60372), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5, 5)', 'strides': 
'(1)', 'padding': '"""same"""', 'activation': '"""linear"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=32, kernel_size=(5, 5), strides=1, padding=\n 'same', activation='linear', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (60209, 60372), True, 'import tensorflow as tf\n'), ((60384, 60420), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (60418, 60420), True, 'import tensorflow as tf\n'), ((60442, 60464), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (60462, 60464), True, 'import tensorflow as tf\n'), ((60486, 60514), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (60509, 60514), True, 'import tensorflow as tf\n'), ((60535, 60562), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {}), '()\n', (60560, 60562), True, 'import tensorflow as tf\n'), ((60593, 60778), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""linear"""', 'use_bias': '(True)', 'kernel_initializer': 'initializer', 'bias_initializer': '"""zeros"""'}), "(filters=16, kernel_size=(3, 3), strides=1, padding=\n 'same', activation='linear', use_bias=True, kernel_initializer=\n initializer, bias_initializer='zeros')\n", (60615, 60778), True, 'import tensorflow as tf\n'), ((60790, 60826), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (60824, 60826), True, 'import tensorflow as tf\n'), ((60848, 60870), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (60868, 60870), True, 'import tensorflow as tf\n'), ((60892, 60920), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (60915, 60920), True, 'import tensorflow as tf\n'), ((60944, 
60969), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (60967, 60969), True, 'import tensorflow as tf\n'), ((61309, 61402), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""', 'kernel_initializer': 'initializer'}), "(num_classes, activation='softmax', kernel_initializer\n =initializer)\n", (61330, 61402), True, 'import tensorflow as tf\n'), ((6467, 6502), 'numpy.asarray', 'np.asarray', (['y_train'], {'dtype': 'np.int32'}), '(y_train, dtype=np.int32)\n', (6477, 6502), True, 'import numpy as np\n'), ((6576, 6610), 'numpy.asarray', 'np.asarray', (['y_test'], {'dtype': 'np.int32'}), '(y_test, dtype=np.int32)\n', (6586, 6610), True, 'import numpy as np\n'), ((8874, 8897), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (8891, 8897), True, 'import numpy as np\n'), ((14287, 14300), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (14296, 14300), True, 'import numpy as np\n'), ((25949, 25967), 'numpy.prod', 'np.prod', (['shape[:3]'], {}), '(shape[:3])\n', (25956, 25967), True, 'import numpy as np\n'), ((41678, 41695), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (41693, 41695), True, 'import tensorflow as tf\n')] |
# Copyright 2021 <NAME> & <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
import os
import time

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers, metrics
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tqdm.notebook import tqdm

from models import efficient_capsnet_graph_mnist, efficient_capsnet_graph_smallnorb, efficient_capsnet_graph_multimnist, original_capsnet_graph_mnist
from utils import pre_process_multimnist
from utils.dataset import Dataset
from utils.layers import PrimaryCaps, FCCaps, Length
from utils.tools import get_callbacks, marginLoss, multiAccuracy
import sys
sys.path.append("..")
from globalvariables import *
from models.fileloader import file_loader
class Model(object):
    """
    A class used to share common model functions and attributes.
    ...
    Attributes
    ----------
    model_name: str
        name of the model (Ex. 'MNIST')
    mode: str
        model modality (Ex. 'test')
    config_path: str
        path configuration file
    verbose: bool
    Methods
    -------
    load_config():
        load configuration file
    load_graph_weights():
        load network weights
    predict(dataset_test):
        use the model to predict dataset_test
    evaluate(X_test, y_test):
        compute accuracy and test error with the given dataset (X_test, y_test)
    save_graph_weights():
        save model weights
    """
    def __init__(self, model_name, mode='test', config_path='config.json', verbose=True):
        self.model_name = model_name
        self.model = None  # concrete tf.keras model, built by subclasses
        self.mode = mode
        self.config_path = config_path
        self.config = None
        self.verbose = verbose
        self.load_config()
    def load_config(self):
        """
        Load the JSON config file into self.config.
        """
        with open(self.config_path) as json_data_file:
            self.config = json.load(json_data_file)
    def load_graph_weights(self):
        """Restore network weights from self.model_path (best-effort: logs on failure)."""
        try:
            self.model.load_weights(self.model_path)
        except Exception as e:
            # Fixed typo ("ERRROR") and surface the underlying exception so
            # failures are diagnosable instead of silently swallowed.
            print(f"[ERROR] Graph weights not found: {e}")
    def predict(self, dataset_test):
        """Run the underlying model on dataset_test and return its predictions."""
        return self.model.predict(dataset_test)
    def evaluate(self, X_test, y_test):
        """Compute and print accuracy and test error on (X_test, y_test)."""
        print('-'*30 + f'{self.model_name} Evaluation' + '-'*30)
        if self.model_name == "MULTIMNIST":
            # MultiMNIST needs the overlay test pipeline and a per-batch accuracy.
            dataset_test = pre_process_multimnist.generate_tf_data_test(X_test, y_test, self.config["shift_multimnist"], n_multi=self.config['n_overlay_multimnist'])
            acc = []
            for X, y in tqdm(dataset_test, total=len(X_test)):
                y_pred, X_gen1, X_gen2 = self.model.predict(X)
                acc.append(multiAccuracy(y, y_pred))
            acc = np.mean(acc)
        else:
            y_pred, X_gen = self.model.predict(X_test)
            acc = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0]
        test_error = 1 - acc
        print('Test acc:', acc)
        print(f"Test error [%]: {(test_error):.4%}")
        if self.model_name == "MULTIMNIST":
            print(f"N° misclassified images: {int(test_error*len(y_test)*self.config['n_overlay_multimnist'])} out of {len(y_test)*self.config['n_overlay_multimnist']}")
        else:
            print(f"N° misclassified images: {int(test_error*len(y_test))} out of {len(y_test)}")
    def save_graph_weights(self):
        """Persist the current model weights to self.model_path."""
        self.model.save_weights(self.model_path)
class EfficientCapsNet(Model):
    """
    A class used to manage an Efficient-CapsNet model. 'model_name' and 'mode' define the particular architecure and modality of the
    generated network.
    ...
    Attributes
    ----------
    model_name: str
        name of the model (Ex. 'MNIST')
    mode: str
        model modality (Ex. 'test')
    config_path: str
        path configuration file
    custom_path: str
        custom weights path
    verbose: bool
    Methods
    -------
    load_graph():
        load the network graph given the model_name
    train(dataset, initial_epoch)
        train the constructed network with a given dataset. All train hyperparameters are defined in the configuration file
    """
    def __init__(self, model_name, mode='test', config_path='config.json', custom_path=None, verbose=True):
        Model.__init__(self, model_name, mode, config_path, verbose)
        if custom_path is not None:
            self.model_path = custom_path
        else:
            self.model_path = os.path.join(self.config['saved_model_dir'], f"efficient_capsnet_{self.model_name}.h5")
        # Fixed: the original filename was missing the underscore between
        # "capsnet" and the model name ("efficient_capsnet{name}_new_train.h5").
        self.model_path_new_train = os.path.join(self.config['saved_model_dir'], f"efficient_capsnet_{self.model_name}_new_train.h5")
        self.tb_path = os.path.join(self.config['tb_log_save_dir'], f"efficient_capsnet_{self.model_name}")
        # Graph construction is deferred until train() knows the input shape.
        # self.load_graph()
    def load_graph(self, inputs, pred_len):
        """Build and return the Efficient-CapsNet graph for the given input shape."""
        return efficient_capsnet_graph_mnist.build_graph(inputs.shape, pred_len, self.mode, self.verbose)
    def train(self, dataset=None, initial_epoch=0):
        """Train on the windowed data returned by get_data() and print test metrics.

        Hyperparameters below are hard-coded for this experiment; `dataset` and
        `initial_epoch` are accepted for interface compatibility but unused here.
        """
        batch_size = 5
        lr = 0.001
        epochs = 1
        hist_len = 10   # history window length
        pred_len = 4    # prediction horizon
        # (train_x, train_y) = get_data(hist_len, pred_len)
        (train_x, train_y), (test_x, test_y) = get_data(hist_len, pred_len)
        self.model = self.load_graph(train_x, pred_len)
        # Fixed: TF2 Adam takes `learning_rate`; the `lr` alias is deprecated.
        self.model.compile(optimizer=optimizers.Adam(learning_rate=lr), loss='mean_squared_error', metrics=[metrics.MeanSquaredError()])
        y_generator = train_generator(train_x, train_y, batch_size)
        # Fixed: the original printed `time.clock() - start` with `time` never
        # imported, `start` never defined, and time.clock() removed in Py 3.8.
        start = time.perf_counter()
        self.model.fit_generator(generator=y_generator, steps_per_epoch=int(train_y.shape[0] / batch_size), epochs=epochs)
        print(time.perf_counter() - start)
        predicted_y = self.model.predict(test_x)
        mapes = []
        maes = []
        rmses = []
        mses = []
        for i in range(len(predicted_y)):
            y_ = predicted_y[i, :, :, :]
            y = test_y[i, :, :, :]
            mapes.append(MAPE(y, y_))
            maes.append(MAE(y, y_))
            rmses.append(RMSE(y, y_))
            mses.append(MSE(y, y_))
        print("=====MAE====")
        print(np.average(np.array(maes)))
        print(np.min(np.array(maes)))
        print(np.max(np.array(maes)))
        print("=====RMSE===")
        print(np.average(np.array(rmses)))
        print(np.min(np.array(rmses)))
        print(np.max(np.array(rmses)))
def MAPE(v, v_):
    '''
    Mean absolute percentage error.
    :param v: np.ndarray or int, ground truth.
    :param v_: np.ndarray or int, prediction.
    :return: int, MAPE averages on all elements of input.
    '''
    # The 1e-5 in the denominator guards against division by zero.
    relative_errors = np.abs(v_ - v) / (v + 1e-5)
    return np.mean(relative_errors)
def RMSE(v, v_):
    '''
    Root mean squared error.
    :param v: np.ndarray or int, ground truth.
    :param v_: np.ndarray or int, prediction.
    :return: int, RMSE averages on all elements of input.
    '''
    # Fixed docstring: the original said "Mean squared error" but this
    # returns the square root of the mean squared error.
    return np.sqrt(np.mean((v_ - v) ** 2))
def MAE(v, v_):
    '''
    Mean absolute error.
    :param v: np.ndarray or int, ground truth.
    :param v_: np.ndarray or int, prediction.
    :return: int, MAE averages on all elements of input.
    '''
    absolute_errors = np.abs(v_ - v)
    return absolute_errors.mean()
def MSE(v, v_):
    '''
    Mean squared error.
    :param v: np.ndarray or int, ground truth.
    :param v_: np.ndarray or int, prediction.
    :return: int, MSE averages on all elements of input.
    '''
    # Fixed docstring: the original :return: line said "RMSE" but this
    # returns the (non-rooted) mean squared error.
    return np.mean((v_ - v) ** 2)
def _sliding_windows(data, hist_len, pred_len):
    """Cut a (T, H, W) series into aligned (history, prediction) window pairs."""
    row_total = data.shape[0] - hist_len - pred_len
    xs = [data[np.newaxis, i:i + hist_len, :, :] for i in range(row_total)]
    ys = [data[np.newaxis, i + hist_len:i + hist_len + pred_len, :, :] for i in range(row_total)]
    return np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)
def get_data(hist_len, pred_len):
    """Load STDN train/test data and window it into (x, y) tensors.

    :param hist_len: number of history frames per input window.
    :param pred_len: number of frames to predict per target window.
    :return: ((train_x, train_y), (test_x, test_y)) numpy arrays.
    """
    sampler = file_loader()
    # Channel 1 of the sampled tensor is the series of interest —
    # presumably the demand/flow channel; TODO confirm against file_loader.
    data = sampler.sample_stdn(datatype="train", hist_len=hist_len, pred_len=pred_len)
    train_x, train_y = _sliding_windows(np.squeeze(data[:, :, :, 1]), hist_len, pred_len)
    data = sampler.sample_stdn(datatype="test", hist_len=hist_len, pred_len=pred_len)
    test_x, test_y = _sliding_windows(np.squeeze(data[:, :, :, 1]), hist_len, pred_len)
    return (train_x, train_y), (test_x, test_y)
def train_generator(x, y, batch_size):
    """Yield augmented (x_batch, y_batch) mini-batches from (x, y) forever."""
    augmenter = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        zca_epsilon=1e-06,
        rotation_range=0.1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.0,
        zoom_range=0.1,
        channel_shift_range=0.0,
        fill_mode='nearest',
        cval=0.0,
        horizontal_flip=True,
        vertical_flip=False,
        rescale=None,
        preprocessing_function=None,
        data_format=None,
    )  # shift up to 2 pixel for MNIST
    augmenter.fit(x)
    batches = augmenter.flow(x, y, batch_size=batch_size)
    while True:
        x_batch, y_batch = batches.next()
        yield (x_batch, y_batch)
class CapsNet(Model):
    """
    A class used to manage the original CapsNet architecture.
    ...
    Attributes
    ----------
    model_name: str
        name of the model (only MNIST provided)
    mode: str
        model modality (Ex. 'test')
    config_path: str
        path configuration file
    verbose: bool
    n_routing: int
        number of routing interations
    Methods
    -------
    load_graph():
        load the network graph given the model_name
    train():
        train the constructed network with a given dataset. All train hyperparameters are defined in the configuration file
    """
    def __init__(self, model_name, mode='test', config_path='config.json', custom_path=None, verbose=True, n_routing=3):
        Model.__init__(self, model_name, mode, config_path, verbose)
        self.n_routing = n_routing
        # NOTE: Model.__init__ already calls load_config(); the redundant
        # second call the original made here has been removed.
        if custom_path is not None:
            self.model_path = custom_path
        else:
            # Fixed copy-paste bug: the original used the "efficient_capsnet_"
            # prefix here, inconsistent with the "original_capsnet_" prefix of
            # model_path_new_train and tb_path below (weights saved by this
            # class would never have been found on reload).
            self.model_path = os.path.join(self.config['saved_model_dir'], f"original_capsnet_{self.model_name}.h5")
        self.model_path_new_train = os.path.join(self.config['saved_model_dir'], f"original_capsnet_{self.model_name}_new_train.h5")
        self.tb_path = os.path.join(self.config['tb_log_save_dir'], f"original_capsnet_{self.model_name}")
        self.load_graph()
    def load_graph(self):
        """Build the original CapsNet graph for MNIST-shaped inputs."""
        self.model = original_capsnet_graph_mnist.build_graph(self.config['MNIST_INPUT_SHAPE'], self.mode, self.n_routing, self.verbose)
    def train(self, dataset=None, initial_epoch=0):
        """Train the network; hyperparameters come from the config file.

        :param dataset: optional Dataset instance; built from config if None.
        :param initial_epoch: epoch to resume from.
        :return: the Keras History object from model.fit.
        """
        callbacks = get_callbacks(self.tb_path, self.model_path_new_train, self.config['lr_dec'], self.config['lr'])
        if dataset is None:
            dataset = Dataset(self.model_name, self.config_path)
        dataset_train, dataset_val = dataset.get_tf_data()
        # Margin loss on the class capsules plus an MSE reconstruction term
        # weighted by lmd_gen.
        self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['lr']),
            loss=[marginLoss, 'mse'],
            loss_weights=[1., self.config['lmd_gen']],
            metrics={'Original_CapsNet': 'accuracy'})
        print('-'*30 + f'{self.model_name} train' + '-'*30)
        history = self.model.fit(dataset_train,
          epochs=self.config['epochs'],
          validation_data=(dataset_val), batch_size=self.config['batch_size'], initial_epoch=initial_epoch,
          callbacks=callbacks)
        return history
| [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.abs",
"numpy.argmax",
"utils.tools.multiAccuracy",
"numpy.mean",
"models.fileloader.file_loader",
"os.path.join",
"sys.path.append",
"utils.tools.get_callbacks",
"utils.pre_process_multimnist.generate_tf_data_test",
"tensorflow.ke... | [((1262, 1283), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (1277, 1283), False, 'import sys\n'), ((8069, 8091), 'numpy.mean', 'np.mean', (['((v_ - v) ** 2)'], {}), '((v_ - v) ** 2)\n', (8076, 8091), True, 'import numpy as np\n'), ((8146, 8159), 'models.fileloader.file_loader', 'file_loader', ([], {}), '()\n', (8157, 8159), False, 'from models.fileloader import file_loader\n'), ((8262, 8290), 'numpy.squeeze', 'np.squeeze', (['data[:, :, :, 1]'], {}), '(data[:, :, :, 1])\n', (8272, 8290), True, 'import numpy as np\n'), ((8434, 8465), 'numpy.concatenate', 'np.concatenate', (['train_x'], {'axis': '(0)'}), '(train_x, axis=0)\n', (8448, 8465), True, 'import numpy as np\n'), ((8577, 8608), 'numpy.concatenate', 'np.concatenate', (['train_y'], {'axis': '(0)'}), '(train_y, axis=0)\n', (8591, 8608), True, 'import numpy as np\n'), ((8713, 8741), 'numpy.squeeze', 'np.squeeze', (['data[:, :, :, 1]'], {}), '(data[:, :, :, 1])\n', (8723, 8741), True, 'import numpy as np\n'), ((8883, 8913), 'numpy.concatenate', 'np.concatenate', (['test_x'], {'axis': '(0)'}), '(test_x, axis=0)\n', (8897, 8913), True, 'import numpy as np\n'), ((9023, 9053), 'numpy.concatenate', 'np.concatenate', (['test_y'], {'axis': '(0)'}), '(test_y, axis=0)\n', (9037, 9053), True, 'import numpy as np\n'), ((9165, 9631), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(False)', 'samplewise_center': '(False)', 'featurewise_std_normalization': '(False)', 'samplewise_std_normalization': '(False)', 'zca_whitening': '(False)', 'zca_epsilon': '(1e-06)', 'rotation_range': '(0.1)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'shear_range': '(0.0)', 'zoom_range': '(0.1)', 'channel_shift_range': '(0.0)', 'fill_mode': '"""nearest"""', 'cval': '(0.0)', 'horizontal_flip': '(True)', 'vertical_flip': '(False)', 'rescale': 'None', 'preprocessing_function': 'None', 'data_format': 'None'}), 
"(featurewise_center=False, samplewise_center=False,\n featurewise_std_normalization=False, samplewise_std_normalization=False,\n zca_whitening=False, zca_epsilon=1e-06, rotation_range=0.1,\n width_shift_range=0.1, height_shift_range=0.1, shear_range=0.0,\n zoom_range=0.1, channel_shift_range=0.0, fill_mode='nearest', cval=0.0,\n horizontal_flip=True, vertical_flip=False, rescale=None,\n preprocessing_function=None, data_format=None)\n", (9183, 9631), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((5235, 5335), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""efficient_capsnet{self.model_name}_new_train.h5"""'], {}), "(self.config['saved_model_dir'],\n f'efficient_capsnet{self.model_name}_new_train.h5')\n", (5247, 5335), False, 'import os\n'), ((5355, 5443), 'os.path.join', 'os.path.join', (["self.config['tb_log_save_dir']", 'f"""efficient_capsnet_{self.model_name}"""'], {}), "(self.config['tb_log_save_dir'],\n f'efficient_capsnet_{self.model_name}')\n", (5367, 5443), False, 'import os\n'), ((5533, 5627), 'models.efficient_capsnet_graph_mnist.build_graph', 'efficient_capsnet_graph_mnist.build_graph', (['inputs.shape', 'pred_len', 'self.mode', 'self.verbose'], {}), '(inputs.shape, pred_len, self.mode,\n self.verbose)\n', (5574, 5627), False, 'from models import efficient_capsnet_graph_mnist, efficient_capsnet_graph_smallnorb, efficient_capsnet_graph_multimnist, original_capsnet_graph_mnist\n'), ((7578, 7600), 'numpy.mean', 'np.mean', (['((v_ - v) ** 2)'], {}), '((v_ - v) ** 2)\n', (7585, 7600), True, 'import numpy as np\n'), ((7830, 7844), 'numpy.abs', 'np.abs', (['(v_ - v)'], {}), '(v_ - v)\n', (7836, 7844), True, 'import numpy as np\n'), ((11221, 11321), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""original_capsnet_{self.model_name}_new_train.h5"""'], {}), "(self.config['saved_model_dir'],\n f'original_capsnet_{self.model_name}_new_train.h5')\n", (11233, 11321), False, 'import 
os\n'), ((11341, 11428), 'os.path.join', 'os.path.join', (["self.config['tb_log_save_dir']", 'f"""original_capsnet_{self.model_name}"""'], {}), "(self.config['tb_log_save_dir'],\n f'original_capsnet_{self.model_name}')\n", (11353, 11428), False, 'import os\n'), ((11504, 11623), 'models.original_capsnet_graph_mnist.build_graph', 'original_capsnet_graph_mnist.build_graph', (["self.config['MNIST_INPUT_SHAPE']", 'self.mode', 'self.n_routing', 'self.verbose'], {}), "(self.config['MNIST_INPUT_SHAPE'],\n self.mode, self.n_routing, self.verbose)\n", (11544, 11623), False, 'from models import efficient_capsnet_graph_mnist, efficient_capsnet_graph_smallnorb, efficient_capsnet_graph_multimnist, original_capsnet_graph_mnist\n'), ((11701, 11802), 'utils.tools.get_callbacks', 'get_callbacks', (['self.tb_path', 'self.model_path_new_train', "self.config['lr_dec']", "self.config['lr']"], {}), "(self.tb_path, self.model_path_new_train, self.config['lr_dec'\n ], self.config['lr'])\n", (11714, 11802), False, 'from utils.tools import get_callbacks, marginLoss, multiAccuracy\n'), ((2520, 2545), 'json.load', 'json.load', (['json_data_file'], {}), '(json_data_file)\n', (2529, 2545), False, 'import json\n'), ((3026, 3169), 'utils.pre_process_multimnist.generate_tf_data_test', 'pre_process_multimnist.generate_tf_data_test', (['X_test', 'y_test', "self.config['shift_multimnist']"], {'n_multi': "self.config['n_overlay_multimnist']"}), "(X_test, y_test, self.config[\n 'shift_multimnist'], n_multi=self.config['n_overlay_multimnist'])\n", (3070, 3169), False, 'from utils import pre_process_multimnist\n'), ((3379, 3391), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (3386, 3391), True, 'import numpy as np\n'), ((5111, 5202), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""efficient_capsnet_{self.model_name}.h5"""'], {}), "(self.config['saved_model_dir'],\n f'efficient_capsnet_{self.model_name}.h5')\n", (5123, 5202), False, 'import os\n'), ((7320, 7334), 'numpy.abs', 
'np.abs', (['(v_ - v)'], {}), '(v_ - v)\n', (7326, 7334), True, 'import numpy as np\n'), ((11097, 11188), 'os.path.join', 'os.path.join', (["self.config['saved_model_dir']", 'f"""efficient_capsnet_{self.model_name}.h5"""'], {}), "(self.config['saved_model_dir'],\n f'efficient_capsnet_{self.model_name}.h5')\n", (11109, 11188), False, 'import os\n'), ((11857, 11899), 'utils.dataset.Dataset', 'Dataset', (['self.model_name', 'self.config_path'], {}), '(self.model_name, self.config_path)\n', (11864, 11899), False, 'from utils.dataset import Dataset\n'), ((6064, 6086), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (6079, 6086), False, 'from tensorflow.keras import layers, models, optimizers, metrics\n'), ((6835, 6849), 'numpy.array', 'np.array', (['maes'], {}), '(maes)\n', (6843, 6849), True, 'import numpy as np\n'), ((6873, 6887), 'numpy.array', 'np.array', (['maes'], {}), '(maes)\n', (6881, 6887), True, 'import numpy as np\n'), ((6911, 6925), 'numpy.array', 'np.array', (['maes'], {}), '(maes)\n', (6919, 6925), True, 'import numpy as np\n'), ((6983, 6998), 'numpy.array', 'np.array', (['rmses'], {}), '(rmses)\n', (6991, 6998), True, 'import numpy as np\n'), ((7022, 7037), 'numpy.array', 'np.array', (['rmses'], {}), '(rmses)\n', (7030, 7037), True, 'import numpy as np\n'), ((7061, 7076), 'numpy.array', 'np.array', (['rmses'], {}), '(rmses)\n', (7069, 7076), True, 'import numpy as np\n'), ((12011, 12068), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': "self.config['lr']"}), "(learning_rate=self.config['lr'])\n", (12035, 12068), True, 'import tensorflow as tf\n'), ((3335, 3359), 'utils.tools.multiAccuracy', 'multiAccuracy', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3348, 3359), False, 'from utils.tools import get_callbacks, marginLoss, multiAccuracy\n'), ((6124, 6150), 'tensorflow.keras.metrics.MeanSquaredError', 'metrics.MeanSquaredError', ([], {}), '()\n', (6148, 6150), False, 'from 
tensorflow.keras import layers, models, optimizers, metrics\n'), ((3487, 3507), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (3496, 3507), True, 'import numpy as np\n'), ((3511, 3531), 'numpy.argmax', 'np.argmax', (['y_test', '(1)'], {}), '(y_test, 1)\n', (3520, 3531), True, 'import numpy as np\n')] |
# coding: utf-8
# refer to https://blog.csdn.net/zzzzjh/article/details/80633573
import numpy as np
import random
import matplotlib.pyplot as plt
import time
class GA(object):
    """Binary-encoded genetic algorithm minimising a 1-D objective function.

    Candidates are fixed-length bit strings decoded into the real interval
    [bounds_begin, bounds_end] with a resolution of roughly 1e-4.
    """

    def __init__(self, x_range, fitness_function, pop_size, iteration_max, p_crossover, p_mutation, plot):
        self.bounds_begin = x_range[0]   # lower bound of the search interval
        self.bounds_end = x_range[1]     # upper bound of the search interval
        self.fitness_function = fitness_function
        # Chromosome length chosen so the decoded step size is about 1e-4.
        self.bit_length = int(np.log2((self.bounds_end - self.bounds_begin) / 0.0001)) + 1
        self.pop_size = pop_size
        self.iteration_max = iteration_max
        self.p_crossover = p_crossover
        self.p_mutation = p_mutation
        # Random initial population of bit strings.
        self.population = np.random.randint(0, 2, size=(self.pop_size, self.bit_length))
        self.plot = plot

    def fitness(self, population):
        """Return the cumulative selection probabilities (roulette wheel)."""
        fit_value = []
        for chromosome in population:
            decoded = self.transform2to10(chromosome)
            real_x = self.bounds_begin + decoded * (self.bounds_end - self.bounds_begin) / (pow(2, self.bit_length) - 1)
            fit_value.append(self.fitness_function(real_x))
        total = sum(fit_value)
        shares = [v / total for v in fit_value]
        # Running cumulative sum of the normalised fitness values.
        cumsump = []
        running = 0.0
        for share in shares:
            running = running + share
            cumsump.append(running)
        return cumsump

    def select(self, cumsump):
        """Roulette-wheel pick of two parent indices."""
        seln = []
        for _ in range(2):
            idx = 1
            r = np.random.uniform(0, 1)
            # Walk the wheel until the cumulative probability exceeds r.
            # NOTE: the walk starts at index 1, as in the original code.
            while cumsump[idx] - r < 0:
                idx = idx + 1
            seln.append(idx)
        return seln

    def crossover(self, seln, pc):
        """Single-point crossover of the two selected parents with probability pc."""
        child_a = self.population[seln[1]].copy()
        child_b = self.population[seln[0]].copy()
        r = np.random.uniform()
        if r < pc:
            point = np.random.randint(1, self.bit_length - 1)
            tail_of_second = self.population[seln[1]][point:]
            tail_of_first = self.population[seln[0]][point:]
            # Swap the tails beyond the crossover point.
            child_a[point:] = tail_of_first
            child_b[point:] = tail_of_second
            return child_a, child_b
        # No crossover: return the parents themselves.
        return self.population[seln[1]], self.population[seln[0]]

    def mutation(self, scnew, p_mutation):
        """Flip one random bit of the chromosome with probability p_mutation."""
        r = np.random.uniform(0, 1)
        if r < p_mutation:
            pos = np.random.randint(0, self.bit_length)
            scnew[pos] = abs(scnew[pos] - 1)
        return scnew

    def transform2to10(self, population):
        """Decode a bit string (most significant bit first) into an integer."""
        bits = population.copy().tolist()
        bits.reverse()  # bits[j] is now the coefficient of 2**j
        value = 0
        for j in range(self.bit_length):
            value = value + bits[j] * 2 ** j
        return value

    def slover(self):
        """Run the evolution loop and return the best decoded x found.

        NOTE(review): offspring accumulate in `children` across generations,
        so the population (and the roulette wheel) grows every iteration;
        the per-generation plot only looks at the newest pop_size
        individuals.  Preserved as-is from the original implementation.
        """
        children = []
        width = self.bounds_end - self.bounds_begin

        # Report the decoded value of the first individual as the start point.
        decode_dna = self.transform2to10(self.population[0])
        curr_x = self.bounds_begin + decode_dna * width / (pow(2, self.bit_length) - 1)
        print('Init x: ', curr_x)

        cumsump = self.fitness(self.population)
        start_time = time.time()
        for gen in range(self.iteration_max):
            # Produce pop_size offspring per generation, in pairs.
            for _ in range(0, self.pop_size, 2):
                seln = self.select(cumsump)                       # parent indices
                offspring = self.crossover(seln, self.p_crossover)
                children.append(self.mutation(offspring[0], self.p_mutation))
                children.append(self.mutation(offspring[1], self.p_mutation))
            self.population = children
            cumsump = self.fitness(self.population)

            # Decode every chromosome and evaluate the objective.
            x_list = []
            for dna in self.population:
                decode_dna = self.transform2to10(dna)
                x_list.append(self.bounds_begin + decode_dna * width / (pow(2, self.bit_length) - 1))
            y_list = [self.fitness_function(xv) for xv in x_list]

            # Track the best (minimum-objective) individual.
            fmin = y_list.index(min(y_list))
            x = x_list[fmin]
            x_list_in_gen = x_list[-(self.pop_size):]
            y_list_in_gen = y_list[-(self.pop_size):]
            if gen % 100 == 0:
                print('Current x: ', x)
            # Optional live animation of the latest generation.
            if self.plot == True:
                best_x = x
                best_y = self.fitness_function(x)
                scatter_gen = plt.scatter(x_list_in_gen, y_list_in_gen, c='b')
                scatter_best = plt.scatter(best_x, best_y, c='r')
                plt.pause(0.01)
                scatter_gen.remove()
                scatter_best.remove()
        end_time = time.time() - start_time
        print('Optimal x: ', x)
        print('Computing time:', end_time)
        return x
return x | [
"numpy.random.uniform",
"numpy.log2",
"matplotlib.pyplot.scatter",
"time.time",
"numpy.random.randint",
"matplotlib.pyplot.pause"
] | [((735, 797), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(self.pop_size, self.bit_length)'}), '(0, 2, size=(self.pop_size, self.bit_length))\n', (752, 797), True, 'import numpy as np\n'), ((2013, 2032), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2030, 2032), True, 'import numpy as np\n'), ((2484, 2507), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2501, 2507), True, 'import numpy as np\n'), ((3336, 3347), 'time.time', 'time.time', ([], {}), '()\n', (3345, 3347), False, 'import time\n'), ((1672, 1695), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1689, 1695), True, 'import numpy as np\n'), ((2068, 2109), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.bit_length - 1)'], {}), '(1, self.bit_length - 1)\n', (2085, 2109), True, 'import numpy as np\n'), ((2551, 2588), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.bit_length'], {}), '(0, self.bit_length)\n', (2568, 2588), True, 'import numpy as np\n'), ((5108, 5119), 'time.time', 'time.time', ([], {}), '()\n', (5117, 5119), False, 'import time\n'), ((469, 524), 'numpy.log2', 'np.log2', (['((self.bounds_end - self.bounds_begin) / 0.0001)'], {}), '((self.bounds_end - self.bounds_begin) / 0.0001)\n', (476, 524), True, 'import numpy as np\n'), ((4860, 4908), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_list_in_gen', 'y_list_in_gen'], {'c': '"""b"""'}), "(x_list_in_gen, y_list_in_gen, c='b')\n", (4871, 4908), True, 'import matplotlib.pyplot as plt\n'), ((4938, 4988), 'matplotlib.pyplot.scatter', 'plt.scatter', (['scatter_best_x', 'scatter_best_y'], {'c': '"""r"""'}), "(scatter_best_x, scatter_best_y, c='r')\n", (4949, 4988), True, 'import matplotlib.pyplot as plt\n'), ((5007, 5022), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (5016, 5022), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import random
from .helpers import normalize
class Env:
    """Linear stochastic bandit environment on the l2 ball.

    The hidden parameter theta has l2 norm B; the expected reward of an
    action x is the dot product <theta, x>, corrupted by the configured
    observation noise.
    """

    def __init__(self, n=5, d=5, B=1.0, noise='normal'):
        self.n = n      # number of arms offered per round
        self.d = d      # ambient dimension
        self.B = B      # norm of the hidden parameter
        # Draw the hidden parameter theta.
        self.init_kernel()
        # Configure the noise distribution.
        self.noise = noise
        self.init_noise()
        self.actions = []
        # Regret monitor.
        self.regrets = []

    def init_kernel(self):
        """Draw theta uniformly in [0, 1)^d and rescale it to norm B."""
        sample = np.random.random_sample((1, self.d))
        norms = np.linalg.norm(sample, ord=2, axis=1, keepdims=True)
        self.theta = sample * self.B / norms

    def init_noise(self):
        """Set the noise parameters for the chosen noise model."""
        if self.noise == 'normal':
            self.rho = 1.0      # std-dev of the Gaussian observation noise

    def f(self, x):
        """Expected reward of action x: the dot product <theta, x>."""
        return np.dot(self.theta, x)

    def sample_action(self):
        """Draw one random action normalised onto the l2 unit ball."""
        candidate = np.random.random_sample((1, self.d))
        return normalize(candidate, p=2)

    def get_action_set(self):
        """Sample a round's action set: one near-optimal arm (f >= 0.8)
        plus sub-optimal arms (f <= 0.7), shuffled together."""
        opt_x = None
        subopt_x = []
        tries = 0
        while opt_x is None or len(subopt_x) < self.n - 1:
            candidate = np.random.random_sample((1, self.d))
            candidate = normalize(candidate, p=2)
            tries += 1
            reward = self.f(candidate)
            if reward >= 0.8:
                opt_x = candidate
            if reward <= 0.7:
                subopt_x.append(candidate)
            if tries > 500:
                # The current theta makes a near-optimal arm too rare;
                # redraw it and restart the counter.
                print('Restarting kernel init')
                tries = 0
                self.init_kernel()
        print('Tries for optimal arm: %d' % tries)
        actions = [opt_x]
        actions.extend(subopt_x)
        random.shuffle(actions)
        # Record the optimal arm and its expected reward for regret bookkeeping.
        round_rewards = [self.f(a) for a in actions]
        self.opt_x = np.argmax(round_rewards)
        self.opt_r = round_rewards[self.opt_x]
        self.actions = actions
        return actions

    def sample_noise(self, f_x):
        """Corrupt the expected reward f_x according to the noise model."""
        if self.noise == 'normal':
            return f_x + np.random.normal(scale=self.rho)
        if self.noise == 'bernoulli':
            # f_x is interpreted as a success probability here.
            return np.random.binomial(1, f_x)

    def play(self, x_t):
        """Play action x_t; return the noisy reward and the instantaneous regret."""
        expected = self.f(x_t)
        y_t = self.sample_noise(expected)
        r_t = self.opt_r - expected
        return y_t, r_t
class GridEnv(Env):
    """Env variant intended to draw actions from a fixed grid.

    The grid-based sampling is not implemented yet; the override below is a
    stub that, like the original `pass` body, simply returns None.
    """

    def get_action_set(self):
        # Placeholder: grid sampling not implemented.
        return None
"numpy.random.binomial",
"numpy.random.random_sample",
"numpy.argmax",
"random.shuffle",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.dot"
] | [((540, 576), 'numpy.random.random_sample', 'np.random.random_sample', (['(1, self.d)'], {}), '((1, self.d))\n', (563, 576), True, 'import numpy as np\n'), ((594, 642), 'numpy.linalg.norm', 'np.linalg.norm', (['_x'], {'ord': '(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(_x, ord=2, axis=1, keepdims=True)\n', (608, 642), True, 'import numpy as np\n'), ((865, 886), 'numpy.dot', 'np.dot', (['self.theta', 'x'], {}), '(self.theta, x)\n', (871, 886), True, 'import numpy as np\n'), ((935, 971), 'numpy.random.random_sample', 'np.random.random_sample', (['(1, self.d)'], {}), '((1, self.d))\n', (958, 971), True, 'import numpy as np\n'), ((1785, 1808), 'random.shuffle', 'random.shuffle', (['actions'], {}), '(actions)\n', (1799, 1808), False, 'import random\n'), ((1944, 1968), 'numpy.argmax', 'np.argmax', (['round_rewards'], {}), '(round_rewards)\n', (1953, 1968), True, 'import numpy as np\n'), ((1248, 1284), 'numpy.random.random_sample', 'np.random.random_sample', (['(1, self.d)'], {}), '((1, self.d))\n', (1271, 1284), True, 'import numpy as np\n'), ((2269, 2295), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'f_x'], {}), '(1, f_x)\n', (2287, 2295), True, 'import numpy as np\n'), ((2170, 2202), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'self.rho'}), '(scale=self.rho)\n', (2186, 2202), True, 'import numpy as np\n')] |
import json
import time
import cv2
import numpy as np
import torch
import torch.nn as nn
from nets.retinaface import RetinaFace
from utils.anchors import Anchors
from utils.config import cfg_mnet, cfg_re50
from utils.utils import letterbox_image, preprocess_input
from utils.utils_bbox import (decode, decode_landm, non_max_suppression,
retinaface_correct_boxes)
#------------------------------------#
#   Make sure the backbone network matches the pre-trained weights,
#   i.e. remember to edit model_path and backbone together.
#------------------------------------#
class Retinaface(object):
_defaults = {
#---------------------------------------------------------------------#
# 使用自己训练好的模型进行预测一定要修改model_path
# model_path指向logs文件夹下的权值文件
# 训练好后logs文件夹下存在多个权值文件,选择损失较低的即可。
#---------------------------------------------------------------------#
# "model_path" : 'model_data/Retinaface_mobilenet0.25.pth',
"model_path": 'logs/2022_02_28_09_58_03/Epoch89-Val_Min_Loss1.9594.pth',
#---------------------------------------------------------------------#
# 所使用的的主干网络:mobilenet、resnet50
#---------------------------------------------------------------------#
"backbone" : 'mobilenet',
#---------------------------------------------------------------------#
# 只有得分大于置信度的预测框会被保留下来
#---------------------------------------------------------------------#
"confidence" : 0.5,
#---------------------------------------------------------------------#
# 非极大抑制所用到的nms_iou大小
#---------------------------------------------------------------------#
"nms_iou" : 0.45,
#---------------------------------------------------------------------#
# 是否需要进行图像大小限制。
# 开启后,会将输入图像的大小限制为input_shape。否则使用原图进行预测。
# 可根据输入图像的大小自行调整input_shape,注意为32的倍数,如[640, 640, 3]
#---------------------------------------------------------------------#
"input_shape" : [1280, 1280, 3],
#---------------------------------------------------------------------#
# 是否需要进行图像大小限制。
#---------------------------------------------------------------------#
"letterbox_image" : True,
#--------------------------------#
# 是否使用Cuda
# 没有GPU可以设置成False
#--------------------------------#
"cuda" : True,
"json_path" : "./data/classes.json"
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
    #---------------------------------------------------#
    #   Initialise Retinaface
    #---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
for name, value in kwargs.items():
setattr(self, name, value)
#---------------------------------------------------#
# 不同主干网络的config信息
#---------------------------------------------------#
if self.backbone == "mobilenet":
self.cfg = cfg_mnet
else:
self.cfg = cfg_re50
#---------------------------------------------------#
# 先验框的生成
#---------------------------------------------------#
if self.letterbox_image:
self.anchors = Anchors(self.cfg, image_size=[self.input_shape[0], self.input_shape[1]]).get_anchors()
self.generate()
    #---------------------------------------------------#
    #   Load the model
    #---------------------------------------------------#
def generate(self):
#-------------------------------#
# 载入模型与权值
#-------------------------------#
self.net = RetinaFace(cfg=self.cfg, mode='eval').eval()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.net.load_state_dict(torch.load(self.model_path, map_location=device))
self.net = self.net.eval()
print('{} model, and classes loaded.'.format(self.model_path))
if self.cuda:
self.net = nn.DataParallel(self.net)
self.net = self.net.cuda()
    #---------------------------------------------------#
    #   Detect an image
    #---------------------------------------------------#
def detect_image(self, image, validate=False, img_name=None, des_dir=None):
json_file = open(self.json_path, 'r')
class_dict = json.load(json_file)
json_file.close()
category_index = {v: k for k, v in class_dict.items()}
#---------------------------------------------------#
# 对输入图像进行一个备份,后面用于绘图
#---------------------------------------------------#
old_image = image.copy()
#---------------------------------------------------#
# 把图像转换成numpy的形式
#---------------------------------------------------#
image = np.array(image,np.float32)
#---------------------------------------------------#
# 计算输入图片的高和宽
#---------------------------------------------------#
im_height, im_width, _ = np.shape(image)
#---------------------------------------------------#
# 计算scale,用于将获得的预测框转换成原图的高宽
#---------------------------------------------------#
scale = [
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0]
]
scale_for_landmarks = [
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
]
#---------------------------------------------------------#
# letterbox_image可以给图像增加灰条,实现不失真的resize
#---------------------------------------------------------#
if self.letterbox_image:
image = letterbox_image(image, [self.input_shape[1], self.input_shape[0]])
else:
self.anchors = Anchors(self.cfg, image_size=(im_height, im_width)).get_anchors()
with torch.no_grad():
#-----------------------------------------------------------#
# 图片预处理,归一化。
#-----------------------------------------------------------#
image = torch.from_numpy(preprocess_input(image).transpose(2, 0, 1)).unsqueeze(0).type(torch.FloatTensor)
if self.cuda:
self.anchors = self.anchors.cuda()
image = image.cuda()
#---------------------------------------------------------#
# 传入网络进行预测
#---------------------------------------------------------#
loc, classes, landms, conf = self.net(image)
#-----------------------------------------------------------#
# 对预测框进行解码
#-----------------------------------------------------------#
boxes = decode(loc.data.squeeze(0), self.anchors, self.cfg['variance'])
classes = classes.data.squeeze(0)
#-----------------------------------------------------------#
# 获得预测结果的置信度
#-----------------------------------------------------------#
conf = conf.data.squeeze(0).unsqueeze(1)
#-----------------------------------------------------------#
# 对人脸关键点进行解码
#-----------------------------------------------------------#
landms = decode_landm(landms.data.squeeze(0), self.anchors, self.cfg['variance'])
#-----------------------------------------------------------#
# 对人脸识别结果进行堆叠
#-----------------------------------------------------------#
loc_c_conf_landms = torch.cat([boxes, classes, conf, landms], -1)
loc_c_conf_landms = non_max_suppression(loc_c_conf_landms, self.confidence)
if len(loc_c_conf_landms) <= 0:
return old_image
#---------------------------------------------------------#
# 如果使用了letterbox_image的话,要把灰条的部分去除掉。
#---------------------------------------------------------#
if self.letterbox_image:
loc_c_conf_landms = retinaface_correct_boxes(loc_c_conf_landms, \
np.array([self.input_shape[0], self.input_shape[1]]), np.array([im_height, im_width]))
loc_c_conf_landms[:, :4] = loc_c_conf_landms[:, :4] * scale
loc_c_conf_landms[:, 8:] = loc_c_conf_landms[:, 8:] * scale_for_landmarks
if validate:
with open(des_dir + img_name.replace('jpg', 'txt'), 'w') as file:
for label in loc_c_conf_landms:
class_idx = label[5:7].argmax(0) + 1
clazz = category_index[class_idx]
conf = label[4 + class_idx]
print('%s %s %f'%(img_name, clazz, conf))
file.write('%s %f %d %d %d %d\n' % (clazz, conf, label[0], label[1], label[2], label[3]))
return
for b in loc_c_conf_landms:
class_idx = b[5:7].argmax(0) + 1
clazz = category_index[class_idx]
text = "{} {:.2%}".format(clazz, b[4 + class_idx])
b = list(map(int, b))
#---------------------------------------------------#
# b[0]-b[3]为人脸框的坐标,b[4 + class_idx]为得分
#---------------------------------------------------#
cv2.rectangle(old_image, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
cx = b[0]
cy = b[1] + 12
cv2.putText(old_image, text, (cx, cy),
cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
print(b[0], b[1], b[2], b[3], clazz)
#---------------------------------------------------#
# b[5]-b[14]为人脸关键点的坐标
#---------------------------------------------------#
for i in range(8, 44,2):
cv2.circle(old_image, (b[i], b[i+1]), 1, (0, 0, 255), 4)
return old_image
    def get_FPS(self, image, test_interval):
        """Measure the average inference time in seconds per image.

        Runs one warm-up pass (pre-process + forward + decode + NMS),
        then times `test_interval` identical passes and returns the mean.

        NOTE(review): `detect_image` unpacks FOUR outputs from self.net
        (loc, classes, landms, conf) while this method unpacks THREE
        (loc, conf, landms) — confirm the network's eval-mode output arity.
        """
        #---------------------------------------------------#
        #   Convert the image to a float32 numpy array.
        #---------------------------------------------------#
        image       = np.array(image,np.float32)
        #---------------------------------------------------#
        #   Height and width of the input image.
        #---------------------------------------------------#
        im_height, im_width, _ = np.shape(image)
        #---------------------------------------------------------#
        #   letterbox_image pads with gray bars for a distortion-free
        #   resize; otherwise anchors are rebuilt for this image size.
        #---------------------------------------------------------#
        if self.letterbox_image:
            image = letterbox_image(image, [self.input_shape[1], self.input_shape[0]])
        else:
            self.anchors = Anchors(self.cfg, image_size=(im_height, im_width)).get_anchors()

        with torch.no_grad():
            #-----------------------------------------------------------#
            #   Pre-process: normalise, HWC -> CHW, add batch dimension.
            #-----------------------------------------------------------#
            image = torch.from_numpy(preprocess_input(image).transpose(2, 0, 1)).unsqueeze(0).type(torch.FloatTensor)
            if self.cuda:
                self.anchors = self.anchors.cuda()
                image = image.cuda()

            #---------------------------------------------------------#
            #   Warm-up forward pass through the network.
            #---------------------------------------------------------#
            loc, conf, landms = self.net(image)
            #-----------------------------------------------------------#
            #   Decode the predicted boxes against the anchors.
            #-----------------------------------------------------------#
            boxes = decode(loc.data.squeeze(0), self.anchors, self.cfg['variance'])
            #-----------------------------------------------------------#
            #   Confidence of each prediction (face class column).
            #-----------------------------------------------------------#
            conf = conf.data.squeeze(0)[:, 1:2]
            #-----------------------------------------------------------#
            #   Decode the facial landmark offsets.
            #-----------------------------------------------------------#
            landms = decode_landm(landms.data.squeeze(0), self.anchors, self.cfg['variance'])
            #-----------------------------------------------------------#
            #   Stack [boxes | conf | landmarks], then NMS.
            #-----------------------------------------------------------#
            boxes_conf_landms = torch.cat([boxes, conf, landms], -1)
            boxes_conf_landms = non_max_suppression(boxes_conf_landms, self.confidence)

        t1 = time.time()
        for _ in range(test_interval):
            with torch.no_grad():
                #---------------------------------------------------------#
                #   Timed forward pass.
                #---------------------------------------------------------#
                loc, conf, landms = self.net(image)
                #-----------------------------------------------------------#
                #   Decode the predicted boxes against the anchors.
                #-----------------------------------------------------------#
                boxes = decode(loc.data.squeeze(0), self.anchors, self.cfg['variance'])
                #-----------------------------------------------------------#
                #   Confidence of each prediction (face class column).
                #-----------------------------------------------------------#
                conf = conf.data.squeeze(0)[:, 1:2]
                #-----------------------------------------------------------#
                #   Decode the facial landmark offsets.
                #-----------------------------------------------------------#
                landms = decode_landm(landms.data.squeeze(0), self.anchors, self.cfg['variance'])
                #-----------------------------------------------------------#
                #   Stack [boxes | conf | landmarks], then NMS.
                #-----------------------------------------------------------#
                boxes_conf_landms = torch.cat([boxes, conf, landms], -1)
                boxes_conf_landms = non_max_suppression(boxes_conf_landms, self.confidence)

        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time
    #---------------------------------------------------#
    #   Detect an image (write results for mAP evaluation)
    #---------------------------------------------------#
def get_map_txt(self, image):
#---------------------------------------------------#
# 把图像转换成numpy的形式
#---------------------------------------------------#
image = np.array(image,np.float32)
#---------------------------------------------------#
# 计算输入图片的高和宽
#---------------------------------------------------#
im_height, im_width, _ = np.shape(image)
#---------------------------------------------------#
# 计算scale,用于将获得的预测框转换成原图的高宽
#---------------------------------------------------#
scale = [
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0]
]
scale_for_landmarks = [
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0], np.shape(image)[1], np.shape(image)[0],
np.shape(image)[1], np.shape(image)[0]
]
#---------------------------------------------------------#
# letterbox_image可以给图像增加灰条,实现不失真的resize
#---------------------------------------------------------#
if self.letterbox_image:
image = letterbox_image(image, [self.input_shape[1], self.input_shape[0]])
else:
self.anchors = Anchors(self.cfg, image_size=(im_height, im_width)).get_anchors()
with torch.no_grad():
#-----------------------------------------------------------#
# 图片预处理,归一化。
#-----------------------------------------------------------#
image = torch.from_numpy(preprocess_input(image).transpose(2, 0, 1)).unsqueeze(0).type(torch.FloatTensor)
if self.cuda:
self.anchors = self.anchors.cuda()
image = image.cuda()
#---------------------------------------------------------#
# 传入网络进行预测
#---------------------------------------------------------#
loc, conf, landms = self.net(image)
#-----------------------------------------------------------#
# 对预测框进行解码
#-----------------------------------------------------------#
boxes = decode(loc.data.squeeze(0), self.anchors, self.cfg['variance'])
#-----------------------------------------------------------#
# 获得预测结果的置信度
#-----------------------------------------------------------#
conf = conf.data.squeeze(0)[:, 1:2]
#-----------------------------------------------------------#
# 对人脸关键点进行解码
#-----------------------------------------------------------#
landms = decode_landm(landms.data.squeeze(0), self.anchors, self.cfg['variance'])
#-----------------------------------------------------------#
# 对人脸识别结果进行堆叠
#-----------------------------------------------------------#
boxes_conf_landms = torch.cat([boxes, conf, landms], -1)
boxes_conf_landms = non_max_suppression(boxes_conf_landms, self.confidence)
if len(boxes_conf_landms) <= 0:
return np.array([])
#---------------------------------------------------------#
# 如果使用了letterbox_image的话,要把灰条的部分去除掉。
#---------------------------------------------------------#
if self.letterbox_image:
boxes_conf_landms = retinaface_correct_boxes(boxes_conf_landms, \
np.array([self.input_shape[0], self.input_shape[1]]), np.array([im_height, im_width]))
boxes_conf_landms[:, :4] = boxes_conf_landms[:, :4] * scale
boxes_conf_landms[:, 5:] = boxes_conf_landms[:, 5:] * scale_for_landmarks
return boxes_conf_landms
| [
"json.load",
"cv2.putText",
"cv2.circle",
"torch.load",
"nets.retinaface.RetinaFace",
"torch.cat",
"time.time",
"numpy.shape",
"utils.utils.letterbox_image",
"cv2.rectangle",
"utils.anchors.Anchors",
"numpy.array",
"torch.cuda.is_available",
"utils.utils_bbox.non_max_suppression",
"torch... | [((4695, 4715), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4704, 4715), False, 'import json\n'), ((5170, 5197), 'numpy.array', 'np.array', (['image', 'np.float32'], {}), '(image, np.float32)\n', (5178, 5197), True, 'import numpy as np\n'), ((5381, 5396), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5389, 5396), True, 'import numpy as np\n'), ((11311, 11338), 'numpy.array', 'np.array', (['image', 'np.float32'], {}), '(image, np.float32)\n', (11319, 11338), True, 'import numpy as np\n'), ((11522, 11537), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (11530, 11537), True, 'import numpy as np\n'), ((13774, 13785), 'time.time', 'time.time', ([], {}), '()\n', (13783, 13785), False, 'import time\n'), ((15289, 15300), 'time.time', 'time.time', ([], {}), '()\n', (15298, 15300), False, 'import time\n'), ((15714, 15741), 'numpy.array', 'np.array', (['image', 'np.float32'], {}), '(image, np.float32)\n', (15722, 15741), True, 'import numpy as np\n'), ((15925, 15940), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (15933, 15940), True, 'import numpy as np\n'), ((4135, 4183), 'torch.load', 'torch.load', (['self.model_path'], {'map_location': 'device'}), '(self.model_path, map_location=device)\n', (4145, 4183), False, 'import torch\n'), ((4345, 4370), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.net'], {}), '(self.net)\n', (4360, 4370), True, 'import torch.nn as nn\n'), ((6811, 6877), 'utils.utils.letterbox_image', 'letterbox_image', (['image', '[self.input_shape[1], self.input_shape[0]]'], {}), '(image, [self.input_shape[1], self.input_shape[0]])\n', (6826, 6877), False, 'from utils.utils import letterbox_image, preprocess_input\n'), ((7015, 7030), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7028, 7030), False, 'import torch\n'), ((8736, 8781), 'torch.cat', 'torch.cat', (['[boxes, classes, conf, landms]', '(-1)'], {}), '([boxes, classes, conf, landms], -1)\n', (8745, 8781), False, 'import 
torch\n'), ((8815, 8870), 'utils.utils_bbox.non_max_suppression', 'non_max_suppression', (['loc_c_conf_landms', 'self.confidence'], {}), '(loc_c_conf_landms, self.confidence)\n', (8834, 8870), False, 'from utils.utils_bbox import decode, decode_landm, non_max_suppression, retinaface_correct_boxes\n'), ((10487, 10555), 'cv2.rectangle', 'cv2.rectangle', (['old_image', '(b[0], b[1])', '(b[2], b[3])', '(0, 0, 255)', '(2)'], {}), '(old_image, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)\n', (10500, 10555), False, 'import cv2\n'), ((10620, 10710), 'cv2.putText', 'cv2.putText', (['old_image', 'text', '(cx, cy)', 'cv2.FONT_HERSHEY_DUPLEX', '(0.5)', '(255, 255, 255)'], {}), '(old_image, text, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, \n 255, 255))\n', (10631, 10710), False, 'import cv2\n'), ((11784, 11850), 'utils.utils.letterbox_image', 'letterbox_image', (['image', '[self.input_shape[1], self.input_shape[0]]'], {}), '(image, [self.input_shape[1], self.input_shape[0]])\n', (11799, 11850), False, 'from utils.utils import letterbox_image, preprocess_input\n'), ((11976, 11991), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11989, 11991), False, 'import torch\n'), ((13620, 13656), 'torch.cat', 'torch.cat', (['[boxes, conf, landms]', '(-1)'], {}), '([boxes, conf, landms], -1)\n', (13629, 13656), False, 'import torch\n'), ((13690, 13745), 'utils.utils_bbox.non_max_suppression', 'non_max_suppression', (['boxes_conf_landms', 'self.confidence'], {}), '(boxes_conf_landms, self.confidence)\n', (13709, 13745), False, 'from utils.utils_bbox import decode, decode_landm, non_max_suppression, retinaface_correct_boxes\n'), ((16756, 16822), 'utils.utils.letterbox_image', 'letterbox_image', (['image', '[self.input_shape[1], self.input_shape[0]]'], {}), '(image, [self.input_shape[1], self.input_shape[0]])\n', (16771, 16822), False, 'from utils.utils import letterbox_image, preprocess_input\n'), ((16960, 16975), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16973, 16975), 
False, 'import torch\n'), ((18604, 18640), 'torch.cat', 'torch.cat', (['[boxes, conf, landms]', '(-1)'], {}), '([boxes, conf, landms], -1)\n', (18613, 18640), False, 'import torch\n'), ((18674, 18729), 'utils.utils_bbox.non_max_suppression', 'non_max_suppression', (['boxes_conf_landms', 'self.confidence'], {}), '(boxes_conf_landms, self.confidence)\n', (18693, 18729), False, 'from utils.utils_bbox import decode, decode_landm, non_max_suppression, retinaface_correct_boxes\n'), ((3970, 4007), 'nets.retinaface.RetinaFace', 'RetinaFace', ([], {'cfg': 'self.cfg', 'mode': '"""eval"""'}), "(cfg=self.cfg, mode='eval')\n", (3980, 4007), False, 'from nets.retinaface import RetinaFace\n'), ((4063, 4088), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4086, 4088), False, 'import torch\n'), ((5594, 5609), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5602, 5609), True, 'import numpy as np\n'), ((5614, 5629), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5622, 5629), True, 'import numpy as np\n'), ((5634, 5649), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5642, 5649), True, 'import numpy as np\n'), ((5654, 5669), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5662, 5669), True, 'import numpy as np\n'), ((5730, 5745), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5738, 5745), True, 'import numpy as np\n'), ((5750, 5765), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5758, 5765), True, 'import numpy as np\n'), ((5770, 5785), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5778, 5785), True, 'import numpy as np\n'), ((5790, 5805), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5798, 5805), True, 'import numpy as np\n'), ((5823, 5838), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5831, 5838), True, 'import numpy as np\n'), ((5843, 5858), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5851, 5858), True, 'import numpy as np\n'), ((5863, 5878), 
'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5871, 5878), True, 'import numpy as np\n'), ((5883, 5898), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5891, 5898), True, 'import numpy as np\n'), ((5916, 5931), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5924, 5931), True, 'import numpy as np\n'), ((5936, 5951), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5944, 5951), True, 'import numpy as np\n'), ((5956, 5971), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5964, 5971), True, 'import numpy as np\n'), ((5976, 5991), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5984, 5991), True, 'import numpy as np\n'), ((6009, 6024), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6017, 6024), True, 'import numpy as np\n'), ((6029, 6044), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6037, 6044), True, 'import numpy as np\n'), ((6049, 6064), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6057, 6064), True, 'import numpy as np\n'), ((6069, 6084), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6077, 6084), True, 'import numpy as np\n'), ((6102, 6117), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6110, 6117), True, 'import numpy as np\n'), ((6122, 6137), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6130, 6137), True, 'import numpy as np\n'), ((6142, 6157), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6150, 6157), True, 'import numpy as np\n'), ((6162, 6177), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6170, 6177), True, 'import numpy as np\n'), ((6195, 6210), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6203, 6210), True, 'import numpy as np\n'), ((6215, 6230), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6223, 6230), True, 'import numpy as np\n'), ((6235, 6250), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6243, 6250), True, 'import numpy as np\n'), ((6255, 6270), 
'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6263, 6270), True, 'import numpy as np\n'), ((6288, 6303), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6296, 6303), True, 'import numpy as np\n'), ((6308, 6323), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6316, 6323), True, 'import numpy as np\n'), ((6328, 6343), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6336, 6343), True, 'import numpy as np\n'), ((6348, 6363), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6356, 6363), True, 'import numpy as np\n'), ((6381, 6396), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6389, 6396), True, 'import numpy as np\n'), ((6401, 6416), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6409, 6416), True, 'import numpy as np\n'), ((6421, 6436), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6429, 6436), True, 'import numpy as np\n'), ((6441, 6456), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6449, 6456), True, 'import numpy as np\n'), ((6474, 6489), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6482, 6489), True, 'import numpy as np\n'), ((6494, 6509), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6502, 6509), True, 'import numpy as np\n'), ((6514, 6529), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6522, 6529), True, 'import numpy as np\n'), ((6534, 6549), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6542, 6549), True, 'import numpy as np\n'), ((11009, 11067), 'cv2.circle', 'cv2.circle', (['old_image', '(b[i], b[i + 1])', '(1)', '(0, 0, 255)', '(4)'], {}), '(old_image, (b[i], b[i + 1]), 1, (0, 0, 255), 4)\n', (11019, 11067), False, 'import cv2\n'), ((13844, 13859), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13857, 13859), False, 'import torch\n'), ((15143, 15179), 'torch.cat', 'torch.cat', (['[boxes, conf, landms]', '(-1)'], {}), '([boxes, conf, landms], -1)\n', (15152, 15179), False, 'import torch\n'), ((15217, 
15272), 'utils.utils_bbox.non_max_suppression', 'non_max_suppression', (['boxes_conf_landms', 'self.confidence'], {}), '(boxes_conf_landms, self.confidence)\n', (15236, 15272), False, 'from utils.utils_bbox import decode, decode_landm, non_max_suppression, retinaface_correct_boxes\n'), ((16138, 16153), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16146, 16153), True, 'import numpy as np\n'), ((16158, 16173), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16166, 16173), True, 'import numpy as np\n'), ((16178, 16193), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16186, 16193), True, 'import numpy as np\n'), ((16198, 16213), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16206, 16213), True, 'import numpy as np\n'), ((16274, 16289), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16282, 16289), True, 'import numpy as np\n'), ((16294, 16309), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16302, 16309), True, 'import numpy as np\n'), ((16314, 16329), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16322, 16329), True, 'import numpy as np\n'), ((16334, 16349), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16342, 16349), True, 'import numpy as np\n'), ((16367, 16382), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16375, 16382), True, 'import numpy as np\n'), ((16387, 16402), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16395, 16402), True, 'import numpy as np\n'), ((16407, 16422), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16415, 16422), True, 'import numpy as np\n'), ((16427, 16442), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16435, 16442), True, 'import numpy as np\n'), ((16460, 16475), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16468, 16475), True, 'import numpy as np\n'), ((16480, 16495), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (16488, 16495), True, 'import numpy as np\n'), ((18801, 18813), 
'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18809, 18813), True, 'import numpy as np\n'), ((3569, 3641), 'utils.anchors.Anchors', 'Anchors', (['self.cfg'], {'image_size': '[self.input_shape[0], self.input_shape[1]]'}), '(self.cfg, image_size=[self.input_shape[0], self.input_shape[1]])\n', (3576, 3641), False, 'from utils.anchors import Anchors\n'), ((6921, 6972), 'utils.anchors.Anchors', 'Anchors', (['self.cfg'], {'image_size': '(im_height, im_width)'}), '(self.cfg, image_size=(im_height, im_width))\n', (6928, 6972), False, 'from utils.anchors import Anchors\n'), ((9294, 9346), 'numpy.array', 'np.array', (['[self.input_shape[0], self.input_shape[1]]'], {}), '([self.input_shape[0], self.input_shape[1]])\n', (9302, 9346), True, 'import numpy as np\n'), ((9348, 9379), 'numpy.array', 'np.array', (['[im_height, im_width]'], {}), '([im_height, im_width])\n', (9356, 9379), True, 'import numpy as np\n'), ((11894, 11945), 'utils.anchors.Anchors', 'Anchors', (['self.cfg'], {'image_size': '(im_height, im_width)'}), '(self.cfg, image_size=(im_height, im_width))\n', (11901, 11945), False, 'from utils.anchors import Anchors\n'), ((16866, 16917), 'utils.anchors.Anchors', 'Anchors', (['self.cfg'], {'image_size': '(im_height, im_width)'}), '(self.cfg, image_size=(im_height, im_width))\n', (16873, 16917), False, 'from utils.anchors import Anchors\n'), ((19156, 19208), 'numpy.array', 'np.array', (['[self.input_shape[0], self.input_shape[1]]'], {}), '([self.input_shape[0], self.input_shape[1]])\n', (19164, 19208), True, 'import numpy as np\n'), ((19210, 19241), 'numpy.array', 'np.array', (['[im_height, im_width]'], {}), '([im_height, im_width])\n', (19218, 19241), True, 'import numpy as np\n'), ((7248, 7271), 'utils.utils.preprocess_input', 'preprocess_input', (['image'], {}), '(image)\n', (7264, 7271), False, 'from utils.utils import letterbox_image, preprocess_input\n'), ((12209, 12232), 'utils.utils.preprocess_input', 'preprocess_input', (['image'], {}), '(image)\n', 
(12225, 12232), False, 'from utils.utils import letterbox_image, preprocess_input\n'), ((17193, 17216), 'utils.utils.preprocess_input', 'preprocess_input', (['image'], {}), '(image)\n', (17209, 17216), False, 'from utils.utils import letterbox_image, preprocess_input\n')] |
# =============================================================================
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from pathlib import Path
from typing import Dict
import cv2 as cv
import numpy as np
import pytest
import torch
import torch.nn.functional as F
import ptlflow
from ptlflow.data.flow_transforms import ToTensor
from ptlflow.utils import flow_utils
from ptlflow.utils.utils import InputPadder
# Results at scale_factor=0.66
# Reference end-point-error (EPE) values used by test_accuracy.
# Key format: '<model_name>_<checkpoint_name>_<evaluation_dataset>'.
# A model's measured EPE must stay within 10% of the value stored here.
reference_accuracy = {
    'dicl_chairs_flyingchairs': 0.675,
    'dicl_chairs_flyingthings3d': 20.257,
    'dicl_chairs_kitti': 24.210,
    'dicl_chairs_sintel': 0.373,
    'dicl_kitti_flyingchairs': 2.210,
    'dicl_kitti_flyingthings3d': 72.860,
    'dicl_kitti_kitti': 3.729,
    'dicl_kitti_sintel': 0.368,
    'dicl_sintel_flyingchairs': 1.809,
    'dicl_sintel_flyingthings3d': 8.649,
    'dicl_sintel_kitti': 7.003,
    'dicl_sintel_sintel': 0.185,
    'dicl_things_flyingchairs': 1.469,
    'dicl_things_flyingthings3d': 6.065,
    'dicl_things_kitti': 17.919,
    'dicl_things_sintel': 0.240,
    'fastflownet_chairs_flyingchairs': 1.534,
    'fastflownet_chairs_flyingthings3d': 21.064,
    'fastflownet_chairs_kitti': 11.806,
    'fastflownet_chairs_sintel': 0.455,
    'fastflownet_kitti_flyingchairs': 2.000,
    'fastflownet_kitti_flyingthings3d': 32.696,
    'fastflownet_kitti_kitti': 2.860,
    'fastflownet_kitti_sintel': 0.378,
    'fastflownet_mix_flyingchairs': 1.423,
    'fastflownet_mix_flyingthings3d': 17.350,
    'fastflownet_mix_kitti': 5.386,
    'fastflownet_mix_sintel': 0.344,
    'fastflownet_sintel_flyingchairs': 1.564,
    'fastflownet_sintel_flyingthings3d': 16.783,
    'fastflownet_sintel_kitti': 13.789,
    'fastflownet_sintel_sintel': 0.349,
    'fastflownet_things_flyingchairs': 1.648,
    'fastflownet_things_flyingthings3d': 20.497,
    'fastflownet_things_kitti': 12.205,
    'fastflownet_things_sintel': 0.434,
    'flownet2_things_flyingchairs': 1.986,
    'flownet2_things_flyingthings3d': 10.010,
    'flownet2_things_kitti': 16.391,
    'flownet2_things_sintel': 0.551,
    'flownetc_things_flyingchairs': 2.803,
    'flownetc_things_flyingthings3d': 12.762,
    'flownetc_things_kitti': 15.847,
    'flownetc_things_sintel': 1.149,
    'flownetcs_things_flyingchairs': 1.759,
    'flownetcs_things_flyingthings3d': 8.329,
    'flownetcs_things_kitti': 13.826,
    'flownetcs_things_sintel': 0.390,
    'flownetcss_things_flyingchairs': 1.637,
    'flownetcss_things_flyingthings3d': 7.818,
    'flownetcss_things_kitti': 14.002,
    'flownetcss_things_sintel': 0.316,
    'flownets_things_flyingchairs': 1.828,
    'flownets_things_flyingthings3d': 15.145,
    'flownets_things_kitti': 13.089,
    'flownets_things_sintel': 0.857,
    'flownetsd_things_flyingchairs': 1.814,
    'flownetsd_things_flyingthings3d': 34.579,
    'flownetsd_things_kitti': 22.438,
    'flownetsd_things_sintel': 0.255,
    'hd3_chairs_flyingchairs': 0.865,
    'hd3_chairs_flyingthings3d': 31.540,
    'hd3_chairs_kitti': 24.647,
    'hd3_chairs_sintel': 0.534,
    'hd3_things_flyingchairs': 1.318,
    'hd3_things_flyingthings3d': 8.197,
    'hd3_things_kitti': 12.478,
    'hd3_things_sintel': 0.282,
    'hd3_sintel_flyingchairs': 2.016,
    'hd3_sintel_flyingthings3d': 23.785,
    'hd3_sintel_kitti': 21.161,
    'hd3_sintel_sintel': 0.227,
    'hd3_kitti_flyingchairs': 1.766,
    'hd3_kitti_flyingthings3d': 166.434,
    'hd3_kitti_kitti': 1.924,
    'hd3_kitti_sintel': 0.285,
    'hd3_ctxt_chairs_flyingchairs': 0.828,
    'hd3_ctxt_chairs_flyingthings3d': 47.454,
    'hd3_ctxt_chairs_kitti': 29.977,
    'hd3_ctxt_chairs_sintel': 0.462,
    'hd3_ctxt_things_flyingchairs': 1.280,
    'hd3_ctxt_things_flyingthings3d': 9.238,
    'hd3_ctxt_things_kitti': 13.115,
    'hd3_ctxt_things_sintel': 0.249,
    'hd3_ctxt_sintel_flyingchairs': 1.896,
    'hd3_ctxt_sintel_flyingthings3d': 14.648,
    'hd3_ctxt_sintel_kitti': 14.455,
    'hd3_ctxt_sintel_sintel': 0.198,
    'hd3_ctxt_kitti_flyingchairs': 2.059,
    'hd3_ctxt_kitti_flyingthings3d': 66.693,
    'hd3_ctxt_kitti_kitti': 1.491,
    'hd3_ctxt_kitti_sintel': 0.305,
    'gma_chairs_flyingchairs': 0.533,
    'gma_chairs_flyingthings3d': 8.372,
    'gma_chairs_kitti': 12.524,
    'gma_chairs_sintel': 0.334,
    'gma_things_flyingchairs': 0.796,
    'gma_things_flyingthings3d': 3.652,
    'gma_things_kitti': 7.176,
    'gma_things_sintel': 0.188,
    'gma_sintel_flyingchairs': 0.887,
    'gma_sintel_flyingthings3d': 4.145,
    'gma_sintel_kitti': 1.863,
    'gma_sintel_sintel': 0.153,
    'gma_kitti_flyingchairs': 1.626,
    'gma_kitti_flyingthings3d': 18.008,
    'gma_kitti_kitti': 0.987,
    'gma_kitti_sintel': 0.286,
    'irr_pwc_chairs_occ_flyingchairs': 0.909,
    'irr_pwc_chairs_occ_flyingthings3d': 10.531,
    'irr_pwc_chairs_occ_kitti': 9.929,
    'irr_pwc_chairs_occ_sintel': 0.226,
    'irr_pwc_things_flyingchairs': 0.959,
    'irr_pwc_things_flyingthings3d': 6.844,
    'irr_pwc_things_kitti': 11.348,
    'irr_pwc_things_sintel': 0.235,
    'irr_pwc_sintel_flyingchairs': 1.315,
    'irr_pwc_sintel_flyingthings3d': 13.126,
    'irr_pwc_sintel_kitti': 10.421,
    'irr_pwc_sintel_sintel': 0.220,
    'irr_pwc_kitti_flyingchairs': 1.538,
    'irr_pwc_kitti_flyingthings3d': 79.439,
    'irr_pwc_kitti_kitti': 1.373,
    'irr_pwc_kitti_sintel': 0.322,
    'irr_pwcnet_things_flyingchairs': 1.163,
    'irr_pwcnet_things_flyingthings3d': 23.172,
    'irr_pwcnet_things_kitti': 13.557,
    'irr_pwcnet_things_sintel': 0.350,
    'irr_pwcnet_irr_things_flyingchairs': 1.163,
    'irr_pwcnet_irr_things_flyingthings3d': 12.492,
    'irr_pwcnet_irr_things_kitti': 13.227,
    'irr_pwcnet_irr_things_sintel': 0.326,
    'lcv_raft_chairs_flyingchairs': 0.836,
    'lcv_raft_chairs_flyingthings3d': 4.878,
    'lcv_raft_chairs_kitti': 13.587,
    'lcv_raft_chairs_sintel': 0.254,
    'lcv_raft_things_flyingchairs': 0.993,
    'lcv_raft_things_flyingthings3d': 4.271,
    'lcv_raft_things_kitti': 6.827,
    'lcv_raft_things_sintel': 0.217,
    'liteflownet_kitti_flyingchairs': 1.991,
    'liteflownet_kitti_flyingthings3d': 34.661,
    'liteflownet_kitti_kitti': 2.164,
    'liteflownet_kitti_sintel': 0.366,
    'liteflownet_sintel_flyingchairs': 1.024,
    'liteflownet_sintel_flyingthings3d': 18.735,
    'liteflownet_sintel_kitti': 7.642,
    'liteflownet_sintel_sintel': 0.203,
    'liteflownet_things_flyingchairs': 1.133,
    'liteflownet_things_flyingthings3d': 14.386,
    'liteflownet_things_kitti': 13.362,
    'liteflownet_things_sintel': 0.285,
    'liteflownet2_sintel_flyingchairs': 1.037,
    'liteflownet2_sintel_flyingthings3d': 13.254,
    'liteflownet2_sintel_kitti': 2.526,
    'liteflownet2_sintel_sintel': 0.259,
    'liteflownet2_pseudoreg_kitti_flyingchairs': 1.975,
    'liteflownet2_pseudoreg_kitti_flyingthings3d': 34.321,
    'liteflownet2_pseudoreg_kitti_kitti': 2.265,
    'liteflownet2_pseudoreg_kitti_sintel': 0.395,
    'liteflownet3_sintel_flyingchairs': 1.480,
    'liteflownet3_sintel_flyingthings3d': 13.961,
    'liteflownet3_sintel_kitti': 3.094,
    'liteflownet3_sintel_sintel': 0.246,
    'liteflownet3_pseudoreg_kitti_flyingchairs': 1.725,
    'liteflownet3_pseudoreg_kitti_flyingthings3d': 33.243,
    'liteflownet3_pseudoreg_kitti_kitti': 2.035,
    'liteflownet3_pseudoreg_kitti_sintel': 0.442,
    'liteflownet3s_sintel_flyingchairs': 1.354,
    'liteflownet3s_sintel_flyingthings3d': 12.980,
    'liteflownet3s_sintel_kitti': 4.897,
    'liteflownet3s_sintel_sintel': 0.255,
    'liteflownet3s_pseudoreg_kitti_flyingchairs': 1.879,
    'liteflownet3s_pseudoreg_kitti_flyingthings3d': 28.441,
    'liteflownet3s_pseudoreg_kitti_kitti': 2.206,
    'liteflownet3s_pseudoreg_kitti_sintel': 0.388,
    'maskflownet_kitti_flyingchairs': 2.189,
    'maskflownet_kitti_flyingthings3d': 54.736,
    'maskflownet_kitti_kitti': 2.888,
    'maskflownet_kitti_sintel': 0.287,
    'maskflownet_sintel_flyingchairs': 1.021,
    'maskflownet_sintel_flyingthings3d': 13.191,
    'maskflownet_sintel_kitti': 4.271,
    'maskflownet_sintel_sintel': 0.190,
    'maskflownet_s_sintel_flyingchairs': 1.086,
    'maskflownet_s_sintel_flyingthings3d': 14.158,
    'maskflownet_s_sintel_kitti': 4.565,
    'maskflownet_s_sintel_sintel': 0.224,
    'maskflownet_s_things_flyingchairs': 1.257,
    'maskflownet_s_things_flyingthings3d': 11.582,
    'maskflownet_s_things_kitti': 12.396,
    'maskflownet_s_things_sintel': 0.375,
    'pwcnet_things_flyingchairs': 2.056,
    'pwcnet_things_flyingthings3d': 20.956,
    'pwcnet_things_kitti': 11.156,
    'pwcnet_things_sintel': 0.595,
    'pwcnet_sintel_flyingchairs': 1.887,
    'pwcnet_sintel_flyingthings3d': 22.320,
    'pwcnet_sintel_kitti': 5.068,
    'pwcnet_sintel_sintel': 0.405,
    'pwcdcnet_things_flyingchairs': 1.833,
    'pwcdcnet_things_flyingthings3d': 12.122,
    'pwcdcnet_things_kitti': 10.446,
    'pwcdcnet_things_sintel': 0.454,
    'pwcdcnet_sintel_flyingchairs': 1.321,
    'pwcdcnet_sintel_flyingthings3d': 16.159,
    'pwcdcnet_sintel_kitti': 2.697,
    'pwcdcnet_sintel_sintel': 0.241,
    'raft_chairs_flyingchairs': 0.636,
    'raft_chairs_flyingthings3d': 6.662,
    'raft_chairs_kitti': 9.991,
    'raft_chairs_sintel': 0.222,
    'raft_things_flyingchairs': 0.813,
    'raft_things_flyingthings3d': 3.384,
    'raft_things_kitti': 6.702,
    'raft_things_sintel': 0.186,
    'raft_sintel_flyingchairs': 0.761,
    'raft_sintel_flyingthings3d': 3.974,
    'raft_sintel_kitti': 2.251,
    'raft_sintel_sintel': 0.162,
    'raft_kitti_flyingchairs': 1.927,
    'raft_kitti_flyingthings3d': 18.275,
    'raft_kitti_kitti': 0.932,
    'raft_kitti_sintel': 0.360,
    'raft_small_things_flyingchairs': 1.084,
    'raft_small_things_flyingthings3d': 10.463,
    'raft_small_things_kitti': 9.548,
    'raft_small_things_sintel': 0.282,
    'scopeflow_chairs_flyingchairs': 0.965,
    'scopeflow_chairs_flyingthings3d': 13.087,
    'scopeflow_chairs_kitti': 13.576,
    'scopeflow_chairs_sintel': 0.249,
    'scopeflow_things_flyingchairs': 1.030,
    'scopeflow_things_flyingthings3d': 10.189,
    'scopeflow_things_kitti': 10.734,
    'scopeflow_things_sintel': 0.231,
    'scopeflow_kitti_flyingchairs': 1.832,
    'scopeflow_kitti_flyingthings3d': 138.331,
    'scopeflow_kitti_kitti': 2.507,
    'scopeflow_kitti_sintel': 0.304,
    'scopeflow_sintel_flyingchairs': 1.145,
    'scopeflow_sintel_flyingthings3d': 11.772,
    'scopeflow_sintel_kitti': 9.662,
    'scopeflow_sintel_sintel': 0.218,
    'scv4_chairs_flyingchairs': 0.644,
    'scv4_chairs_flyingthings3d': 8.586,
    'scv4_chairs_kitti': 20.236,
    'scv4_chairs_sintel': 0.361,
    'scv4_kitti_flyingchairs': 2.260,
    'scv4_kitti_flyingthings3d': 57.771,
    'scv4_kitti_kitti': 1.401,
    'scv4_kitti_sintel': 0.290,
    'scv4_sintel_flyingchairs': 0.813,
    'scv4_sintel_flyingthings3d': 5.976,
    'scv4_sintel_kitti': 2.180,
    'scv4_sintel_sintel': 0.224,
    'scv4_things_flyingchairs': 1.930,
    'scv4_things_flyingthings3d': 6.140,
    'scv4_things_kitti': 15.921,
    'scv4_things_sintel': 0.300,
    'scv8_chairs_flyingchairs': 2.339,
    'scv8_chairs_flyingthings3d': 290.216,
    'scv8_chairs_kitti': 34.867,
    'scv8_chairs_sintel': 1.866,
    'scv8_things_flyingchairs': 2.195,
    'scv8_things_flyingthings3d': 32.719,
    'scv8_things_kitti': 30.921,
    'scv8_things_sintel': 1.257,
    'vcn_chairs_flyingchairs': 1.155,
    'vcn_chairs_flyingthings3d': 11.569,
    'vcn_chairs_kitti': 9.270,
    'vcn_chairs_sintel': 0.454,
    'vcn_things_flyingchairs': 1.397,
    'vcn_things_flyingthings3d': 7.309,
    'vcn_things_kitti': 8.630,
    'vcn_things_sintel': 0.364,
    'vcn_sintel_flyingchairs': 1.146,
    'vcn_sintel_flyingthings3d': 7.214,
    'vcn_sintel_kitti': 3.845,
    'vcn_sintel_sintel': 0.271,
    'vcn_kitti_flyingchairs': 2.181,
    'vcn_kitti_flyingthings3d': 60.751,
    'vcn_kitti_kitti': 1.305,
    'vcn_kitti_sintel': 0.392,
    'vcn_small_chairs_flyingchairs': 1.437,
    'vcn_small_chairs_flyingthings3d': 14.641,
    'vcn_small_chairs_kitti': 11.638,
    'vcn_small_chairs_sintel': 0.518,
    'vcn_small_things_flyingchairs': 1.619,
    'vcn_small_things_flyingthings3d': 11.066,
    'vcn_small_things_kitti': 9.665,
    'vcn_small_things_sintel': 0.575,
}
# Model names skipped entirely by test_accuracy.
EXCLUDE_MODELS = [
    'scv4', 'scv8'  # These models have additional (non-default) requirements
]
@pytest.mark.skip(reason='Requires to download all checkpoints. Just run occasionally.')
def test_accuracy() -> None:
    """Check the EPE of every pretrained model/checkpoint against reference values.

    For each model registered in ptlflow (except those in EXCLUDE_MODELS) and
    each of its pretrained checkpoints, estimates optical flow on one sample
    pair from each dataset in ``_load_data()`` and asserts that the mean
    end-point error (EPE) stays within 10% of the value stored in
    ``reference_accuracy``. Also prints each measured EPE in the dict-entry
    format, so the output can be pasted back into ``reference_accuracy``.
    """
    data = _load_data()
    model_names = ptlflow.models_dict.keys()
    for mname in model_names:
        if mname in EXCLUDE_MODELS:
            continue
        model_ref = ptlflow.get_model_reference(mname)
        # Models without pretrained weights are still run once with cname=None.
        if hasattr(model_ref, 'pretrained_checkpoints'):
            ckpt_names = list(model_ref.pretrained_checkpoints.keys())
        else:
            ckpt_names = [None]
        for cname in ckpt_names:
            # Build the model with its default command-line arguments.
            parser = model_ref.add_model_specific_args()
            args = parser.parse_args([])
            model = ptlflow.get_model(mname, cname, args)
            model = model.eval()
            if torch.cuda.is_available():
                model = model.cuda()
            for dataset_name, dataset_data in data.items():
                # Pad inputs to a multiple of the model's output stride,
                # then undo the padding on both inputs and predictions.
                padder = InputPadder(dataset_data['images'].shape, stride=model.output_stride)
                dataset_data['images'] = padder.pad(dataset_data['images'])
                preds = model(dataset_data)['flows'].detach()
                dataset_data['images'] = padder.unpad(dataset_data['images'])
                preds = padder.unpad(preds)
                # Mean EPE over valid pixels only (invalid pixels contribute 0).
                epe = torch.norm(preds - dataset_data['flows'], p=2, dim=2, keepdim=True)
                epe[~dataset_data['valids'].bool()] = 0
                epe = epe.sum() / dataset_data['valids'].sum()
                id_str = f'{mname}_{cname}_{dataset_name}'
                if cname is not None:
                    # Only assert when a reference value exists for this combo.
                    if reference_accuracy.get(id_str) is not None:
                        ref_epe = reference_accuracy[id_str]
                        assert epe < 1.1*ref_epe, id_str
                print(f' \'{id_str}\': {epe:.03f},')
def _load_data() -> Dict[str, Dict[str, torch.Tensor]]:
    """Load one sample image pair plus ground-truth flow for each benchmark.

    Returns a dict keyed by dataset name ('flyingchairs', 'flyingthings3d',
    'kitti', 'sintel'); each entry holds 'images', 'flows' and 'valids'
    tensors. When CUDA is available the tensors are moved to the GPU and
    downscaled to reduce memory requirements.
    """
    data = {}
    transform = ToTensor()
    data['flyingchairs'] = {
        'images': [cv.imread(str(Path('tests/data/ptlflow/models/flyingchairs_00001_img1.ppm'))),
                   cv.imread(str(Path('tests/data/ptlflow/models/flyingchairs_00001_img2.ppm')))],
        'flows': flow_utils.flow_read(Path('tests/data/ptlflow/models/flyingchairs_00001_flow.flo'))
    }
    # FlyingChairs has dense ground truth: every pixel is valid.
    data['flyingchairs']['valids'] = np.ones_like(data['flyingchairs']['flows'][:, :, 0])
    data['flyingchairs'] = transform(data['flyingchairs'])
    data['flyingthings3d'] = {
        'images': [cv.imread(str(Path('tests/data/ptlflow/models/flyingthings3d_0000000.png'))),
                   cv.imread(str(Path('tests/data/ptlflow/models/flyingthings3d_0000001.png')))],
        'flows': flow_utils.flow_read(Path('tests/data/ptlflow/models/flyingthings3d_0000000.flo'))
    }
    data['flyingthings3d']['valids'] = np.ones_like(data['flyingthings3d']['flows'][:, :, 0])
    data['flyingthings3d'] = transform(data['flyingthings3d'])
    data['kitti'] = {
        'images': [cv.imread(str(Path('tests/data/ptlflow/models/kitti2015_000000_10.png'))),
                   cv.imread(str(Path('tests/data/ptlflow/models/kitti2015_000000_11.png')))],
        'flows': flow_utils.flow_read(Path('tests/data/ptlflow/models/kitti2015_flow_000000_10.png'))
    }
    # KITTI ground truth is sparse: invalid pixels come back as NaN, so mark
    # them invalid and zero them out before tensor conversion.
    nan_mask = np.isnan(data['kitti']['flows'])
    data['kitti']['valids'] = 1 - nan_mask[:, :, 0].astype(np.float32)
    data['kitti']['flows'][nan_mask] = 0
    data['kitti'] = transform(data['kitti'])
    data['sintel'] = {
        'images': [cv.imread(str(Path('tests/data/ptlflow/models/sintel/training/clean/alley_1/frame_0001.png'))),
                   cv.imread(str(Path('tests/data/ptlflow/models/sintel/training/clean/alley_1/frame_0002.png')))],
        'flows': flow_utils.flow_read(Path('tests/data/ptlflow/models/sintel/training/flow/alley_1/frame_0001.flo'))
    }
    data['sintel']['valids'] = np.ones_like(data['sintel']['flows'][:, :, 0])
    data['sintel'] = transform(data['sintel'])
    for dataset_dict in data.values():
        for k in dataset_dict.keys():
            if torch.cuda.is_available():
                dataset_dict[k] = dataset_dict[k].cuda()
                # Decrease resolution to reduce GPU requirements
                scale_factor = 0.66
                dataset_dict[k] = F.interpolate(dataset_dict[k], scale_factor=scale_factor, recompute_scale_factor=False)
                if k == 'flows':
                    # Flow vectors scale with the image resolution.
                    dataset_dict[k] *= scale_factor
                elif k == 'valids':
                    # Interpolation blurs the mask; keep only fully-valid pixels.
                    dataset_dict[k][dataset_dict[k] < 1] = 0
            # Add a fifth, batch dimension
            dataset_dict[k] = dataset_dict[k][None]
    return data
| [
"numpy.ones_like",
"ptlflow.data.flow_transforms.ToTensor",
"torch.norm",
"ptlflow.models_dict.keys",
"ptlflow.get_model",
"numpy.isnan",
"ptlflow.utils.utils.InputPadder",
"pathlib.Path",
"torch.cuda.is_available",
"ptlflow.get_model_reference",
"torch.nn.functional.interpolate",
"pytest.mark... | [((12907, 12999), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Requires to download all checkpoints. Just run occasionally."""'}), "(reason=\n 'Requires to download all checkpoints. Just run occasionally.')\n", (12923, 12999), False, 'import pytest\n'), ((13066, 13092), 'ptlflow.models_dict.keys', 'ptlflow.models_dict.keys', ([], {}), '()\n', (13090, 13092), False, 'import ptlflow\n'), ((14770, 14780), 'ptlflow.data.flow_transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (14778, 14780), False, 'from ptlflow.data.flow_transforms import ToTensor\n'), ((15152, 15204), 'numpy.ones_like', 'np.ones_like', (["data['flyingchairs']['flows'][:, :, 0]"], {}), "(data['flyingchairs']['flows'][:, :, 0])\n", (15164, 15204), True, 'import numpy as np\n'), ((15636, 15690), 'numpy.ones_like', 'np.ones_like', (["data['flyingthings3d']['flows'][:, :, 0]"], {}), "(data['flyingthings3d']['flows'][:, :, 0])\n", (15648, 15690), True, 'import numpy as np\n'), ((16089, 16121), 'numpy.isnan', 'np.isnan', (["data['kitti']['flows']"], {}), "(data['kitti']['flows'])\n", (16097, 16121), True, 'import numpy as np\n'), ((16688, 16734), 'numpy.ones_like', 'np.ones_like', (["data['sintel']['flows'][:, :, 0]"], {}), "(data['sintel']['flows'][:, :, 0])\n", (16700, 16734), True, 'import numpy as np\n'), ((13201, 13235), 'ptlflow.get_model_reference', 'ptlflow.get_model_reference', (['mname'], {}), '(mname)\n', (13228, 13235), False, 'import ptlflow\n'), ((13564, 13601), 'ptlflow.get_model', 'ptlflow.get_model', (['mname', 'cname', 'args'], {}), '(mname, cname, args)\n', (13581, 13601), False, 'import ptlflow\n'), ((13650, 13675), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13673, 13675), False, 'import torch\n'), ((15046, 15107), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/flyingchairs_00001_flow.flo"""'], {}), "('tests/data/ptlflow/models/flyingchairs_00001_flow.flo')\n", (15050, 15107), False, 'from pathlib import Path\n'), 
((15529, 15589), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/flyingthings3d_0000000.flo"""'], {}), "('tests/data/ptlflow/models/flyingthings3d_0000000.flo')\n", (15533, 15589), False, 'from pathlib import Path\n'), ((16004, 16066), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/kitti2015_flow_000000_10.png"""'], {}), "('tests/data/ptlflow/models/kitti2015_flow_000000_10.png')\n", (16008, 16066), False, 'from pathlib import Path\n'), ((16572, 16649), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/sintel/training/flow/alley_1/frame_0001.flo"""'], {}), "('tests/data/ptlflow/models/sintel/training/flow/alley_1/frame_0001.flo')\n", (16576, 16649), False, 'from pathlib import Path\n'), ((16875, 16900), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16898, 16900), False, 'import torch\n'), ((17083, 17174), 'torch.nn.functional.interpolate', 'F.interpolate', (['dataset_dict[k]'], {'scale_factor': 'scale_factor', 'recompute_scale_factor': '(False)'}), '(dataset_dict[k], scale_factor=scale_factor,\n recompute_scale_factor=False)\n', (17096, 17174), True, 'import torch.nn.functional as F\n'), ((13799, 13868), 'ptlflow.utils.utils.InputPadder', 'InputPadder', (["dataset_data['images'].shape"], {'stride': 'model.output_stride'}), "(dataset_data['images'].shape, stride=model.output_stride)\n", (13810, 13868), False, 'from ptlflow.utils.utils import InputPadder\n'), ((14151, 14218), 'torch.norm', 'torch.norm', (["(preds - dataset_data['flows'])"], {'p': '(2)', 'dim': '(2)', 'keepdim': '(True)'}), "(preds - dataset_data['flows'], p=2, dim=2, keepdim=True)\n", (14161, 14218), False, 'import torch\n'), ((14844, 14905), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/flyingchairs_00001_img1.ppm"""'], {}), "('tests/data/ptlflow/models/flyingchairs_00001_img1.ppm')\n", (14848, 14905), False, 'from pathlib import Path\n'), ((14942, 15003), 'pathlib.Path', 'Path', 
(['"""tests/data/ptlflow/models/flyingchairs_00001_img2.ppm"""'], {}), "('tests/data/ptlflow/models/flyingchairs_00001_img2.ppm')\n", (14946, 15003), False, 'from pathlib import Path\n'), ((15329, 15389), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/flyingthings3d_0000000.png"""'], {}), "('tests/data/ptlflow/models/flyingthings3d_0000000.png')\n", (15333, 15389), False, 'from pathlib import Path\n'), ((15426, 15486), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/flyingthings3d_0000001.png"""'], {}), "('tests/data/ptlflow/models/flyingthings3d_0000001.png')\n", (15430, 15486), False, 'from pathlib import Path\n'), ((15810, 15867), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/kitti2015_000000_10.png"""'], {}), "('tests/data/ptlflow/models/kitti2015_000000_10.png')\n", (15814, 15867), False, 'from pathlib import Path\n'), ((15904, 15961), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/kitti2015_000000_11.png"""'], {}), "('tests/data/ptlflow/models/kitti2015_000000_11.png')\n", (15908, 15961), False, 'from pathlib import Path\n'), ((16336, 16414), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/sintel/training/clean/alley_1/frame_0001.png"""'], {}), "('tests/data/ptlflow/models/sintel/training/clean/alley_1/frame_0001.png')\n", (16340, 16414), False, 'from pathlib import Path\n'), ((16451, 16529), 'pathlib.Path', 'Path', (['"""tests/data/ptlflow/models/sintel/training/clean/alley_1/frame_0002.png"""'], {}), "('tests/data/ptlflow/models/sintel/training/clean/alley_1/frame_0002.png')\n", (16455, 16529), False, 'from pathlib import Path\n')] |
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Support Vector Machine using TensorFlow"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.1.5"
__author__ = "<NAME>"
import numpy as np
import os
import sys
import tensorflow as tf
import time
class SVM:
    """Implementation of L2-Support Vector Machine using TensorFlow"""
    def __init__(self, alpha, batch_size, svm_c, num_classes, num_features):
        """Initialize the SVM class
        Parameter
        ---------
        alpha : float
            The learning rate for the SVM model.
        batch_size : int
            Number of batches to use for training and testing.
        svm_c : float
            The SVM penalty parameter.
        num_classes : int
            Number of classes in a dataset.
        num_features : int
            Number of features in a dataset.
        """
        self.alpha = alpha
        self.batch_size = batch_size
        self.svm_c = svm_c
        self.num_classes = num_classes
        self.num_features = num_features
        # Nested function: closes over `self` and builds the TF1 static graph,
        # storing the tensors/ops it creates as attributes on the instance.
        def __graph__():
            """Building the inference graph"""
            with tf.name_scope("input"):
                # [BATCH_SIZE, NUM_FEATURES]
                x_input = tf.placeholder(
                    dtype=tf.float32, shape=[None, self.num_features], name="x_input"
                )
                # [BATCH_SIZE]
                y_input = tf.placeholder(dtype=tf.uint8, shape=[None], name="y_input")
                # [BATCH_SIZE, NUM_CLASSES]
                # SVM labels are encoded as +1 / -1 (not 1 / 0).
                y_onehot = tf.one_hot(
                    indices=y_input,
                    depth=self.num_classes,
                    on_value=1,
                    off_value=-1,
                    name="y_onehot",
                )
            learning_rate = tf.placeholder(dtype=tf.float32, name="learning_rate")
            with tf.name_scope("training_ops"):
                with tf.name_scope("weights"):
                    weight = tf.get_variable(
                        name="weights",
                        initializer=tf.random_normal(
                            [self.num_features, self.num_classes], stddev=0.01
                        ),
                    )
                    self.variable_summaries(weight)
                with tf.name_scope("biases"):
                    bias = tf.get_variable(
                        name="biases",
                        initializer=tf.constant([0.1], shape=[self.num_classes]),
                    )
                    self.variable_summaries(bias)
                with tf.name_scope("Wx_plus_b"):
                    # Linear decision function: output = x @ W + b
                    output = tf.matmul(x_input, weight) + bias
                    tf.summary.histogram("pre-activations", output)
            with tf.name_scope("svm"):
                # L2-SVM objective: ||W||^2 regularizer + squared hinge loss.
                regularization = tf.reduce_mean(tf.square(weight))
                hinge_loss = tf.reduce_mean(
                    tf.square(
                        tf.maximum(
                            tf.zeros([self.batch_size, self.num_classes]),
                            1 - tf.cast(y_onehot, tf.float32) * output,
                        )
                    )
                )
                with tf.name_scope("loss"):
                    loss = regularization + self.svm_c * hinge_loss
                    tf.summary.scalar("loss", loss)
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
                loss
            )
            with tf.name_scope("accuracy"):
                # Predicted class is the sign of the linear output.
                predicted_class = tf.sign(output)
                predicted_class = tf.identity(predicted_class, name="prediction")
                with tf.name_scope("correct_prediction"):
                    correct = tf.equal(
                        tf.argmax(predicted_class, 1), tf.argmax(y_onehot, 1)
                    )
                with tf.name_scope("accuracy"):
                    accuracy = tf.reduce_mean(tf.cast(correct, "float"))
            tf.summary.scalar("accuracy", accuracy)
            merged = tf.summary.merge_all()
            # Expose graph endpoints on the instance for train() to feed/fetch.
            self.x_input = x_input
            self.y_input = y_input
            self.y_onehot = y_onehot
            self.learning_rate = learning_rate
            self.loss = loss
            self.optimizer = optimizer
            self.output = output
            self.predicted_class = predicted_class
            self.accuracy = accuracy
            self.merged = merged
        sys.stdout.write("\n<log> Building graph...")
        __graph__()
        sys.stdout.write("</log>\n")
    def train(
        self,
        epochs,
        log_path,
        train_data,
        train_size,
        validation_data,
        validation_size,
        result_path,
    ):
        """Trains the SVM model
        Parameter
        ---------
        epochs : int
            The number of passes through the entire dataset.
        log_path : str
            The directory where to save the TensorBoard logs.
        train_data : numpy.ndarray
            The numpy.ndarray to be used as the training dataset.
        train_size : int
            The number of data in `train_data`.
        validation_data : numpy.ndarray
            The numpy.ndarray to be used as the validation dataset.
        validation_size : int
            The number of data in `validation_data`.
        result_path : str
            The path where to save the NPY files consisting of the actual and predicted labels.
        """
        # initialize the variables
        init_op = tf.group(
            tf.global_variables_initializer(), tf.local_variables_initializer()
        )
        # get the current time and date
        timestamp = str(time.asctime())
        # event files to contain the TensorBoard log
        train_writer = tf.summary.FileWriter(
            log_path + timestamp + "-training", graph=tf.get_default_graph()
        )
        test_writer = tf.summary.FileWriter(
            os.path.join(log_path, timestamp + "-testing"), graph=tf.get_default_graph()
        )
        with tf.Session() as sess:
            sess.run(init_op)
            try:
                for step in range(epochs * train_size // self.batch_size):
                    # Cycle through the training set in fixed-size mini-batches.
                    offset = (step * self.batch_size) % train_size
                    batch_train_data = train_data[0][
                        offset : (offset + self.batch_size)
                    ]
                    batch_train_labels = train_data[1][
                        offset : (offset + self.batch_size)
                    ]
                    feed_dict = {
                        self.x_input: batch_train_data,
                        self.y_input: batch_train_labels,
                        self.learning_rate: self.alpha,
                    }
                    summary, _, step_loss = sess.run(
                        [self.merged, self.optimizer, self.loss], feed_dict=feed_dict
                    )
                    # Log progress every 100 steps.
                    if step % 100 == 0:
                        train_accuracy = sess.run(self.accuracy, feed_dict=feed_dict)
                        print(
                            "step[{}] train -- loss : {}, accuracy : {}".format(
                                step, step_loss, train_accuracy
                            )
                        )
                        train_writer.add_summary(summary=summary, global_step=step)
            except KeyboardInterrupt:
                print("Training interrupted at step {}".format(step))
                # NOTE(review): os._exit terminates immediately and skips the
                # `finally` block below, so validation never runs after Ctrl-C.
                os._exit(1)
            finally:
                print("EOF -- training done at step {}".format(step))
                # Validation: repeatedly evaluates the first batch_size items
                # of validation_data and records predictions for each step.
                for step in range(epochs * validation_size // self.batch_size):
                    feed_dict = {
                        self.x_input: validation_data[0][: self.batch_size],
                        self.y_input: validation_data[1][: self.batch_size],
                    }
                    (
                        validation_summary,
                        predictions,
                        actual,
                        validation_loss,
                        validation_accuracy,
                    ) = sess.run(
                        [
                            self.merged,
                            self.predicted_class,
                            self.y_onehot,
                            self.loss,
                            self.accuracy,
                        ],
                        feed_dict=feed_dict,
                    )
                    if step % 100 == 0 and step > 0:
                        print(
                            "step [{}] validation -- loss : {}, accuracy : {}".format(
                                step, validation_loss, validation_accuracy
                            )
                        )
                        test_writer.add_summary(
                            summary=validation_summary, global_step=step
                        )
                    self.save_labels(
                        predictions=predictions,
                        actual=actual,
                        result_path=result_path,
                        step=step,
                        phase="testing",
                    )
                print("EOF -- testing done at step {}".format(step))
    @staticmethod
    def variable_summaries(var):
        """Attach mean/stddev/max/min/histogram TensorBoard summaries to `var`."""
        with tf.name_scope("summaries"):
            mean = tf.reduce_mean(var)
            tf.summary.scalar("mean", mean)
            with tf.name_scope("stddev"):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar("stddev", stddev)
            tf.summary.scalar("max", tf.reduce_max(var))
            tf.summary.scalar("min", tf.reduce_min(var))
            tf.summary.histogram("histogram", var)
    @staticmethod
    def save_labels(predictions, actual, result_path, step, phase):
        """Saves the actual and predicted labels to a NPY file
        Parameter
        ---------
        predictions : numpy.ndarray
            The NumPy array containing the predicted labels.
        actual : numpy.ndarray
            The NumPy array containing the actual labels.
        result_path : str
            The path where to save the concatenated actual and predicted labels.
        step : int
            The time step for the NumPy arrays.
        phase : str
            The phase for which the predictions is, i.e. training/validation/testing.
        """
        if not os.path.exists(path=result_path):
            os.mkdir(result_path)
        # Concatenate the predicted and actual labels
        labels = np.concatenate((predictions, actual), axis=1)
        # save the labels array to NPY file
        np.save(
            file=os.path.join(result_path, "{}-svm-{}.npy".format(phase, step)),
            arr=labels,
        )
| [
"sys.stdout.write",
"os.mkdir",
"tensorflow.identity",
"tensorflow.local_variables_initializer",
"tensorflow.matmul",
"tensorflow.get_default_graph",
"tensorflow.reduce_max",
"os.path.join",
"time.asctime",
"tensorflow.one_hot",
"os.path.exists",
"tensorflow.sign",
"tensorflow.placeholder",
... | [((5001, 5049), 'sys.stdout.write', 'sys.stdout.write', (['"""\n<log> Building graph..."""'], {}), '("""\n<log> Building graph...""")\n', (5017, 5049), False, 'import sys\n'), ((5075, 5103), 'sys.stdout.write', 'sys.stdout.write', (['"""</log>\n"""'], {}), "('</log>\\n')\n", (5091, 5103), False, 'import sys\n'), ((11125, 11170), 'numpy.concatenate', 'np.concatenate', (['(predictions, actual)'], {'axis': '(1)'}), '((predictions, actual), axis=1)\n', (11139, 11170), True, 'import numpy as np\n'), ((2378, 2432), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'name': '"""learning_rate"""'}), "(dtype=tf.float32, name='learning_rate')\n", (2392, 2432), True, 'import tensorflow as tf\n'), ((3868, 3899), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (3885, 3899), True, 'import tensorflow as tf\n'), ((4530, 4569), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (4547, 4569), True, 'import tensorflow as tf\n'), ((4592, 4614), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4612, 4614), True, 'import tensorflow as tf\n'), ((6083, 6116), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6114, 6116), True, 'import tensorflow as tf\n'), ((6118, 6150), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (6148, 6150), True, 'import tensorflow as tf\n'), ((6226, 6240), 'time.asctime', 'time.asctime', ([], {}), '()\n', (6238, 6240), False, 'import time\n'), ((6486, 6532), 'os.path.join', 'os.path.join', (['log_path', "(timestamp + '-testing')"], {}), "(log_path, timestamp + '-testing')\n", (6498, 6532), False, 'import os\n'), ((6586, 6598), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6596, 6598), True, 'import tensorflow as tf\n'), ((9876, 9902), 'tensorflow.name_scope', 'tf.name_scope', 
(['"""summaries"""'], {}), "('summaries')\n", (9889, 9902), True, 'import tensorflow as tf\n'), ((9923, 9942), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (9937, 9942), True, 'import tensorflow as tf\n'), ((9955, 9986), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (9972, 9986), True, 'import tensorflow as tf\n'), ((10113, 10148), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stddev"""', 'stddev'], {}), "('stddev', stddev)\n", (10130, 10148), True, 'import tensorflow as tf\n'), ((10275, 10313), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (10295, 10313), True, 'import tensorflow as tf\n'), ((10985, 11017), 'os.path.exists', 'os.path.exists', ([], {'path': 'result_path'}), '(path=result_path)\n', (10999, 11017), False, 'import os\n'), ((11031, 11052), 'os.mkdir', 'os.mkdir', (['result_path'], {}), '(result_path)\n', (11039, 11052), False, 'import os\n'), ((1729, 1751), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (1742, 1751), True, 'import tensorflow as tf\n'), ((1824, 1910), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, self.num_features]', 'name': '"""x_input"""'}), "(dtype=tf.float32, shape=[None, self.num_features], name=\n 'x_input')\n", (1838, 1910), True, 'import tensorflow as tf\n'), ((2002, 2062), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8', 'shape': '[None]', 'name': '"""y_input"""'}), "(dtype=tf.uint8, shape=[None], name='y_input')\n", (2016, 2062), True, 'import tensorflow as tf\n'), ((2135, 2234), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'y_input', 'depth': 'self.num_classes', 'on_value': '(1)', 'off_value': '(-1)', 'name': '"""y_onehot"""'}), "(indices=y_input, depth=self.num_classes, on_value=1, off_value=-\n 1, name='y_onehot')\n", (2145, 2234), True, 'import tensorflow as 
tf\n'), ((2451, 2480), 'tensorflow.name_scope', 'tf.name_scope', (['"""training_ops"""'], {}), "('training_ops')\n", (2464, 2480), True, 'import tensorflow as tf\n'), ((3330, 3350), 'tensorflow.name_scope', 'tf.name_scope', (['"""svm"""'], {}), "('svm')\n", (3343, 3350), True, 'import tensorflow as tf\n'), ((4040, 4065), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (4053, 4065), True, 'import tensorflow as tf\n'), ((4101, 4116), 'tensorflow.sign', 'tf.sign', (['output'], {}), '(output)\n', (4108, 4116), True, 'import tensorflow as tf\n'), ((4151, 4198), 'tensorflow.identity', 'tf.identity', (['predicted_class'], {'name': '"""prediction"""'}), "(predicted_class, name='prediction')\n", (4162, 4198), True, 'import tensorflow as tf\n'), ((6396, 6418), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (6416, 6418), True, 'import tensorflow as tf\n'), ((6540, 6562), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (6560, 6562), True, 'import tensorflow as tf\n'), ((10004, 10027), 'tensorflow.name_scope', 'tf.name_scope', (['"""stddev"""'], {}), "('stddev')\n", (10017, 10027), True, 'import tensorflow as tf\n'), ((10186, 10204), 'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (10199, 10204), True, 'import tensorflow as tf\n'), ((10243, 10261), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (10256, 10261), True, 'import tensorflow as tf\n'), ((2503, 2527), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (2516, 2527), True, 'import tensorflow as tf\n'), ((2870, 2893), 'tensorflow.name_scope', 'tf.name_scope', (['"""biases"""'], {}), "('biases')\n", (2883, 2893), True, 'import tensorflow as tf\n'), ((3153, 3179), 'tensorflow.name_scope', 'tf.name_scope', (['"""Wx_plus_b"""'], {}), "('Wx_plus_b')\n", (3166, 3179), True, 'import tensorflow as tf\n'), ((3264, 3311), 'tensorflow.summary.histogram', 
'tf.summary.histogram', (['"""pre-activations"""', 'output'], {}), "('pre-activations', output)\n", (3284, 3311), True, 'import tensorflow as tf\n'), ((3400, 3417), 'tensorflow.square', 'tf.square', (['weight'], {}), '(weight)\n', (3409, 3417), True, 'import tensorflow as tf\n'), ((3765, 3786), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (3778, 3786), True, 'import tensorflow as tf\n'), ((3925, 3976), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3947, 3976), True, 'import tensorflow as tf\n'), ((4220, 4255), 'tensorflow.name_scope', 'tf.name_scope', (['"""correct_prediction"""'], {}), "('correct_prediction')\n", (4233, 4255), True, 'import tensorflow as tf\n'), ((4418, 4443), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (4431, 4443), True, 'import tensorflow as tf\n'), ((8029, 8040), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (8037, 8040), False, 'import os\n'), ((3210, 3236), 'tensorflow.matmul', 'tf.matmul', (['x_input', 'weight'], {}), '(x_input, weight)\n', (3219, 3236), True, 'import tensorflow as tf\n'), ((4321, 4350), 'tensorflow.argmax', 'tf.argmax', (['predicted_class', '(1)'], {}), '(predicted_class, 1)\n', (4330, 4350), True, 'import tensorflow as tf\n'), ((4352, 4374), 'tensorflow.argmax', 'tf.argmax', (['y_onehot', '(1)'], {}), '(y_onehot, 1)\n', (4361, 4374), True, 'import tensorflow as tf\n'), ((4491, 4516), 'tensorflow.cast', 'tf.cast', (['correct', '"""float"""'], {}), "(correct, 'float')\n", (4498, 4516), True, 'import tensorflow as tf\n'), ((10077, 10098), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (10086, 10098), True, 'import tensorflow as tf\n'), ((2651, 2719), 'tensorflow.random_normal', 'tf.random_normal', (['[self.num_features, self.num_classes]'], {'stddev': '(0.01)'}), '([self.num_features, self.num_classes], stddev=0.01)\n', (2667, 
2719), True, 'import tensorflow as tf\n'), ((3014, 3058), 'tensorflow.constant', 'tf.constant', (['[0.1]'], {'shape': '[self.num_classes]'}), '([0.1], shape=[self.num_classes])\n', (3025, 3058), True, 'import tensorflow as tf\n'), ((3559, 3604), 'tensorflow.zeros', 'tf.zeros', (['[self.batch_size, self.num_classes]'], {}), '([self.batch_size, self.num_classes])\n', (3567, 3604), True, 'import tensorflow as tf\n'), ((3638, 3667), 'tensorflow.cast', 'tf.cast', (['y_onehot', 'tf.float32'], {}), '(y_onehot, tf.float32)\n', (3645, 3667), True, 'import tensorflow as tf\n')] |
import torch
import pdb
from .gpu_memory_tools import *
import numpy as np
class Batch_Size_Estimator:
    """Estimate the largest batch size that fits on a CUDA device.

    Runs a few probe training iterations with growing batch sizes, measures
    the peak CUDA memory of each, and extrapolates the per-sample memory cost
    against the free memory reported by the GPU.
    """
    def __init__(self, net, opt, loss_func, dataset, gpu_id=0):
        """
        :param net: model to run the probe iterations on
        :param opt: optimizer used for the probe steps
        :param loss_func: callable(out, ydata, mask, hull) returning a scalar loss
        :param dataset: indexable dataset; item 0 must yield
            (name, xdata, ydata, mask, hull)
        :param gpu_id: CUDA device index to probe
        """
        self.__gpu_info = GPU_MEM_INFO(gpu_id)
        self.__device = torch.device("cuda:"+str(gpu_id))
        self.__net = net
        self.__loss_func = loss_func
        self.__opt = opt
        self.__dataset = dataset
    def __loss_batch(self, batches):
        """Run one optimizer step at batch size `batches` and return the peak
        CUDA memory allocated during it, in MiB."""
        xdata = self.__xdata_orig.repeat(batches,1,1,1).to(self.__device)
        ydata = self.__ydata_orig.repeat(batches,1,1,1).to(self.__device)
        mask = self.__mask.repeat(batches,1,1,1).to(self.__device)
        hull = self.__hull.repeat(batches,1,1,1)
        # Fixed: was `self.device`, which does not exist on this class
        # (the attribute is the name-mangled `self.__device`) and raised
        # AttributeError on every call.
        hull = hull[:,None].repeat(1,mask.shape[1],1,1).to(self.__device)
        out = self.__net(xdata)
        loss = self.__loss_func(out, ydata, mask, hull)
        loss.backward()
        self.__opt.step()
        self.__opt.zero_grad()
        # bytes -> MiB (the extra 1.049 converts the decimal MB to binary MiB)
        max_mem = round(torch.cuda.max_memory_allocated(self.__device)/1000/1000/1.049)
        del xdata
        del ydata
        del out
        del loss
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        return max_mem
    def find_max_bs(self):
        """Return the estimated maximum batch size fitting in free GPU memory."""
        mems_used = []
        name, self.__xdata_orig,self.__ydata_orig, self.__mask, self.__hull = self.__dataset[0]
        for i in range(3):
            mems_used.append(self.__loss_batch(i+1))
        # Drop the first (warm-up) measurement; allocator caches skew it.
        mems_used = np.array(mems_used[1:])
        # Memory cost of one extra sample, padded with a 12% safety margin.
        mebi_per_set = mems_used[1]-mems_used[0]
        mebi_per_set += mebi_per_set*0.12
        total = self.__gpu_info.total()
        used = self.__gpu_info.used()
        return int(round((total - used)/mebi_per_set))
return int(round((total - used)/mebi_per_set)) | [
"torch.cuda.max_memory_allocated",
"numpy.array",
"torch.cuda.empty_cache",
"torch.cuda.reset_max_memory_allocated"
] | [((1149, 1173), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1171, 1173), False, 'import torch\n'), ((1182, 1221), 'torch.cuda.reset_max_memory_allocated', 'torch.cuda.reset_max_memory_allocated', ([], {}), '()\n', (1219, 1221), False, 'import torch\n'), ((1545, 1568), 'numpy.array', 'np.array', (['mems_used[1:]'], {}), '(mems_used[1:])\n', (1553, 1568), True, 'import numpy as np\n'), ((1008, 1054), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', (['self.__device'], {}), '(self.__device)\n', (1039, 1054), False, 'import torch\n')] |
import numpy as np
class EnvironmentModel:
    def __init__(self, n_states, n_actions, seed=None):
        """Base model of a reinforcement-learning environment.

        :param n_states: number of states in the environment
        :param n_actions: number of actions available in the environment
        :param seed: optional seed controlling the internal random generator
        """
        self.n_states = n_states
        self.n_actions = n_actions
        self.random_state = np.random.RandomState(seed)

    def p(self, next_state, state, action):
        """Transition probability p(next_state | state, action).

        Subclasses must override this; the base implementation raises
        NotImplementedError.

        :param next_state: index of the candidate successor state
        :param state: index of the current state
        :param action: action taken in the current state
        :return: probability of the transition
        """
        raise NotImplementedError()

    def r(self, next_state, state, action):
        """Reward for the transition (state, action) -> next_state.

        Subclasses must override this; the base implementation raises
        NotImplementedError.

        :param next_state: index of the successor state
        :param state: index of the current state
        :param action: action taken in the current state
        :return: reward for the transition
        """
        raise NotImplementedError()

    def draw(self, state, action):
        """Sample a successor state from p(. | state, action).

        :param state: index of the current state
        :param action: action taken in the current state
        :return: (next_state, reward) for the sampled transition
        """
        transition_probs = [
            self.p(candidate, state, action) for candidate in range(self.n_states)
        ]
        sampled = self.random_state.choice(self.n_states, p=transition_probs)
        return sampled, self.r(sampled, state, action)

    def get_prob_rewards(self):
        """Return the (probabilities, rewards) arrays for the environment.

        Subclasses must override this; the base implementation raises
        NotImplementedError.
        """
        raise NotImplementedError()
| [
"numpy.random.RandomState"
] | [((525, 552), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (546, 552), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Utilities for handling expression data.
#
#
import os, sys
gitpath = os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
import argparse
import datetime
import io
import logging
import traceback
from configparser import ConfigParser
import numpy as np
import pandas as pd
import h5py
# for clustering
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage
from dynamicTreeCut import cutreeHybrid
def get_configstr(cp):
    """Render a ConfigParser object as its INI-format text."""
    buffer = io.StringIO()
    cp.write(buffer)
    return buffer.getvalue()
def get_default_config():
    """Return a ConfigParser loaded from ~/git/scqc/etc/scqc.conf (if present)."""
    config = ConfigParser()
    default_path = os.path.expanduser("~/git/scqc/etc/scqc.conf")
    config.read(default_path)
    return config
def load_coexp(hdf5file):
    """Load an aggregate co-expression matrix from an HDF5 file.

    Reads the 'agg' dataset as values and the 'row'/'col' datasets as
    string index/column labels.

    The file handle is now closed via a context manager; the previous
    version leaked it.

    :param hdf5file: path to the HDF5 file
    :return: pandas.DataFrame of the aggregate network
    """
    with h5py.File(hdf5file, 'r') as f:
        npdset = np.array(f['agg'])
        collist = list(np.array(f['col']).astype(str))
        rowlist = list(np.array(f['row']).astype(str))
    return pd.DataFrame(npdset, columns=collist, index=rowlist)
def load_df(filepath):
    """Load a tab-separated table as an all-string DataFrame.

    '~' in filepath is expanded; the first column becomes the index,
    lines starting with '#' are comments, and missing values become ''.
    """
    expanded = os.path.expanduser(filepath)
    frame = pd.read_csv(expanded, sep='\t', index_col=0,
                        keep_default_na=False, dtype=str, comment="#")
    frame.fillna(value='', inplace=True)
    return frame.astype('str', copy=False)
def merge_write_df(newdf, filepath, mode=0o644):
    """
    Reads existing, merges new, drops duplicates, writes to temp, renames temp.

    :param newdf: DataFrame with new rows to merge into the file.
    :param filepath: TSV file to create or update ('~' is expanded).
    :param mode: permission bits applied to the final file.
    """
    # Local import: tempfile was used below but never imported in this module,
    # so every call raised NameError.
    import tempfile
    log = logging.getLogger('utils')
    log.debug(f'inbound new df:\n{newdf}')
    filepath = os.path.expanduser(filepath)
    if os.path.isfile(filepath):
        df = load_df(filepath)
        log.debug(f'read df:\n{df}')
        # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
        df = pd.concat([df, newdf], ignore_index=True)
        df.fillna(value='', inplace=True)
        df = df.astype('str', copy=False)
        log.debug(f'appended df:\n{df}')
    else:
        df = newdf
        df.fillna(value='', inplace=True)
        df = df.astype('str', copy=False)
    logging.debug(f"df length before dropping dupes is {len(df)}")
    df.drop_duplicates(inplace=True, ignore_index=True, keep='first')
    logging.debug(f"df length after dropping dupes is {len(df)}")
    df = df.reset_index(drop=True)
    rootpath = os.path.dirname(filepath)
    basename = os.path.basename(filepath)
    try:
        (tfd, tfname) = tempfile.mkstemp(suffix=None,
                                        prefix=f"{basename}.",
                                        dir=f"{rootpath}/",
                                        text=True)
        os.close(tfd)  # close the raw fd; to_csv reopens the file by name
        logging.debug(f"made temp {tfname}")
        df.to_csv(tfname, sep='\t')
        # Atomic replace: write to temp then rename onto the target.
        os.rename(tfname, filepath)
        os.chmod(filepath, mode)
        logging.info(f"wrote df to {filepath}")
    except Exception as ex:
        logging.error(traceback.format_exc(None))
        raise ex
def check_range(df, threshold):
    """Binarize df at threshold, then rank nodes by degree (descending)."""
    binary = threshold_boolean(df, threshold)
    return sort_node_degree(binary)
def threshold_boolean(df, thresh):
    """Binarize a DataFrame: 1 where value > thresh, 0 elsewhere.

    Fixed: the previous two-pass in-place assignment wrote 1s first and
    then zeroed everything <= thresh, so for thresh >= 1 the freshly
    written 1s were zeroed too and the result was all zeros. Computing
    the mask once before mutating avoids that.
    """
    mask = df > thresh
    newdf = df.copy()
    newdf[mask] = 1
    newdf[~mask] = 0
    return newdf
def threshold_value(df, thresh, sense='above'):
    '''
    Zero out values relative to a threshold, keeping the rest unchanged.

    'above' keeps values strictly greater than thresh (others set to 0).
    'below' keeps values strictly less than thresh (others set to 0).
    Any other sense returns an unmodified copy.

    Cleanup: the previous version contained no-op self-assignments
    (newdf[mask] = newdf) before each zeroing step; they are removed
    here with identical behavior.
    '''
    thresh = float(thresh)
    newdf = df.copy()
    if sense == 'above':
        newdf[newdf <= thresh] = 0
    elif sense == 'below':
        newdf[newdf >= thresh] = 0
    return newdf
def sort_node_degree(df):
    """Sum each row (a node's degree) and return the Series sorted high-to-low."""
    degrees = df.sum(axis=1)
    return degrees.sort_values(ascending=False)
def cluster_coexp(exphd5='~/data/cococonet/yeast_AggNet.hdf5', threshold=0.95, test=False, sense='above'):
    """
    Cluster a co-expression network by hierarchical clustering + dynamic
    tree cut.

    Args:
        exphd5: path to the aggregate co-expression HDF5 ('~' expanded).
        threshold: co-expression cutoff passed to threshold_value().
        test: if True, restrict to the first 200x200 submatrix.
        sense: 'above' or 'below', forwarded to threshold_value().

    Returns:
        DataFrame with columns ['cluster', 'locus'], one row per gene.
    """
    exphd5 = os.path.expanduser(exphd5)
    logging.debug(f"clustering {exphd5} threshold={threshold} test={test} sense='{sense}'")
    edf = load_coexp(exphd5)
    logging.debug(edf)
    tdf = threshold_value(edf, threshold, sense=sense)
    # A gene trivially co-expresses with itself; drop self-edges.
    np.fill_diagonal(tdf.values, 0)
    sdf = tdf.copy()
    # Drop genes left with no edges at all (all-zero rows, then columns).
    sdf.drop(sdf.loc[sdf.sum(axis=1)==0].index, inplace=True)
    sdf.drop(columns=sdf.columns[sdf.sum()==0], inplace=True)
    logging.debug(sdf)
    # calc 1 - coexpression = distance.
    dedf = 1 - sdf
    # pull a subset of the distance matrix
    if test:
        logging.info('test is true. subsetting to 200x200')
        dedf = dedf.iloc[0:200, 0:200]
    distances = pdist(dedf, metric="euclidean")
    logging.debug(distances)
    link = linkage(distances, "average")
    s = datetime.datetime.now()
    clusters = cutreeHybrid(link, distances)
    e = datetime.datetime.now()
    elapsed = e - s
    logging.info(f'Clustering took {elapsed.seconds} seconds.')
    logging.debug(len(clusters['labels']))
    logging.debug(clusters["labels"])
    larray = clusters["labels"]
    ldf = pd.DataFrame(larray, columns=['label'])
    ldf.index = dedf.index
    # extract gene names using assigned labels (label 0 = unassigned,
    # so cluster ids start at 1):
    outdf = pd.DataFrame(columns=['cluster', 'locus'])
    for lidx in range(1, larray.max() + 1):
        glist = list(dedf.loc[ldf['label'] == lidx].index)
        logging.debug(f'label={lidx} len={len(glist)} -> {glist} \n')
        tdf = pd.DataFrame(data=glist, columns=['locus'])
        tdf['cluster'] = lidx
        outdf = pd.concat([outdf, tdf], ignore_index=True)
    return outdf
if __name__ == '__main__':
    # Script entry point: parse flags, configure logging, load the config
    # file, then load and print the co-expression matrix from --infile.
    gitpath = os.path.expanduser("~/git/alphaexp")
    sys.path.append(gitpath)
    FORMAT = '%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
    logging.basicConfig(format=FORMAT)
    logging.getLogger().setLevel(logging.WARN)
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug',
                        action="store_true",
                        dest='debug',
                        help='debug logging')
    parser.add_argument('-v', '--verbose',
                        action="store_true",
                        dest='verbose',
                        help='verbose logging')
    parser.add_argument('-c', '--config',
                        action="store",
                        dest='conffile',
                        default='~/git/cshlwork/etc/exputils.conf',
                        help='Config file path [~/git/cshlwork/etc/exputils.conf]')
    parser.add_argument('-i', '--infile',
                        metavar='infile',
                        type=str,
                        default=None,
                        help='infile. ')
    args = parser.parse_args()
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    if args.verbose:
        logging.getLogger().setLevel(logging.INFO)
    # Bug fix: cp was previously bound only inside the "is not None"
    # branch, so a None conffile raised NameError at get_configstr(cp).
    # Bind an (empty) parser unconditionally and read the file if given.
    cp = ConfigParser()
    if args.conffile is not None:
        cp.read(os.path.expanduser(args.conffile))
    cs = get_configstr(cp)
    logging.debug(f"got config: {cs}")
    logging.debug(f"args: {args}")
    df = load_coexp(args.infile)
    print(df)
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"scipy.cluster.hierarchy.linkage",
"os.path.isfile",
"scipy.spatial.distance.pdist",
"sys.path.append",
"pandas.DataFrame",
"os.path.dirname",
"traceback.format_exc",
"configparser.ConfigParser",
"datetime.datetime.now",
"pandas.concat",
"numpy.f... | [((99, 135), 'os.path.expanduser', 'os.path.expanduser', (['"""~/git/cshlwork"""'], {}), "('~/git/cshlwork')\n", (117, 135), False, 'import os, sys\n'), ((136, 160), 'sys.path.append', 'sys.path.append', (['gitpath'], {}), '(gitpath)\n', (151, 160), False, 'import os, sys\n'), ((637, 651), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (649, 651), False, 'from configparser import ConfigParser\n'), ((761, 785), 'h5py.File', 'h5py.File', (['hdf5file', '"""r"""'], {}), "(hdf5file, 'r')\n", (770, 785), False, 'import h5py\n'), ((819, 833), 'numpy.array', 'np.array', (['dset'], {}), '(dset)\n', (827, 833), True, 'import numpy as np\n'), ((945, 997), 'pandas.DataFrame', 'pd.DataFrame', (['npdset'], {'columns': 'collist', 'index': 'rowlist'}), '(npdset, columns=collist, index=rowlist)\n', (957, 997), True, 'import pandas as pd\n'), ((1136, 1164), 'os.path.expanduser', 'os.path.expanduser', (['filepath'], {}), '(filepath)\n', (1154, 1164), False, 'import os, sys\n'), ((1174, 1270), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '"""\t"""', 'index_col': '(0)', 'keep_default_na': '(False)', 'dtype': 'str', 'comment': '"""#"""'}), "(filepath, sep='\\t', index_col=0, keep_default_na=False, dtype=\n str, comment='#')\n", (1185, 1270), True, 'import pandas as pd\n'), ((1515, 1541), 'logging.getLogger', 'logging.getLogger', (['"""utils"""'], {}), "('utils')\n", (1532, 1541), False, 'import logging\n'), ((1600, 1628), 'os.path.expanduser', 'os.path.expanduser', (['filepath'], {}), '(filepath)\n', (1618, 1628), False, 'import os, sys\n'), ((1636, 1660), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (1650, 1660), False, 'import os, sys\n'), ((2270, 2295), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (2285, 2295), False, 'import os, sys\n'), ((2311, 2337), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (2327, 2337), False, 'import os, sys\n'), ((3827, 3853), 
'os.path.expanduser', 'os.path.expanduser', (['exphd5'], {}), '(exphd5)\n', (3845, 3853), False, 'import os, sys\n'), ((3858, 3950), 'logging.debug', 'logging.debug', (['f"""clustering {exphd5} threshold={threshold} test={test} sense=\'{sense}\'"""'], {}), '(\n f"clustering {exphd5} threshold={threshold} test={test} sense=\'{sense}\'")\n', (3871, 3950), False, 'import logging\n'), ((3979, 3997), 'logging.debug', 'logging.debug', (['edf'], {}), '(edf)\n', (3992, 3997), False, 'import logging\n'), ((4062, 4093), 'numpy.fill_diagonal', 'np.fill_diagonal', (['tdf.values', '(0)'], {}), '(tdf.values, 0)\n', (4078, 4093), True, 'import numpy as np\n'), ((4243, 4261), 'logging.debug', 'logging.debug', (['sdf'], {}), '(sdf)\n', (4256, 4261), False, 'import logging\n'), ((4499, 4530), 'scipy.spatial.distance.pdist', 'pdist', (['dedf'], {'metric': '"""euclidean"""'}), "(dedf, metric='euclidean')\n", (4504, 4530), False, 'from scipy.spatial.distance import pdist\n'), ((4535, 4559), 'logging.debug', 'logging.debug', (['distances'], {}), '(distances)\n', (4548, 4559), False, 'import logging\n'), ((4571, 4600), 'scipy.cluster.hierarchy.linkage', 'linkage', (['distances', '"""average"""'], {}), "(distances, 'average')\n", (4578, 4600), False, 'from scipy.cluster.hierarchy import linkage\n'), ((4609, 4632), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4630, 4632), False, 'import datetime\n'), ((4648, 4677), 'dynamicTreeCut.cutreeHybrid', 'cutreeHybrid', (['link', 'distances'], {}), '(link, distances)\n', (4660, 4677), False, 'from dynamicTreeCut import cutreeHybrid\n'), ((4686, 4709), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4707, 4709), False, 'import datetime\n'), ((4734, 4793), 'logging.info', 'logging.info', (['f"""Clustering took {elapsed.seconds} seconds."""'], {}), "(f'Clustering took {elapsed.seconds} seconds.')\n", (4746, 4793), False, 'import logging\n'), ((4841, 4874), 'logging.debug', 'logging.debug', 
(["clusters['labels']"], {}), "(clusters['labels'])\n", (4854, 4874), False, 'import logging\n'), ((4917, 4956), 'pandas.DataFrame', 'pd.DataFrame', (['larray'], {'columns': "['label']"}), "(larray, columns=['label'])\n", (4929, 4956), True, 'import pandas as pd\n'), ((5070, 5112), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cluster', 'locus']"}), "(columns=['cluster', 'locus'])\n", (5082, 5112), True, 'import pandas as pd\n'), ((5502, 5538), 'os.path.expanduser', 'os.path.expanduser', (['"""~/git/alphaexp"""'], {}), "('~/git/alphaexp')\n", (5520, 5538), False, 'import os, sys\n'), ((5543, 5567), 'sys.path.append', 'sys.path.append', (['gitpath'], {}), '(gitpath)\n', (5558, 5567), False, 'import os, sys\n'), ((5685, 5719), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT'}), '(format=FORMAT)\n', (5704, 5719), False, 'import logging\n'), ((5781, 5806), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5804, 5806), False, 'import argparse\n'), ((6999, 7033), 'logging.debug', 'logging.debug', (['f"""got config: {cs}"""'], {}), "(f'got config: {cs}')\n", (7012, 7033), False, 'import logging\n'), ((7038, 7068), 'logging.debug', 'logging.debug', (['f"""args: {args}"""'], {}), "(f'args: {args}')\n", (7051, 7068), False, 'import logging\n'), ((504, 517), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (515, 517), False, 'import io\n'), ((664, 710), 'os.path.expanduser', 'os.path.expanduser', (['"""~/git/scqc/etc/scqc.conf"""'], {}), "('~/git/scqc/etc/scqc.conf')\n", (682, 710), False, 'import os, sys\n'), ((2586, 2622), 'logging.debug', 'logging.debug', (['f"""made temp {tfname}"""'], {}), "(f'made temp {tfname}')\n", (2599, 2622), False, 'import logging\n'), ((2667, 2694), 'os.rename', 'os.rename', (['tfname', 'filepath'], {}), '(tfname, filepath)\n', (2676, 2694), False, 'import os, sys\n'), ((2703, 2727), 'os.chmod', 'os.chmod', (['filepath', 'mode'], {}), '(filepath, mode)\n', (2711, 2727), False, 'import os, 
sys\n'), ((2736, 2775), 'logging.info', 'logging.info', (['f"""wrote df to {filepath}"""'], {}), "(f'wrote df to {filepath}')\n", (2748, 2775), False, 'import logging\n'), ((4391, 4442), 'logging.info', 'logging.info', (['"""test is true. subsetting to 200x200"""'], {}), "('test is true. subsetting to 200x200')\n", (4403, 4442), False, 'import logging\n'), ((5302, 5345), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'glist', 'columns': "['locus']"}), "(data=glist, columns=['locus'])\n", (5314, 5345), True, 'import pandas as pd\n'), ((5392, 5434), 'pandas.concat', 'pd.concat', (['[outdf, tdf]'], {'ignore_index': '(True)'}), '([outdf, tdf], ignore_index=True)\n', (5401, 5434), True, 'import pandas as pd\n'), ((6869, 6883), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (6881, 6883), False, 'from configparser import ConfigParser\n'), ((5724, 5743), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5741, 5743), False, 'import logging\n'), ((6900, 6933), 'os.path.expanduser', 'os.path.expanduser', (['args.conffile'], {}), '(args.conffile)\n', (6918, 6933), False, 'import os, sys\n'), ((853, 871), 'numpy.array', 'np.array', (["f['col']"], {}), "(f['col'])\n", (861, 871), True, 'import numpy as np\n'), ((904, 922), 'numpy.array', 'np.array', (["f['row']"], {}), "(f['row'])\n", (912, 922), True, 'import numpy as np\n'), ((2827, 2853), 'traceback.format_exc', 'traceback.format_exc', (['None'], {}), '(None)\n', (2847, 2853), False, 'import traceback\n'), ((6705, 6724), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6722, 6724), False, 'import logging\n'), ((6778, 6797), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6795, 6797), False, 'import logging\n')] |
import tensorflow as tf
import numpy as np
import random
import logging
from GamePlayer.Player import AverageRandomPlayer
from Environment.Wizard import Wizard
from Environment.Wizard import MAX_ROUNDS
from Environment.Card import Card
class TrickPrediction(object):
    """Feed-forward trick-count regressor for the Wizard card game.

    A single-hidden-layer network (TensorFlow 1.x graph mode) maps the
    encoding of a player's hand cards to the expected number of tricks won.
    Experience is gathered in a circular replay buffer and the network is
    retrained from random minibatches whenever the buffer fills up.
    """
    # Width of the single hidden layer.
    n_hidden_1 = 40

    def __init__(self, session, path, input_shape=59, memory=10000, batch_size=1024, training_rounds=200,
                 learning_rate=0.005, save_update=500):
        """
        Args:
            session: tf.Session used to build and run the graph.
            path: output directory for TensorBoard summaries and checkpoints.
            input_shape: length of the hand-card feature vector.
            memory: capacity of the circular experience buffer.
            batch_size: minibatch size sampled from the buffer.
            training_rounds: number of Wizard games played for initial
                data collection.
            learning_rate: Adam learning rate.
            save_update: checkpoint the model every this many train steps.
        """
        self.logger = logging.getLogger('wizard-rl.TrickPrediction')
        self.path = path
        self.input_shape = input_shape
        self.output_shape = 1
        self.learning_rate = learning_rate
        # Circular buffer of (handcards, declared tricks, actual tricks).
        self.memory = [([], 0, 0)] * memory
        self.batch_size = batch_size
        self.update_rate = max(1, batch_size // 8)
        self.save_update = save_update
        self.saver = None
        self.t = 0        # number of observations recorded so far
        self.t_train = 0  # number of training steps performed so far
        self._prediction = None
        self._optimizer = None
        self._loss = None
        self._x = None
        self._y = None
        self._var_init = None
        self._session = session
        self._trained = False
        self._merged = None
        self._histos = [None] * MAX_ROUNDS
        self._sum_histograms = [None] * MAX_ROUNDS
        self._train_writer = None
        self.training_rounds = training_rounds
        self._init_model()

    def _init_model(self):
        """Build the TF1 graph: network, loss, optimizer, summaries, saver."""
        with tf.variable_scope("TP_Input_Data"):
            self._x = tf.placeholder("float", [None, self.input_shape], name="handcards")
            self._y = tf.placeholder("float", [None, self.output_shape], name="num_tricks")
        with tf.variable_scope("TP_Network"):
            hidden1 = tf.layers.dense(self._x, self.n_hidden_1, activation=tf.nn.relu, name="Hidden_1")
            self._prediction = tf.layers.dense(hidden1, self.output_shape, use_bias=False)
        with tf.variable_scope("TP_Learning"):
            # Both sides are scaled by 10, amplifying the MSE gradient.
            self._loss = tf.losses.mean_squared_error(self._y*10, self._prediction*10)
            self._optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self._loss)
        # tracking of trick prediction loss
        summary = tf.summary.scalar('loss_tp', self._loss)
        self._merged = tf.summary.merge([summary])
        # histogram of trick prediction results per game round
        for i in range(MAX_ROUNDS):
            self._histos[i] = tf.summary.histogram("histo_tp_{}_cards".format(i),
                                                  tf.math.round(tf.reduce_sum(self._prediction)))
            self._sum_histograms[i] = tf.summary.merge([self._histos[i]])
        self._train_writer = tf.summary.FileWriter(self.path, self._session.graph)
        self.saver = tf.train.Saver()

    def update(self, cards, num_forecast, num_tricks):
        """
        Fills one entry in the memory and updates the estimator.
        Args:
            cards: np.array of handcards
            num_forecast: number of tricks player declared
            num_tricks: number of tricks player got
        """
        # Circular buffer for memory.
        self.memory[self.t % len(self.memory)] = (cards, num_forecast, num_tricks)
        self.t += 1
        if self.t == len(self.memory) * 2:
            # Prevent overflow, this might cause skidding in the update rate
            self.t = len(self.memory)
        # If memory is full, we can start training
        if self.t % len(self.memory) == 0:
            self.update_network()

    def create_minibatch(self):
        """Sample a random minibatch from memory; returns (x, y) arrays."""
        # Randomly sample from experience
        minibatch = random.sample(self.memory, self.batch_size)
        # Initialize x and y for the neural network
        x = np.zeros((self.batch_size, self.input_shape))
        y = np.zeros((self.batch_size, self.output_shape))
        # Iterate over the minibatch to fill x and y
        i = 0
        for card, num_forecast, num_tricks in minibatch:
            # x are simply the hand cards of player; y the tricks they won
            x[i] = card
            y[i] = num_tricks
            i += 1
        return x, y

    def update_network(self):
        """Run 50 minibatch training steps on the current replay buffer."""
        self._trained = True
        for _ in range(50):
            x, y = self.create_minibatch()
            self.train_model(x, y)

    def train_model(self, batch_x, batch_y):
        """Perform one optimizer step, log loss, checkpoint periodically."""
        self.t_train += 1
        self.logger.info("TRAINING TRICK PREDICTION no. {}".format(self.t_train))
        feed_dict = {
            self._x: batch_x,
            self._y: batch_y
        }
        # train network
        summary, opt, loss = self._session.run([self._merged, self._optimizer, self._loss], feed_dict)
        self.logger.info("Epoch {} - Loss: {}".format(self.t_train, loss))
        self._train_writer.add_summary(summary, self.t_train)
        if self.t_train % self.save_update == 0:
            self.save()

    def collect_training_data(self, players):
        """Play self.training_rounds Wizard games; return their (x, y) history."""
        x = None
        y = None
        for i in range(self.training_rounds):
            wizard = Wizard(players=players, track_tricks=True)
            wizard.play()
            temp_x, temp_y = wizard.get_history()
            if x is None:
                x = temp_x
                y = temp_y
            else:
                x = np.concatenate((x, temp_x), axis=0)
                y = np.concatenate((y, temp_y), axis=0)
            # temporary progress tracker
            if i % 100 == 0:
                self.logger.info("Trick Prediction Initializer: Round {} finished".format(i))
        return x, y

    def init_training(self, num_players=4, epochs=100):
        """Bootstrap the network from games between random players."""
        self.logger.info("Initial training for trick prediction")
        players = [AverageRandomPlayer() for _ in range(num_players)]
        x, y = self.collect_training_data(players)
        # NOTE(review): local batch_size shadows self.batch_size — confirm
        # this is intentional.
        batch_size = 1024
        for e in range(epochs):
            batch_idx = np.random.choice(np.arange(len(x)), batch_size, replace=False)
            batch_x = x[batch_idx]
            batch_y = y[batch_idx]
            self.train_model(batch_x, batch_y[:, np.newaxis])
        self.logger.info("Initial Training finished")
        self._trained = True

    def predict(self, s, average):
        """Predict the number of tricks for state s.

        Falls back to `average` until the network has been trained.
        `game` (the sum over the first DIFFERENT_CARDS entries of s —
        presumably the number of cards in hand) selects which per-round
        histogram summary to record.
        """
        game = np.sum(s[:Card.DIFFERENT_CARDS])
        if not self._trained:
            # self.init_training()
            return average
        feed_dict = {self._x: np.array(s)[np.newaxis, :]}
        summ, histo, prediction = self._session.run([self._sum_histograms[game], self._histos[game], self._prediction], feed_dict)
        self._train_writer.add_summary(summ)
        return prediction[0, 0]

    def save(self):
        """Write a checkpoint of the session to <path>/models/model_tp.ckpt."""
        save_path = self.saver.save(self._session, self.path + "/models/model_tp.ckpt")
        # Bug fix: the class defines no name() method, so the original
        # self.name() raised AttributeError; use the class name instead.
        self.logger.info("{}: Model saved in {}".format(type(self).__name__, save_path))
| [
"tensorflow.reduce_sum",
"numpy.sum",
"numpy.concatenate",
"tensorflow.summary.scalar",
"tensorflow.train.Saver",
"random.sample",
"tensorflow.losses.mean_squared_error",
"GamePlayer.Player.AverageRandomPlayer",
"tensorflow.layers.dense",
"numpy.zeros",
"tensorflow.variable_scope",
"tensorflow... | [((475, 521), 'logging.getLogger', 'logging.getLogger', (['"""wizard-rl.TrickPrediction"""'], {}), "('wizard-rl.TrickPrediction')\n", (492, 521), False, 'import logging\n'), ((2164, 2204), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_tp"""', 'self._loss'], {}), "('loss_tp', self._loss)\n", (2181, 2204), True, 'import tensorflow as tf\n'), ((2228, 2255), 'tensorflow.summary.merge', 'tf.summary.merge', (['[summary]'], {}), '([summary])\n', (2244, 2255), True, 'import tensorflow as tf\n'), ((2642, 2695), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.path', 'self._session.graph'], {}), '(self.path, self._session.graph)\n', (2663, 2695), True, 'import tensorflow as tf\n'), ((2718, 2734), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2732, 2734), True, 'import tensorflow as tf\n'), ((3570, 3613), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (3583, 3613), False, 'import random\n'), ((3678, 3723), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.input_shape)'], {}), '((self.batch_size, self.input_shape))\n', (3686, 3723), True, 'import numpy as np\n'), ((3736, 3782), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.output_shape)'], {}), '((self.batch_size, self.output_shape))\n', (3744, 3782), True, 'import numpy as np\n'), ((6122, 6154), 'numpy.sum', 'np.sum', (['s[:Card.DIFFERENT_CARDS]'], {}), '(s[:Card.DIFFERENT_CARDS])\n', (6128, 6154), True, 'import numpy as np\n'), ((1398, 1432), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""TP_Input_Data"""'], {}), "('TP_Input_Data')\n", (1415, 1432), True, 'import tensorflow as tf\n'), ((1456, 1523), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.input_shape]'], {'name': '"""handcards"""'}), "('float', [None, self.input_shape], name='handcards')\n", (1470, 1523), True, 'import tensorflow as tf\n'), ((1546, 1615), 'tensorflow.placeholder', 
'tf.placeholder', (['"""float"""', '[None, self.output_shape]'], {'name': '"""num_tricks"""'}), "('float', [None, self.output_shape], name='num_tricks')\n", (1560, 1615), True, 'import tensorflow as tf\n'), ((1630, 1661), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""TP_Network"""'], {}), "('TP_Network')\n", (1647, 1661), True, 'import tensorflow as tf\n'), ((1685, 1771), 'tensorflow.layers.dense', 'tf.layers.dense', (['self._x', 'self.n_hidden_1'], {'activation': 'tf.nn.relu', 'name': '"""Hidden_1"""'}), "(self._x, self.n_hidden_1, activation=tf.nn.relu, name=\n 'Hidden_1')\n", (1700, 1771), True, 'import tensorflow as tf\n'), ((1798, 1857), 'tensorflow.layers.dense', 'tf.layers.dense', (['hidden1', 'self.output_shape'], {'use_bias': '(False)'}), '(hidden1, self.output_shape, use_bias=False)\n', (1813, 1857), True, 'import tensorflow as tf\n'), ((1872, 1904), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""TP_Learning"""'], {}), "('TP_Learning')\n", (1889, 1904), True, 'import tensorflow as tf\n'), ((1931, 1996), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['(self._y * 10)', '(self._prediction * 10)'], {}), '(self._y * 10, self._prediction * 10)\n', (1959, 1996), True, 'import tensorflow as tf\n'), ((2576, 2611), 'tensorflow.summary.merge', 'tf.summary.merge', (['[self._histos[i]]'], {}), '([self._histos[i]])\n', (2592, 2611), True, 'import tensorflow as tf\n'), ((4953, 4995), 'Environment.Wizard.Wizard', 'Wizard', ([], {'players': 'players', 'track_tricks': '(True)'}), '(players=players, track_tricks=True)\n', (4959, 4995), False, 'from Environment.Wizard import Wizard\n'), ((5604, 5625), 'GamePlayer.Player.AverageRandomPlayer', 'AverageRandomPlayer', ([], {}), '()\n', (5623, 5625), False, 'from GamePlayer.Player import AverageRandomPlayer\n'), ((5191, 5226), 'numpy.concatenate', 'np.concatenate', (['(x, temp_x)'], {'axis': '(0)'}), '((x, temp_x), axis=0)\n', (5205, 5226), True, 'import numpy as np\n'), ((5247, 
5282), 'numpy.concatenate', 'np.concatenate', (['(y, temp_y)'], {'axis': '(0)'}), '((y, temp_y), axis=0)\n', (5261, 5282), True, 'import numpy as np\n'), ((6278, 6289), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (6286, 6289), True, 'import numpy as np\n'), ((2023, 2079), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (2045, 2079), True, 'import tensorflow as tf\n'), ((2504, 2535), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self._prediction'], {}), '(self._prediction)\n', (2517, 2535), True, 'import tensorflow as tf\n')] |
import io
from itertools import count
from collections import OrderedDict
import numpy as np
import matplotlib.patches as mpatches
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvas # noqa
from PIL import Image
from .datasets import Label
class AnomalyDetector:
    """Common interface for anomaly-detecting forest wrappers.

    Subclasses implement train() and score(); this base class records the
    stream of observed points/labels and carries a title used by the
    AnomalyDetectionExperiment performance plots.
    """

    def __init__(self, title):
        self.title = title
        # Accumulated observations: 2-D feature matrix / 1-D label vector,
        # both None until the first observe() call.
        self.known_data = None
        self.known_labels = None

    def train(self, data):
        raise NotImplementedError('abstract method called')

    def score(self, data):
        raise NotImplementedError('abstract method called')

    def observe(self, point, label):
        """Record one labelled data point.

        Parameters
        ----------
        point
            Data features.
        label
            Data label.

        Returns
        -------
        bool, was the forest updated? (this base implementation never
        updates, so it always returns False)
        """
        first_observation = self.known_data is None
        if first_observation:
            # Promote to a 1-row matrix and a 1-element label vector.
            self.known_data = np.reshape(point, (-1, len(point)))
            self.known_labels = np.reshape(label, (-1,))
        else:
            # Append to the stored history.
            self.known_data = np.vstack((self.known_data, point))
            self.known_labels = np.hstack((self.known_labels, label))
        return first_observation and False or False
class AnomalyDetectionExperiment:
    # Plot colors keyed by ground-truth label.
    COLORS = {Label.ANOMALY: 'red', Label.UNKNOWN: 'grey', Label.REGULAR: 'blue'}
    def __init__(self, regressor, data_features, data_labels, capacity=300):
        """
        Perform an active anomaly-detection experiment with an expert.

        At each iteration the regressor's most anomalous still-unlabelled
        point is revealed to it (simulating an expert labelling query).

        Parameters
        ----------
        regressor
            The forest (AnomalyDetector) to detect anomalies with.
        data_features
            Training data. Array of features.
        data_labels
            Labels for the data (Label.ANOMALY == -1 marks anomalies).
        capacity
            Maximum number of the iterations to perform. 300 by default.
        """
        self.regressor = regressor
        self.capacity = capacity
        self.data_features = data_features
        self.data_labels = data_labels
        # Filled by run(): order in which points were queried, and the
        # predicted-outlier set recorded at each step.
        self.trajectory = None
        self.trace = None
    def run(self):
        """
        Run the experiment until every anomaly is found or `capacity`
        queries have been spent.

        Returns
        -------
        Trajectory. The ndarray with indices of the explored data.
        """
        regressor = self.regressor
        data_features = self.data_features
        data_labels = self.data_labels
        # Indices of all the anomalies we are going to detect
        anomalies_list, = np.where(self.data_labels == -1)
        anomalies = set(anomalies_list)
        n_anomalies = len(anomalies)
        # Known labels. Dict preserves order, so we have full history.
        # The values are the current outliers with correction for missed points.
        knowns = OrderedDict()
        n_misses = 0
        # Train before doing anything
        regressor.train(data_features)
        # Should we recalculate scores now?
        recalculate = True
        ordering = None
        outlier = None
        while not anomalies.issubset(knowns) and len(knowns) < self.capacity:
            # Calculate the scores (only when the regressor was updated
            # by the previous observation)
            if recalculate:
                scores = regressor.score(data_features)
                # Ascending score order; most anomalous first — assumes
                # lower score == more anomalous. TODO confirm convention.
                ordering = np.argsort(scores)
            # Find the most anomalous unknown object
            for outlier in ordering:
                if outlier not in knowns:
                    break
            # Keep the anomaly predictions at each point
            knowns[outlier] = ordering[:n_anomalies + n_misses]
            if data_labels[outlier] == Label.REGULAR:
                # A wasted query: widen the recorded prediction window.
                n_misses += 1
            # ... and observe it
            recalculate = regressor.observe(data_features[outlier], data_labels[outlier])
        self.trajectory = np.fromiter(knowns, dtype=int)
        self.trace = list(knowns.values())
        return self.trajectory
    def draw_cartoon(self):
        """
        Draw an animation of how the regressor performs, one frame per
        query (requires 2-D feature data).

        Returns
        -------
        List of PIL images.
        """
        if self.trajectory is None:
            self.run()
        data_features = self.data_features
        data_labels = self.data_labels
        COLORS = self.COLORS
        images = []
        for i, trace in zip(count(), self.trace):
            # One standalone figure per frame (Agg canvas, no GUI needed).
            fig = Figure()
            canvas = FigureCanvas(fig)
            ax = fig.subplots()
            ax.set(title=f'{self.regressor.title}, iteration {i}',
                   xlabel='x1', ylabel='x2')
            # All points, then the current predicted-outlier set on top.
            ax.scatter(*data_features.T, color=COLORS[Label.REGULAR], s=10)
            ax.scatter(*data_features[trace, :].T, color=COLORS[Label.ANOMALY], s=10)
            # Points queried before this step, starred by true label.
            prehistory = self.trajectory[:i]
            index = data_labels[prehistory] == Label.ANOMALY
            if np.any(index):
                ax.scatter(*data_features[prehistory[index], :].T, marker='*', color=COLORS[Label.ANOMALY], s=80)
            index = ~index
            if np.any(index):
                ax.scatter(*data_features[prehistory[index], :].T, marker='*', color=COLORS[Label.REGULAR], s=80)
            # The point being queried at this step, in black.
            ax.scatter(*data_features[self.trajectory[i], :].T, marker='*', color='k', s=80)
            normal_patch = mpatches.Patch(color=COLORS[Label.REGULAR], label='Regular')
            anomalous_patch = mpatches.Patch(color=COLORS[Label.ANOMALY], label='Anomalous')
            ax.legend(handles=[normal_patch, anomalous_patch], loc='lower left')
            canvas.draw()
            # Rasterize the canvas into a PIL image.
            size = (int(canvas.renderer.width), int(canvas.renderer.height))
            s = canvas.tostring_rgb()
            image = Image.frombytes('RGB', size, s)
            images.append(image)
            # Release the figure resources eagerly; many frames are drawn.
            del canvas
            del fig
        return images
    def save_cartoon(self, file):
        """
        (Draw and) save a cartoon as an animated GIF (500 ms per frame,
        looping forever).

        Parameters
        ----------
        file
            Filename or file object to write GIF file to.

        Returns
        -------
        None
        """
        images = self.draw_cartoon()
        images[0].save(file, format='GIF',
                       save_all=True, append_images=images[1:],
                       optimize=False, duration=500, loop=0)
    def display_cartoon(self):
        """
        IPython display of the drawn GIF.

        Returns
        -------
        None
        """
        import IPython.display
        with io.BytesIO() as buffer:
            self.save_cartoon(buffer)
            return IPython.display.Image(buffer.getvalue())
| [
"matplotlib.backends.backend_agg.FigureCanvas",
"io.BytesIO",
"itertools.count",
"numpy.hstack",
"numpy.any",
"numpy.argsort",
"matplotlib.figure.Figure",
"numpy.where",
"numpy.reshape",
"numpy.fromiter",
"collections.OrderedDict",
"matplotlib.patches.Patch",
"PIL.Image.frombytes",
"numpy.... | [((2727, 2759), 'numpy.where', 'np.where', (['(self.data_labels == -1)'], {}), '(self.data_labels == -1)\n', (2735, 2759), True, 'import numpy as np\n'), ((3007, 3020), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3018, 3020), False, 'from collections import OrderedDict\n'), ((3998, 4028), 'numpy.fromiter', 'np.fromiter', (['knowns'], {'dtype': 'int'}), '(knowns, dtype=int)\n', (4009, 4028), True, 'import numpy as np\n'), ((1337, 1361), 'numpy.reshape', 'np.reshape', (['label', '(-1,)'], {}), '(label, (-1,))\n', (1347, 1361), True, 'import numpy as np\n'), ((1406, 1441), 'numpy.vstack', 'np.vstack', (['(self.known_data, point)'], {}), '((self.known_data, point))\n', (1415, 1441), True, 'import numpy as np\n'), ((1474, 1511), 'numpy.hstack', 'np.hstack', (['(self.known_labels, label)'], {}), '((self.known_labels, label))\n', (1483, 1511), True, 'import numpy as np\n'), ((4486, 4493), 'itertools.count', 'count', ([], {}), '()\n', (4491, 4493), False, 'from itertools import count\n'), ((4526, 4534), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (4532, 4534), False, 'from matplotlib.figure import Figure\n'), ((4556, 4573), 'matplotlib.backends.backend_agg.FigureCanvas', 'FigureCanvas', (['fig'], {}), '(fig)\n', (4568, 4573), False, 'from matplotlib.backends.backend_agg import FigureCanvas\n'), ((5004, 5017), 'numpy.any', 'np.any', (['index'], {}), '(index)\n', (5010, 5017), True, 'import numpy as np\n'), ((5176, 5189), 'numpy.any', 'np.any', (['index'], {}), '(index)\n', (5182, 5189), True, 'import numpy as np\n'), ((5427, 5487), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'COLORS[Label.REGULAR]', 'label': '"""Regular"""'}), "(color=COLORS[Label.REGULAR], label='Regular')\n", (5441, 5487), True, 'import matplotlib.patches as mpatches\n'), ((5518, 5580), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'COLORS[Label.ANOMALY]', 'label': '"""Anomalous"""'}), "(color=COLORS[Label.ANOMALY], 
label='Anomalous')\n", (5532, 5580), True, 'import matplotlib.patches as mpatches\n'), ((5824, 5855), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', 'size', 's'], {}), "('RGB', size, s)\n", (5839, 5855), False, 'from PIL import Image\n'), ((6600, 6612), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6610, 6612), False, 'import io\n'), ((3463, 3481), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (3473, 3481), True, 'import numpy as np\n')] |
import numpy as np
import os
import sys
import ntpath
import time
from . import util
from . import html
from scipy.misc import imresize
from config import *
import cv2
import imageio
import torch
import torchvision.transforms as transforms
import projections.operations as operations
# Visdom connection failures surface as ConnectionError on Python 3;
# Python 2 has no ConnectionError, so fall back to the base Exception.
VisdomExceptionBase = Exception if sys.version_info[0] == 2 else ConnectionError
def get_depth_channel(img):
    """Return an (H, W, 3) uint8 image whose three channels all hold the
    third channel of img (presumably the depth channel — TODO confirm).

    Vectorized replacement for the original per-pixel double loop; the
    astype('uint8') reproduces the C-style cast that assigning into the
    original uint8 buffer performed.
    """
    depth = img[:, :, 2].astype('uint8')
    return np.stack((depth, depth, depth), axis=2)
def remove_model_id_name(fileName):
    """Drop the third underscore-separated token from fileName
    (tokens 0, 1 and 3 are kept); a name with exactly five tokens
    maps to the sentinel string 'none'."""
    tokens = fileName.split('_')
    if len(tokens) == 5:
        return 'none'
    return f'{tokens[0]}_{tokens[1]}_{tokens[3]}'
def save_reconstruction_coord(diff_web, visuals, image_path, opt, aspect_ratio=1.0, width=256):
    """Save per-label PNG/EXR reconstructions to the diff_web page and
    return the mean absolute error between fake_B and real_B.

    NOTE(review): aspect_ratio is accepted but never used here.
    NOTE(review): exr_loss is only bound once both 'fake_B' and 'real_B'
    have been seen in visuals; if either is missing, the final return (and
    the txts.append when count==2) raises NameError — confirm callers
    always supply both.

    Args:
        diff_web: HTML page helper; images go under <web_dir>/images.
        visuals: ordered mapping of label -> tensor batch; labels
            'fake_B', 'real_A', 'real_B' get special handling.
        image_path: list whose first element names the source image;
            its basename (without extension) prefixes all outputs.
        opt: options namespace (output_nc, save_results, make_coord_depth,
            data_dir, gt_depth_dir, dataroot, coord_data_prefix, name,
            flag, texture, rgb_mask are read here).
        width: thumbnail width passed to diff_web.add_images.
    """
    diff_image_dir = diff_web.web_dir
    image_dir = diff_image_dir
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    diff_web.add_header(name)
    ims, txts, links = [], [], []
    # count reaches 2 once both fake_B and real_B tensors have been seen.
    count=0
    for label, im_data in visuals.items():
        new_file_name = name
        image_name = '%s_%s.png' % (new_file_name, label)
        exr_name = '%s_%s.exr' % (new_file_name, label)
        # Keep CHW->HWC numpy copies of the special tensors for the loss
        # and the coord-to-depth conversion below.
        if label=='fake_B':
            fake_B_exr = im_data[0].permute(1, 2, 0).cpu().detach().numpy()
            count+=1
        elif label=='real_A':
            real_A_exr = im_data[0].permute(1, 2, 0).cpu().detach().numpy()
        elif label=='real_B':
            real_B_exr = im_data[0].permute(1, 2, 0).cpu().detach().numpy()
            count+=1
        save_path = os.path.join(diff_image_dir,'images')
        png_save_path = os.path.join(save_path, image_name)
        exr_save_path = os.path.join(save_path, exr_name)
        os.makedirs(save_path, exist_ok=True)
        if opt.output_nc==1:
            # Single-channel output: write the raw channel as EXR only.
            if opt.save_results:
                cv2.imwrite(exr_save_path, im_data.cpu().detach().numpy()[0][0])
        else:
            if opt.save_results:
                exr_im = im_data[0].permute(1, 2, 0).cpu().detach().numpy()
                # PNG gets the [0,1] image scaled to [0,255].
                cv2.imwrite(png_save_path, exr_im*255)
                # Only generated outputs get an EXR; inputs/targets do not.
                if label != 'real_B' and label != 'real_A':
                    cv2.imwrite(exr_save_path, exr_im)
        ims.append(image_name)
        if count==2:
            # Mean absolute error between prediction and ground truth.
            exr_loss = np.sum(np.abs(fake_B_exr - real_B_exr))/(real_B_exr.size)
            txts.append('%s %f' % (label, exr_loss))
        else: txts.append(label)
        links.append(image_name)
    diff_web.add_images(ims, txts, links, width=width)
    if opt.make_coord_depth:
        # Ground-truth depth may live in a per-model subdirectory or
        # directly under gt_depth_dir.
        if os.path.exists(opt.data_dir + '/%s/%s' % (opt.gt_depth_dir, name.split('_')[0])):
            gt_depth_dir = os.path.join(opt.data_dir, '%s/%s' % (opt.gt_depth_dir, name.split('_')[0])) #+ '/../%s' % opt.gt_depth_dir
        else:
            gt_depth_dir = os.path.join(opt.data_dir, '%s' % (opt.gt_depth_dir))
        out_dir = os.path.join(opt.dataroot) + '/../%s_%s/%s' % (opt.coord_data_prefix, opt.name, opt.flag)
        # File name layout assumed: <model>_<part>_<id>_... — TODO confirm.
        s = name.split('_')
        model_name = '%s_%s'%(s[0], s[1])
        model_id = '%s'%(s[2])
        if opt.texture:
            operations.make_depth_texture_pairs_from_coord(opt, fake_B_exr, real_A_exr, gt_depth_dir, out_dir, model_name, model_id, use_rgb_mask=opt.rgb_mask)
        else:
            operations.make_depth_pairs_from_coord(opt, fake_B_exr, real_A_exr, gt_depth_dir, out_dir, model_name, model_id, use_rgb_mask=opt.rgb_mask)
    return exr_loss
def save_reconstruction_depth_dm(diff_web, visuals, image_path, opt, aspect_ratio=1.0, width=256):
    """Save reconstruction results for the dynamic-memory depth model.

    For each sample in the batch, writes real_A/fake_B/real_B PNGs (and a
    fake_B EXR) under ``<diff_web.web_dir>/images`` and adds one row to the
    HTML page with the per-sample fake_B/real_B mean absolute error.

    Args:
        diff_web: HTML page helper (provides web_dir, add_header, add_images).
        visuals: ordered mapping with keys 'fake_B', 'real_A', 'real_B'
            holding batched tensors; assumes channel-first layout since
            images are saved as ``arr[0]`` -- TODO confirm against caller.
        image_path: list whose first entry names the source image; used to
            derive output file names.
        opt: options object; ``save_results`` gates all writing and
            ``output_nc`` selects the single-channel save path.
        aspect_ratio: unused; kept for interface compatibility.
        width: display width forwarded to ``diff_web.add_images``.

    Returns:
        Mean absolute fake_B/real_B error over the whole batch, or 0 when
        ``opt.save_results`` is disabled.
    """
    diff_image_dir = diff_web.web_dir
    util.mkdir(os.path.join(diff_image_dir, 'images'))
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    diff_web.add_header(name)
    for label, im_data_group in visuals.items():
        if label == 'fake_B':
            fake_B_group = im_data_group
        elif label == 'real_A':
            real_A_group = im_data_group
        elif label == 'real_B':
            real_B_group = im_data_group
    if not opt.save_results:
        return 0
    fake_B_group = fake_B_group.cpu().detach().numpy()
    real_A_group = real_A_group.cpu().detach().numpy()
    real_B_group = real_B_group.cpu().detach().numpy()
    print('fake_B_group.shape', fake_B_group.shape)
    for j in range(fake_B_group.shape[0]):
        ims, txts, links = [], [], []
        real_a = real_A_group[j]
        fake_b = fake_B_group[j]
        real_b = real_B_group[j]
        # File names key on the first two '_'-separated fields; anything
        # from an 'X' onwards is stripped (augmentation suffix, presumably).
        new_file_name = '%s_%s' % (name.split('_')[0], name.split('_')[1])
        if new_file_name.find('X') != -1:
            new_file_name = new_file_name[0:new_file_name.find('X')]
        real_a_image_name = '%s_%d_real_A.png' % (new_file_name, j)
        fake_b_image_name = '%s_%d_fake_B.png' % (new_file_name, j)
        real_b_image_name = '%s_%d_real_B.png' % (new_file_name, j)
        fake_b_exr_name = '%s_%d_fake_B.exr' % (new_file_name, j)
        real_a_png_save_path = os.path.join(diff_image_dir, 'images', real_a_image_name)
        fake_b_png_save_path = os.path.join(diff_image_dir, 'images', fake_b_image_name)
        real_b_png_save_path = os.path.join(diff_image_dir, 'images', real_b_image_name)
        fake_b_exr_save_path = os.path.join(diff_image_dir, 'images', fake_b_exr_name)
        # save_results is guaranteed True past the early return above, so
        # only the single-channel condition needs checking here.
        if opt.output_nc == 1:
            cv2.imwrite(real_a_png_save_path, real_a[0]*255)
            cv2.imwrite(fake_b_png_save_path, fake_b[0]*255)
            cv2.imwrite(real_b_png_save_path, real_b[0]*255)
            cv2.imwrite(fake_b_exr_save_path, fake_b[0])
        ims.append(real_a_image_name)
        ims.append(fake_b_image_name)
        ims.append(real_b_image_name)
        links.append(real_a_image_name)
        links.append(fake_b_image_name)
        links.append(real_b_image_name)
        txts.append('real_a')
        txts.append('fake_b')
        # Per-sample mean absolute error, shown next to the ground-truth cell.
        exr_loss = np.sum(np.abs(fake_B_group[j] - real_B_group[j]))/(fake_B_group[j].size)
        txts.append('real_b %f' % (exr_loss))
        diff_web.add_images(ims, txts, links, width=width)
    print(fake_B_group.shape, fake_B_group.size)
    exr_loss = np.sum(np.abs(fake_B_group - real_B_group))/(fake_B_group.size)
    return exr_loss
def save_reconstruction_depth_pix(diff_web, visuals, image_path, opt, aspect_ratio=1.0, width=256):
    """Save reconstruction results for the pix2pix depth model.

    The single-sample outputs are tall images made of stacked 256-row tiles;
    each 256-row slice is written as its own real_A/fake_B/real_B PNG
    (plus a fake_B EXR) and added as one row of the HTML page together with
    the slice's fake_B/real_B mean absolute error.

    Args:
        diff_web: HTML page helper (provides web_dir, add_header, add_images).
        visuals: ordered mapping with keys 'fake_B', 'real_A', 'real_B';
            only element [0][0] of each batched tensor is used, i.e. a
            single-channel 2-D image.
        image_path: list whose first entry names the source image; used to
            derive output file names.
        opt: options object; ``save_results`` gates all writing and
            ``output_nc`` selects the single-channel save path.
        aspect_ratio: unused; kept for interface compatibility.
        width: display width forwarded to ``diff_web.add_images``.

    Returns:
        Mean absolute fake_B/real_B error over the full image, or 0 when
        ``opt.save_results`` is disabled.
    """
    diff_image_dir = diff_web.web_dir
    util.mkdir(os.path.join(diff_image_dir, 'images'))
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    diff_web.add_header(name)
    for label, im_data_group in visuals.items():
        if label == 'fake_B':
            fake_B_group = im_data_group
        elif label == 'real_A':
            real_A_group = im_data_group
        elif label == 'real_B':
            real_B_group = im_data_group
    if not opt.save_results:
        return 0
    fake_B_group = fake_B_group.cpu().detach().numpy()[0][0]
    real_A_group = real_A_group.cpu().detach().numpy()[0][0]
    real_B_group = real_B_group.cpu().detach().numpy()[0][0]
    # The image is a vertical stack of 256-row tiles.
    num = int(fake_B_group.shape[0]/256)
    print(num)
    for j in range(num):
        ims, txts, links = [], [], []
        real_a = real_A_group[j*256:(j+1)*256, :]
        fake_b = fake_B_group[j*256:(j+1)*256, :]
        real_b = real_B_group[j*256:(j+1)*256, :]
        # File names key on the first two '_'-separated fields; anything
        # from an 'X' onwards is stripped (augmentation suffix, presumably).
        new_file_name = '%s_%s' % (name.split('_')[0], name.split('_')[1])
        if new_file_name.find('X') != -1:
            new_file_name = new_file_name[0:new_file_name.find('X')]
        real_a_image_name = '%s_%d_real_A.png' % (new_file_name, j)
        fake_b_image_name = '%s_%d_fake_B.png' % (new_file_name, j)
        real_b_image_name = '%s_%d_real_B.png' % (new_file_name, j)
        fake_b_exr_name = '%s_%d_fake_B.exr' % (new_file_name, j)
        real_a_png_save_path = os.path.join(diff_image_dir, 'images', real_a_image_name)
        fake_b_png_save_path = os.path.join(diff_image_dir, 'images', fake_b_image_name)
        real_b_png_save_path = os.path.join(diff_image_dir, 'images', real_b_image_name)
        fake_b_exr_save_path = os.path.join(diff_image_dir, 'images', fake_b_exr_name)
        # save_results is guaranteed True past the early return above, so
        # only the single-channel condition needs checking here.
        if opt.output_nc == 1:
            cv2.imwrite(real_a_png_save_path, real_a*255)
            cv2.imwrite(fake_b_png_save_path, fake_b*255)
            cv2.imwrite(real_b_png_save_path, real_b*255)
            cv2.imwrite(fake_b_exr_save_path, fake_b)
        ims.append(real_a_image_name)
        ims.append(fake_b_image_name)
        ims.append(real_b_image_name)
        links.append(real_a_image_name)
        links.append(fake_b_image_name)
        links.append(real_b_image_name)
        txts.append('real_a')
        txts.append('fake_b')
        # Per-tile mean absolute error (note: indexes rows j of the full
        # image, matching the original behavior).
        exr_loss = np.sum(np.abs(fake_B_group[j] - real_B_group[j]))/(fake_B_group[j].size)
        txts.append('real_b %f' % (exr_loss))
        diff_web.add_images(ims, txts, links, width=width)
    print(fake_B_group.shape, fake_B_group.size)
    exr_loss = np.sum(np.abs(fake_B_group - real_B_group))/(fake_B_group.size)
    return exr_loss
def save_reconstruction_depth_texture(diff_web, visuals, image_path, opt, aspect_ratio=1.0, width=256):
    """Save reconstruction results for the depth+texture models.

    Tensors are permuted from (batch, C, H, W) to (batch, H, W, C); for each
    sample the first three channels are written as PNGs, the four channels
    of fake_B are concatenated side by side and written as an EXR, and one
    row is added to the HTML page with the per-sample fake_B/real_B mean
    absolute error.

    Args:
        diff_web: HTML page helper (provides web_dir, add_header, add_images).
        visuals: ordered mapping with keys 'fake_B', 'real_A', 'real_B'
            holding 4-channel batched tensors.
        image_path: list whose first entry names the source image; used to
            derive output file names.
        opt: options object; ``save_results`` gates the PNG writes only --
            the fake_B EXR is written unconditionally, as in the original.
        aspect_ratio: unused; kept for interface compatibility.
        width: display width forwarded to ``diff_web.add_images``.

    Returns:
        Mean absolute fake_B/real_B error over the whole batch.
    """
    diff_image_dir = diff_web.web_dir
    util.mkdir(os.path.join(diff_image_dir, 'images'))
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    diff_web.add_header(name)
    for label, im_data_group in visuals.items():
        if label == 'fake_B':
            fake_B_group = im_data_group
        elif label == 'real_A':
            real_A_group = im_data_group
        elif label == 'real_B':
            real_B_group = im_data_group
    fake_B_group = fake_B_group.permute(0, 2, 3, 1).cpu().detach().numpy()
    real_A_group = real_A_group.permute(0, 2, 3, 1).cpu().detach().numpy()
    real_B_group = real_B_group.permute(0, 2, 3, 1).cpu().detach().numpy()
    for j in range(fake_B_group.shape[0]):
        ims, txts, links = [], [], []
        real_a = real_A_group[j]
        fake_b = fake_B_group[j]
        real_b = real_B_group[j]
        # File names key on the first two '_'-separated fields; anything
        # from an 'X' onwards is stripped (augmentation suffix, presumably).
        new_file_name = '%s_%s' % (name.split('_')[0], name.split('_')[1])
        if new_file_name.find('X') != -1:
            new_file_name = new_file_name[0:new_file_name.find('X')]
        real_a_image_name = '%s_%d_real_A.png' % (new_file_name, j)
        fake_b_image_name = '%s_%d_fake_B.png' % (new_file_name, j)
        real_b_image_name = '%s_%d_real_B.png' % (new_file_name, j)
        fake_b_exr_name = '%s_%d_fake_B.exr' % (new_file_name, j)
        real_a_png_save_path = os.path.join(diff_image_dir, 'images', real_a_image_name)
        fake_b_png_save_path = os.path.join(diff_image_dir, 'images', fake_b_image_name)
        real_b_png_save_path = os.path.join(diff_image_dir, 'images', real_b_image_name)
        fake_b_exr_save_path = os.path.join(diff_image_dir, 'images', fake_b_exr_name)
        if opt.save_results:  # 4 channel; only the first 3 go to PNG.
            cv2.imwrite(real_a_png_save_path, real_a[:, :, 0:3]*255)
            cv2.imwrite(fake_b_png_save_path, fake_b[:, :, 0:3]*255)
            cv2.imwrite(real_b_png_save_path, real_b[:, :, 0:3]*255)
        # All 4 channels side by side into one single-channel EXR
        # (written regardless of save_results, matching the original code).
        fake_b = np.concatenate((fake_b[:, :, 0], fake_b[:, :, 1], fake_b[:, :, 2], fake_b[:, :, 3]), 1)
        cv2.imwrite(fake_b_exr_save_path, fake_b)
        ims.append(real_a_image_name)
        ims.append(fake_b_image_name)
        ims.append(real_b_image_name)
        links.append(real_a_image_name)
        links.append(fake_b_image_name)
        links.append(real_b_image_name)
        txts.append('real_a')
        txts.append('fake_b')
        # Per-sample mean absolute error, shown next to the ground-truth cell.
        exr_loss = np.sum(np.abs(fake_B_group[j] - real_B_group[j]))/(fake_B_group[j].size)
        txts.append('real_b %f' % (exr_loss))
        diff_web.add_images(ims, txts, links, width=width)
    print(fake_B_group.shape, fake_B_group.size)
    exr_loss = np.sum(np.abs(fake_B_group - real_B_group))/(fake_B_group.size)
    return exr_loss
# save image to the disk
def save_images(diff_web, visuals, image_path, opt, aspect_ratio=1.0, width=256):
    """Dispatch to the save routine matching ``opt.model``.

    Returns the saver's result (the EXR loss), or None for unknown models.
    Note: the hard-coded aspect_ratio/width forwarding mirrors the original.
    """
    savers = {
        'Coord': save_reconstruction_coord,
        'RecShapeMemory': save_reconstruction_depth_dm,    # dynamic memory
        'Depth': save_reconstruction_depth_pix,            # pix2pix
        'RecTextureDepth': save_reconstruction_depth_texture,
        'PixDepthTexture': save_reconstruction_depth_texture,
    }
    saver = savers.get(opt.model)
    if saver is not None:
        return saver(diff_web, visuals, image_path, opt, aspect_ratio=1.0, width=256)
class Visualizer():
    """Displays training images/losses in visdom and saves them to an HTML page.

    Also appends textual loss logs to ``loss_log.txt`` in the checkpoint dir.
    """

    def __init__(self, opt):
        """Set up visdom connection, web directory and the loss log file."""
        self.display_id = opt.display_id
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.opt = opt
        self.saved = False
        if self.display_id > 0:
            # Imported lazily so visdom is only required when displaying.
            import visdom
            self.ncols = opt.display_ncols
            self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env, raise_exceptions=True)
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web_pix')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)

    def reset(self):
        """Allow the next call to display_current_results to save again."""
        self.saved = False

    def throw_visdom_connection_error(self):
        """Print setup instructions for visdom and abort the process."""
        print('\n\nCould not connect to Visdom server (https://github.com/facebookresearch/visdom) for displaying training progress.\nYou can suppress connection to Visdom using the option --display_id -1. To install visdom, run \n$ pip install visdom\n, and start the server by \n$ python -m visdom.server.\n\n')
        exit(1)

    # |visuals|: dictionary of images to display or save
    def display_current_results(self, visuals, epoch, save_result):
        """Show |visuals| in the visdom browser and/or save them to HTML.

        Args:
            visuals: ordered dict mapping label -> image tensor.
            epoch: current epoch number (used in saved file names).
            save_result: force saving to the HTML page this call.
        """
        if self.display_id > 0:  # show images in the browser
            ncols = self.ncols
            if ncols > 0:
                # Pack all images into a single visdom pane, ncols per row.
                ncols = min(ncols, len(visuals))
                h, w = next(iter(visuals.values())).shape[:2]
                table_css = """<style>
    table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}
    table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h)
                title = self.name
                label_html = ''
                label_html_row = ''
                images = []
                idx = 0
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    label_html_row += '<td>%s</td>' % label
                    images.append(image_numpy.transpose([2, 0, 1]))
                    idx += 1
                    if idx % ncols == 0:
                        label_html += '<tr>%s</tr>' % label_html_row
                        label_html_row = ''
                # Pad the last row with white cells so the grid stays rectangular.
                white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
                while idx % ncols != 0:
                    images.append(white_image)
                    label_html_row += '<td></td>'
                    idx += 1
                if label_html_row != '':
                    label_html += '<tr>%s</tr>' % label_html_row
                # pane col = image row
                try:
                    self.vis.images(images, nrow=ncols, win=self.display_id + 1,
                                    padding=2, opts=dict(title=title + ' images'))
                    label_html = '<table>%s</table>' % label_html
                    self.vis.text(table_css + label_html, win=self.display_id + 2,
                                  opts=dict(title=title + ' labels'))
                except VisdomExceptionBase:
                    self.throw_visdom_connection_error()
            else:
                # One visdom window per image.
                idx = 1
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
                                   win=self.display_id + idx)
                    idx += 1
        if self.use_html and (save_result or not self.saved):  # save images to a html file
            self.saved = True
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
                util.save_image(image_numpy, img_path)
            # update website: one header+row per past epoch, newest first.
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims, txts, links = [], [], []
                # Only the labels are needed here; the PNGs were written above.
                # (The original recomputed tensor2im on a stale variable --
                # dead code, removed.)
                for label in visuals:
                    img_path = 'epoch%.3d_%s.png' % (n, label)
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    # losses: dictionary of error labels and values
    def plot_current_losses(self, epoch, counter_ratio, opt, losses):
        """Append the current losses and redraw the visdom line plot."""
        if not hasattr(self, 'plot_data'):
            self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
        try:
            self.vis.line(
                X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
                Y=np.array(self.plot_data['Y']),
                opts={
                    'title': self.name + ' loss over time',
                    'legend': self.plot_data['legend'],
                    'xlabel': 'epoch',
                    'ylabel': 'loss'},
                win=self.display_id)
        except VisdomExceptionBase:
            self.throw_visdom_connection_error()

    # losses: same format as |losses| of plot_current_losses
    def print_current_losses(self, epoch, i, losses, t, t_data):
        """Print losses to stdout and append them to the loss log file."""
        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, i, t, t_data)
        for k, v in losses.items():
            message += '%s: %.3f ' % (k, v)
        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)

    # losses: same format as |losses| of plot_current_losses
    def print_avg_losses(self, msg):
        """Append a pre-formatted message to the loss log file."""
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % msg)
| [
"projections.operations.make_depth_pairs_from_coord",
"ntpath.basename",
"os.makedirs",
"numpy.abs",
"cv2.imwrite",
"visdom.Visdom",
"numpy.ones",
"projections.operations.make_depth_texture_pairs_from_coord",
"time.strftime",
"numpy.array",
"os.path.splitext",
"os.path.join",
"numpy.concaten... | [((438, 493), 'numpy.ones', 'np.ones', (['(img.shape[0], img.shape[1], 3)'], {'dtype': '"""uint8"""'}), "((img.shape[0], img.shape[1], 3), dtype='uint8')\n", (445, 493), True, 'import numpy as np\n'), ((1009, 1039), 'ntpath.basename', 'ntpath.basename', (['image_path[0]'], {}), '(image_path[0])\n', (1024, 1039), False, 'import ntpath\n'), ((3940, 3970), 'ntpath.basename', 'ntpath.basename', (['image_path[0]'], {}), '(image_path[0])\n', (3955, 3970), False, 'import ntpath\n'), ((7226, 7256), 'ntpath.basename', 'ntpath.basename', (['image_path[0]'], {}), '(image_path[0])\n', (7241, 7256), False, 'import ntpath\n'), ((10566, 10596), 'ntpath.basename', 'ntpath.basename', (['image_path[0]'], {}), '(image_path[0])\n', (10581, 10596), False, 'import ntpath\n'), ((1051, 1079), 'os.path.splitext', 'os.path.splitext', (['short_path'], {}), '(short_path)\n', (1067, 1079), False, 'import os\n'), ((1738, 1776), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""'], {}), "(diff_image_dir, 'images')\n", (1750, 1776), False, 'import os\n'), ((1800, 1835), 'os.path.join', 'os.path.join', (['save_path', 'image_name'], {}), '(save_path, image_name)\n', (1812, 1835), False, 'import os\n'), ((1860, 1893), 'os.path.join', 'os.path.join', (['save_path', 'exr_name'], {}), '(save_path, exr_name)\n', (1872, 1893), False, 'import os\n'), ((1911, 1948), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (1922, 1948), False, 'import os\n'), ((3883, 3921), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""'], {}), "(diff_image_dir, 'images')\n", (3895, 3921), False, 'import os\n'), ((3982, 4010), 'os.path.splitext', 'os.path.splitext', (['short_path'], {}), '(short_path)\n', (3998, 4010), False, 'import os\n'), ((5474, 5531), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_a_image_name'], {}), "(diff_image_dir, 'images', real_a_image_name)\n", (5486, 5531), False, 
'import os\n'), ((5562, 5619), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'fake_b_image_name'], {}), "(diff_image_dir, 'images', fake_b_image_name)\n", (5574, 5619), False, 'import os\n'), ((5650, 5707), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_b_image_name'], {}), "(diff_image_dir, 'images', real_b_image_name)\n", (5662, 5707), False, 'import os\n'), ((5739, 5794), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_a_exr_name'], {}), "(diff_image_dir, 'images', real_a_exr_name)\n", (5751, 5794), False, 'import os\n'), ((5825, 5880), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'fake_b_exr_name'], {}), "(diff_image_dir, 'images', fake_b_exr_name)\n", (5837, 5880), False, 'import os\n'), ((5911, 5966), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_b_exr_name'], {}), "(diff_image_dir, 'images', real_b_exr_name)\n", (5923, 5966), False, 'import os\n'), ((7169, 7207), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""'], {}), "(diff_image_dir, 'images')\n", (7181, 7207), False, 'import os\n'), ((7268, 7296), 'os.path.splitext', 'os.path.splitext', (['short_path'], {}), '(short_path)\n', (7284, 7296), False, 'import os\n'), ((8805, 8862), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_a_image_name'], {}), "(diff_image_dir, 'images', real_a_image_name)\n", (8817, 8862), False, 'import os\n'), ((8893, 8950), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'fake_b_image_name'], {}), "(diff_image_dir, 'images', fake_b_image_name)\n", (8905, 8950), False, 'import os\n'), ((8981, 9038), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_b_image_name'], {}), "(diff_image_dir, 'images', real_b_image_name)\n", (8993, 9038), False, 'import os\n'), ((9070, 9125), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_a_exr_name'], {}), 
"(diff_image_dir, 'images', real_a_exr_name)\n", (9082, 9125), False, 'import os\n'), ((9156, 9211), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'fake_b_exr_name'], {}), "(diff_image_dir, 'images', fake_b_exr_name)\n", (9168, 9211), False, 'import os\n'), ((9242, 9297), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_b_exr_name'], {}), "(diff_image_dir, 'images', real_b_exr_name)\n", (9254, 9297), False, 'import os\n'), ((10509, 10547), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""'], {}), "(diff_image_dir, 'images')\n", (10521, 10547), False, 'import os\n'), ((10608, 10636), 'os.path.splitext', 'os.path.splitext', (['short_path'], {}), '(short_path)\n', (10624, 10636), False, 'import os\n'), ((12055, 12112), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_a_image_name'], {}), "(diff_image_dir, 'images', real_a_image_name)\n", (12067, 12112), False, 'import os\n'), ((12143, 12200), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'fake_b_image_name'], {}), "(diff_image_dir, 'images', fake_b_image_name)\n", (12155, 12200), False, 'import os\n'), ((12231, 12288), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_b_image_name'], {}), "(diff_image_dir, 'images', real_b_image_name)\n", (12243, 12288), False, 'import os\n'), ((12320, 12375), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_a_exr_name'], {}), "(diff_image_dir, 'images', real_a_exr_name)\n", (12332, 12375), False, 'import os\n'), ((12406, 12461), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'fake_b_exr_name'], {}), "(diff_image_dir, 'images', fake_b_exr_name)\n", (12418, 12461), False, 'import os\n'), ((12492, 12547), 'os.path.join', 'os.path.join', (['diff_image_dir', '"""images"""', 'real_b_exr_name'], {}), "(diff_image_dir, 'images', real_b_exr_name)\n", (12504, 12547), False, 'import os\n'), ((15316, 15375), 
'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.name', '"""loss_log.txt"""'], {}), "(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n", (15328, 15375), False, 'import os\n'), ((3040, 3091), 'os.path.join', 'os.path.join', (['opt.data_dir', "('%s' % opt.gt_depth_dir)"], {}), "(opt.data_dir, '%s' % opt.gt_depth_dir)\n", (3052, 3091), False, 'import os\n'), ((3120, 3146), 'os.path.join', 'os.path.join', (['opt.dataroot'], {}), '(opt.dataroot)\n', (3132, 3146), False, 'import os\n'), ((3357, 3508), 'projections.operations.make_depth_texture_pairs_from_coord', 'operations.make_depth_texture_pairs_from_coord', (['opt', 'fake_B_exr', 'real_A_exr', 'gt_depth_dir', 'out_dir', 'model_name', 'model_id'], {'use_rgb_mask': 'opt.rgb_mask'}), '(opt, fake_B_exr, real_A_exr,\n gt_depth_dir, out_dir, model_name, model_id, use_rgb_mask=opt.rgb_mask)\n', (3403, 3508), True, 'import projections.operations as operations\n'), ((3531, 3674), 'projections.operations.make_depth_pairs_from_coord', 'operations.make_depth_pairs_from_coord', (['opt', 'fake_B_exr', 'real_A_exr', 'gt_depth_dir', 'out_dir', 'model_name', 'model_id'], {'use_rgb_mask': 'opt.rgb_mask'}), '(opt, fake_B_exr, real_A_exr,\n gt_depth_dir, out_dir, model_name, model_id, use_rgb_mask=opt.rgb_mask)\n', (3569, 3674), True, 'import projections.operations as operations\n'), ((6058, 6108), 'cv2.imwrite', 'cv2.imwrite', (['real_a_png_save_path', '(real_a[0] * 255)'], {}), '(real_a_png_save_path, real_a[0] * 255)\n', (6069, 6108), False, 'import cv2\n'), ((6119, 6169), 'cv2.imwrite', 'cv2.imwrite', (['fake_b_png_save_path', '(fake_b[0] * 255)'], {}), '(fake_b_png_save_path, fake_b[0] * 255)\n', (6130, 6169), False, 'import cv2\n'), ((6180, 6230), 'cv2.imwrite', 'cv2.imwrite', (['real_b_png_save_path', '(real_b[0] * 255)'], {}), '(real_b_png_save_path, real_b[0] * 255)\n', (6191, 6230), False, 'import cv2\n'), ((6266, 6310), 'cv2.imwrite', 'cv2.imwrite', (['fake_b_exr_save_path', 'fake_b[0]'], {}), 
'(fake_b_exr_save_path, fake_b[0])\n', (6277, 6310), False, 'import cv2\n'), ((6900, 6935), 'numpy.abs', 'np.abs', (['(fake_B_group - real_B_group)'], {}), '(fake_B_group - real_B_group)\n', (6906, 6935), True, 'import numpy as np\n'), ((9405, 9452), 'cv2.imwrite', 'cv2.imwrite', (['real_a_png_save_path', '(real_a * 255)'], {}), '(real_a_png_save_path, real_a * 255)\n', (9416, 9452), False, 'import cv2\n'), ((9463, 9510), 'cv2.imwrite', 'cv2.imwrite', (['fake_b_png_save_path', '(fake_b * 255)'], {}), '(fake_b_png_save_path, fake_b * 255)\n', (9474, 9510), False, 'import cv2\n'), ((9521, 9568), 'cv2.imwrite', 'cv2.imwrite', (['real_b_png_save_path', '(real_b * 255)'], {}), '(real_b_png_save_path, real_b * 255)\n', (9532, 9568), False, 'import cv2\n'), ((9604, 9645), 'cv2.imwrite', 'cv2.imwrite', (['fake_b_exr_save_path', 'fake_b'], {}), '(fake_b_exr_save_path, fake_b)\n', (9615, 9645), False, 'import cv2\n'), ((10235, 10270), 'numpy.abs', 'np.abs', (['(fake_B_group - real_B_group)'], {}), '(fake_B_group - real_B_group)\n', (10241, 10270), True, 'import numpy as np\n'), ((12636, 12694), 'cv2.imwrite', 'cv2.imwrite', (['real_a_png_save_path', '(real_a[:, :, 0:3] * 255)'], {}), '(real_a_png_save_path, real_a[:, :, 0:3] * 255)\n', (12647, 12694), False, 'import cv2\n'), ((12703, 12761), 'cv2.imwrite', 'cv2.imwrite', (['fake_b_png_save_path', '(fake_b[:, :, 0:3] * 255)'], {}), '(fake_b_png_save_path, fake_b[:, :, 0:3] * 255)\n', (12714, 12761), False, 'import cv2\n'), ((12770, 12828), 'cv2.imwrite', 'cv2.imwrite', (['real_b_png_save_path', '(real_b[:, :, 0:3] * 255)'], {}), '(real_b_png_save_path, real_b[:, :, 0:3] * 255)\n', (12781, 12828), False, 'import cv2\n'), ((13641, 13676), 'numpy.abs', 'np.abs', (['(fake_B_group - real_B_group)'], {}), '(fake_B_group - real_B_group)\n', (13647, 13676), True, 'import numpy as np\n'), ((14882, 14994), 'visdom.Visdom', 'visdom.Visdom', ([], {'server': 'opt.display_server', 'port': 'opt.display_port', 'env': 'opt.display_env', 
'raise_exceptions': '(True)'}), '(server=opt.display_server, port=opt.display_port, env=opt.\n display_env, raise_exceptions=True)\n', (14895, 14994), False, 'import visdom\n'), ((15056, 15110), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.name', '"""web_pix"""'], {}), "(opt.checkpoints_dir, opt.name, 'web_pix')\n", (15068, 15110), False, 'import os\n'), ((15138, 15174), 'os.path.join', 'os.path.join', (['self.web_dir', '"""images"""'], {}), "(self.web_dir, 'images')\n", (15150, 15174), False, 'import os\n'), ((15445, 15464), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (15458, 15464), False, 'import time\n'), ((2245, 2285), 'cv2.imwrite', 'cv2.imwrite', (['png_save_path', '(exr_im * 255)'], {}), '(png_save_path, exr_im * 255)\n', (2256, 2285), False, 'import cv2\n'), ((6651, 6692), 'numpy.abs', 'np.abs', (['(fake_B_group[j] - real_B_group[j])'], {}), '(fake_B_group[j] - real_B_group[j])\n', (6657, 6692), True, 'import numpy as np\n'), ((9986, 10027), 'numpy.abs', 'np.abs', (['(fake_B_group[j] - real_B_group[j])'], {}), '(fake_B_group[j] - real_B_group[j])\n', (9992, 10027), True, 'import numpy as np\n'), ((12872, 12963), 'numpy.concatenate', 'np.concatenate', (['(fake_b[:, :, 0], fake_b[:, :, 1], fake_b[:, :, 2], fake_b[:, :, 3])', '(1)'], {}), '((fake_b[:, :, 0], fake_b[:, :, 1], fake_b[:, :, 2], fake_b[:,\n :, 3]), 1)\n', (12886, 12963), True, 'import numpy as np\n'), ((12968, 13009), 'cv2.imwrite', 'cv2.imwrite', (['fake_b_exr_save_path', 'fake_b'], {}), '(fake_b_exr_save_path, fake_b)\n', (12979, 13009), False, 'import cv2\n'), ((13393, 13434), 'numpy.abs', 'np.abs', (['(fake_B_group[j] - real_B_group[j])'], {}), '(fake_B_group[j] - real_B_group[j])\n', (13399, 13434), True, 'import numpy as np\n'), ((18699, 18762), 'os.path.join', 'os.path.join', (['self.img_dir', "('epoch%.3d_%s.png' % (epoch, label))"], {}), "(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n", (18711, 18762), False, 'import os\n'), ((2393, 2427), 
'cv2.imwrite', 'cv2.imwrite', (['exr_save_path', 'exr_im'], {}), '(exr_save_path, exr_im)\n', (2404, 2427), False, 'import cv2\n'), ((2515, 2546), 'numpy.abs', 'np.abs', (['(fake_B_exr - real_B_exr)'], {}), '(fake_B_exr - real_B_exr)\n', (2521, 2546), True, 'import numpy as np\n'), ((20023, 20052), 'numpy.array', 'np.array', (["self.plot_data['Y']"], {}), "(self.plot_data['Y'])\n", (20031, 20052), True, 'import numpy as np\n'), ((19937, 19966), 'numpy.array', 'np.array', (["self.plot_data['X']"], {}), "(self.plot_data['X'])\n", (19945, 19966), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
class PoseHelper(object):  # dummy class to comply to original interface
    """Thin adapter exposing pose accessors of a body part.

    Delegates to the wrapped body part's current position/orientation and
    uses the pybullet client for the quaternion-to-Euler conversion.
    """

    def __init__(self, body_part, pbc):
        self.pbc = pbc
        self.body_part = body_part

    def xyz(self):
        """Cartesian position of the wrapped body part."""
        return self.body_part.current_position()

    def rpy(self):
        """Roll/pitch/yaw angles derived from the part's quaternion."""
        quat = self.body_part.current_orientation()
        return self.pbc.getEulerFromQuaternion(quat)

    def orientation(self):
        """Orientation quaternion of the wrapped body part."""
        return self.body_part.current_orientation()
class Link(object):
    """Wrapper around one link (or the base) of a pybullet multibody.

    Provides pose, velocity and reset helpers; ``bodyPartIndex == -1``
    addresses the multibody's base, any other value a specific link.
    """

    def __init__(self, body_name, body_uid, bodyPartIndex, pbc):
        """
        :param body_name:
        :param bodies:
        :param bodyIndex:
        :param bodyPartIndex:
        """
        self.pbc = pbc
        self.multibody_uid = body_uid
        self.body_name = body_name
        self.bodyPartIndex = bodyPartIndex
        # Remember the spawn pose so callers can restore it later.
        self.initialPosition = self.current_position()
        self.initialOrientation = self.current_orientation()
        self.bp_pose = PoseHelper(self, self.pbc)

    @property
    def id(self):
        return self.bodyPartIndex

    @id.setter
    def id(self, bullet_body_id):
        self.bodyPartIndex = bullet_body_id

    @property
    def multibody_uid(self):
        return self._multibody_uid

    @multibody_uid.setter
    def multibody_uid(self, pb_uid):
        self._multibody_uid = pb_uid

    @property
    def name(self):
        return self.body_name

    # A method you will most probably need a lot to get pose and orientation
    def state_fields_of_pose_of(self, body_id, link_id=-1):
        """Return [x, y, z, qa, qb, qc, qd] for the base or a given link."""
        if link_id == -1:
            pos, orn = self.pbc.getBasePositionAndOrientation(body_id)
        else:
            link_state = self.pbc.getLinkState(body_id, link_id)
            pos, orn = link_state[0], link_state[1]
        return np.array(list(pos) + list(orn))

    def get_pose(self):
        return self.state_fields_of_pose_of(self.multibody_uid,
                                            self.bodyPartIndex)

    def get_position(self):
        """First three pose components: Cartesian position."""
        return self.get_pose()[:3]

    def current_position(self):
        return self.get_position()

    def get_orientation(self):
        """Last four pose components: orientation quaternion."""
        return self.get_pose()[3:]

    def current_orientation(self):
        return self.get_orientation()

    def pose(self):
        return self.bp_pose

    def speed(self):
        """Return [vx, vy, vz, wx, wy, wz] linear and angular velocity."""
        if self.bodyPartIndex == -1:
            linear, angular = self.pbc.getBaseVelocity(self.multibody_uid)
        else:
            link_state = self.pbc.getLinkState(self.multibody_uid,
                                               self.bodyPartIndex,
                                               computeLinkVelocity=1)
            linear, angular = link_state[6], link_state[7]
        return np.array(list(linear) + list(angular))

    def get_velocity(self):
        return self.speed()

    def reset_position(self, position):
        """Teleport the multibody base to `position`, keeping orientation."""
        self.pbc.resetBasePositionAndOrientation(self.multibody_uid,
                                                 position,
                                                 self.get_orientation())

    def reset_orientation(self, orientation):
        """Re-orient the multibody base in place."""
        self.pbc.resetBasePositionAndOrientation(self.multibody_uid,
                                                 self.get_position(),
                                                 orientation)

    def reset_pose(self, position, orientation):
        """Teleport the multibody base to a full pose."""
        self.pbc.resetBasePositionAndOrientation(self.multibody_uid,
                                                 position,
                                                 orientation)

    def contact_list(self):
        """Contact points between this link and any other body."""
        return self.pbc.getContactPoints(self.multibody_uid, -1,
                                         self.bodyPartIndex, -1)
class Joint(object):
def __init__(self, joint_name, body_uid, joint_index, state_idx, pbc):
"""
:param joint_name:
:param body_uid:
:param joint_index:
"""
self.pbc = pbc
self.multibody_uid = body_uid
self.joint_index = joint_index
self.joint_name = joint_name
self.dof_idx = state_idx
# Get additional info
joint_info = self.pbc.getJointInfo(self.multibody_uid, self.joint_index)
self.joint_type = joint_info[2]
self.q_idx = joint_info[3]
self.u_idx = joint_info[4]
self.lower_limit = joint_info[8]
self.upper_limit = joint_info[9]
self.joint_max_force = joint_info[10]
self.joint_max_velocity = joint_info[11]
@property
def multibody_uid(self):
return self._multibody_uid
@multibody_uid.setter
def multibody_uid(self, pb_uid):
self._multibody_uid = pb_uid
def get_state(self):
x, vx, _, _ = self.pbc.getJointState(self.multibody_uid,
self.joint_index)
return x, vx
def get_position(self):
return self.pbc.getJointState(self.multibody_uid, self.joint_index)[0]
def current_position(self): # just some synonym method
return self.get_position()
def get_velocity(self):
return self.pbc.getJointState(self.multibody_uid, self.joint_index)[1]
def get_torque(self):
return self.pbc.getJointState(self.multibody_uid, self.joint_index)[3]
def get_motor_torque(self): # just some synonym method
return self.get_torque()
def get_relative_position(self):
pos = self.get_position()
pos_mid = 0.5 * (self.lower_limit + self.upper_limit)
return 2 * (pos - pos_mid) / (self.upper_limit - self.lower_limit)
def get_reaction_forces(self):
return self.pbc.getJointState(self.multibody_uid, self.joint_index)[2]
def current_relative_position(self):
pos, vel = self.get_state()
pos_mid = 0.5 * (self.lower_limit + self.upper_limit)
return (
2 * (pos - pos_mid) / (self.upper_limit - self.lower_limit),
0.1 * vel
)
def set_state(self, x, vx):
self.pbc.resetJointState(self.multibody_uid, self.joint_index, x, vx)
def set_position(self, position):
self.pbc.setJointMotorControl2(self.multibody_uid, self.joint_index,
self.pbc.POSITION_CONTROL,
targetPosition=position)
def set_velocity(self, velocity):
self.pbc.setJointMotorControl2(self.multibody_uid, self.joint_index,
self.pbc.VELOCITY_CONTROL,
targetVelocity=velocity)
def set_torque(self, torque):
# Disable the Motors for Torque Control
# p.setJointMotorControl2(self.multibody_uid,
# self.joint_index, p.VELOCITY_CONTROL,
# targetVelocity=0, force=0)
self.pbc.setJointMotorControl2(self.multibody_uid,
self.joint_index,
self.pbc.TORQUE_CONTROL,
force=torque)
# p.setJointMotorControl2(body_index=self.multibody_uid,
# joint_index=self.joint_index,
# controlMode=p.TORQUE_CONTROL, force=torque)
# # positionGain=0.0, velocityGain=0.0)
def set_motor_torque(self, torque): # just some synonym method
self.set_torque(torque)
def reset_current_position(self, position, velocity): # just some synonym method
self.reset_position(position, velocity)
def reset_position(self, position, velocity):
    """Hard-reset the joint state to ``position``/``velocity`` via resetJointState."""
    self.pbc.resetJointState(self.multibody_uid, self.joint_index,
                             targetValue=position, targetVelocity=velocity)
    # self.disable_motor()
def disable_motor(self):
    """Disable the joint's built-in motor.

    Velocity control with ``force=0.0`` effectively turns the motor off,
    letting the joint move freely (a common prerequisite for torque control).
    """
    self.pbc.setJointMotorControl2(
        self.multibody_uid,
        self.joint_index,
        controlMode=self.pbc.VELOCITY_CONTROL,
        force=0.0,
        # # targetVelocity=0,
        # positionGain=0.0,
        # velocityGain=0.0,
    )
def lock_motor(self, tgt_pos=0, tgt_vel=0):
    """Hold the joint at ``tgt_pos`` / ``tgt_vel`` using position control."""
    self.pbc.setJointMotorControl2(
        self.multibody_uid,
        self.joint_index,
        # controlMode=p.VELOCITY_CONTROL, force=0
        controlMode=self.pbc.POSITION_CONTROL,
        # controlMode=p.VELOCITY_CONTROL,
        targetPosition=tgt_pos,
        targetVelocity=tgt_vel,
        # positionGain=0.1, velocityGain=0.1
    )
def get_joint_info(pbc, multibody_uid, joint_idx):
    """Summarise one joint of a pybullet multibody as a Python dict.

    Args:
        pbc: pybullet client.
        multibody_uid: pybullet unique ID of the multibody.
        joint_idx: pybullet joint index within the multibody.

    Returns:
        dict with keys: ``joint_idx``, ``joint_name``, ``joint_type``,
        ``q_idx``, ``u_idx``, ``damping``, ``friction``, ``lower_limit``,
        ``upper_limit``, ``joint_max_force``, ``joint_max_velocity``,
        ``link_name``, ``parent_pos`` / ``parent_ori`` (numpy arrays with
        the joint pose in the parent frame) and ``parent_idx`` (-1 for
        the base link).  Names are decoded from the raw utf8 bytes that
        pybullet returns; the reserved ``flags`` field (index 5) and the
        joint axis (index 13) are deliberately not exposed.
    """
    raw = pbc.getJointInfo(multibody_uid, joint_idx)
    return {
        'joint_idx': raw[0],                      # pybullet joint index
        'joint_name': raw[1].decode("utf8"),
        'joint_type': raw[2],
        'q_idx': raw[3],
        'u_idx': raw[4],
        'damping': raw[6],
        'friction': raw[7],
        'lower_limit': raw[8],
        'upper_limit': raw[9],
        'joint_max_force': raw[10],
        'joint_max_velocity': raw[11],
        'link_name': raw[12].decode("utf8"),
        'parent_pos': np.array(raw[14]),
        'parent_ori': np.array(raw[15]),
        'parent_idx': raw[16],
    }
| [
"numpy.array"
] | [((1820, 1851), 'numpy.array', 'np.array', (['[x, y, z, a, b, c, d]'], {}), '([x, y, z, a, b, c, d])\n', (1828, 1851), True, 'import numpy as np\n'), ((2742, 2776), 'numpy.array', 'np.array', (['[vx, vy, vz, wx, wy, wz]'], {}), '([vx, vy, vz, wx, wy, wz])\n', (2750, 2776), True, 'import numpy as np\n'), ((11444, 11468), 'numpy.array', 'np.array', (['joint_info[14]'], {}), '(joint_info[14])\n', (11452, 11468), True, 'import numpy as np\n'), ((11492, 11516), 'numpy.array', 'np.array', (['joint_info[15]'], {}), '(joint_info[15])\n', (11500, 11516), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
def relu(x):
    """Rectified linear unit: zero out the negative part of x (scalar or ndarray)."""
    positive_mask = x > 0
    return x * positive_mask
def error_rate(p, t):
    """Fraction of predictions ``p`` that disagree with targets ``t``."""
    mismatches = p != t
    return np.mean(mismatches)
def getKaggleMNIST():
    """Load the Kaggle MNIST training CSV and split it into train/test sets.

    The CSV holds the label in column 0 and 784 pixel values (0..255) in
    columns 1-785 (42000 images of 28x28).  Rows are shuffled, pixels are
    scaled to [0, 1], and the last 1000 rows are held out as the test set.

    Returns:
        (Xtrain, Ytrain, Xtest, Ytest)
    """
    raw = pd.read_csv('../mnist_data/train.csv').values.astype(np.float32)
    raw = shuffle(raw)
    holdout = 1000
    Xtrain = raw[:-holdout, 1:] / 255
    Ytrain = raw[:-holdout, 0].astype(np.int32)
    Xtest = raw[-holdout:, 1:] / 255
    Ytest = raw[-holdout:, 0].astype(np.int32)
    return Xtrain, Ytrain, Xtest, Ytest
def init_weights(shape):
    """Gaussian weights scaled by 1/sqrt(sum(shape)), returned as float32."""
    scale = np.sqrt(sum(shape))
    weights = np.random.randn(*shape) / scale
    return weights.astype(np.float32)
| [
"pandas.read_csv",
"sklearn.utils.shuffle",
"numpy.mean",
"numpy.random.randn"
] | [((147, 162), 'numpy.mean', 'np.mean', (['(p != t)'], {}), '(p != t)\n', (154, 162), True, 'import numpy as np\n'), ((412, 426), 'sklearn.utils.shuffle', 'shuffle', (['train'], {}), '(train)\n', (419, 426), False, 'from sklearn.utils import shuffle\n'), ((670, 693), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (685, 693), True, 'import numpy as np\n'), ((335, 373), 'pandas.read_csv', 'pd.read_csv', (['"""../mnist_data/train.csv"""'], {}), "('../mnist_data/train.csv')\n", (346, 373), True, 'import pandas as pd\n')] |
# Este arquivo contém as funções usadas para ajustar as curvas PV
# e outras funções úteis
############################################################### BIBLIOTECAS:
import numpy as np # para fazer contas e mexer com matrizes
import pandas as pd # para montar DataFrames (tabelas de bancos de dados)
from pathlib import Path # para trabalhar com diretorios e arquivos
import pickle # para gravar e ler dados
import matplotlib.pyplot as plt # para gráficos
import seaborn as sns # para gráficos com DataFrames
from scipy.optimize import curve_fit # para ajuste das curvas dos modelos
import math # para erf()
from scipy.interpolate import interp1d # para interpolar os pontos PV
############################################################### MODELOS:
# Sigmoid PV model (diseased lung) from Venegas et al.:
#                   b
#   V(x) = a + ---------------------
#              1 + exp(-(x - c) / d)
# As x -> inf, V(x) -> a + b.
def sigmoidvenegas1(x, a, b, c, d):
    """Four-parameter sigmoid PV model; returns volume at pressure ``x``."""
    z = (x - c) / d
    return a + b / (1 + np.exp(-z))
########## paiva
def sigmoidpaiva(x, TLC, k1, k2):
    """Paiva sigmoid PV model: TLC / (1 + k1 * exp(-k2 * x))."""
    denom = 1 + k1 * np.exp(-k2 * x)
    return TLC / denom
# our modification: Paiva model with an additive volume offset
def sigmoidpaivaoffset1(x, TLC, k1, k2, offset):
    """Paiva sigmoid plus a constant volume offset."""
    denom = 1 + k1 * np.exp(-k2 * x)
    return TLC / denom + offset
# based on the original Paiva 1975 article, with an added offset:
# note k1 multiplies TLC inside the denominator here
def sigmoidpaivaoffset(x, TLC, k1, k2, offset):
    """Paiva (1975) sigmoid with TLC-scaled k1 and a constant volume offset."""
    denom = 1 + k1 * TLC * np.exp(-k2 * x)
    return TLC / denom + offset
######### venegas2
def sigmoidvenegas2(x, TLC, B, k, c, d):
    """Venegas-2 PV model: (TLC - B*exp(-k*x)) / (1 + exp(-(x - c)/d))."""
    numerator = TLC - B * np.exp(-k * x)
    sigmoid = 1 + np.exp(-(x - c) / d)
    return numerator / sigmoid
# our modification: Venegas-2 with an additive volume offset
def sigmoidvenegas2offset(x, TLC, B, k, c, d, offset):
    """Venegas-2 PV model plus a constant volume offset."""
    numerator = TLC - B * np.exp(-k * x)
    sigmoid = 1 + np.exp(-(x - c) / d)
    return numerator / sigmoid + offset
# original sign: incorrect, because here V -> infinity as P -> c
# (kept only for comparison with the published form)
def sigmoidvenegas2original(x, TLC, B, k, c, d):
    """Venegas-2 with the (incorrect) original minus sign in the denominator."""
    numerator = TLC - B * np.exp(-k * x)
    return numerator / (1 - np.exp(-(x - c) / d))
######### murphy and engel
def sigmoidmurphy(x, VM, Vm, k1, k2, k3):
    """Murphy & Engel model.  CAUTION: this gives P = f(V), i.e. ``x`` is volume."""
    upper_term = k1 / (VM - x)
    lower_term = k2 / (Vm - x)
    return upper_term + lower_term + k3
# our modification: Murphy & Engel reparameterised with TLC and an offset
######### murphy and engel
def sigmoidmurphyoffset(x, TLC, offset, k1, k2, k3):
    """Murphy & Engel with asymptotes at offset and TLC+offset.  CAUTION: P = f(V)."""
    upper_term = k1 / ((TLC + offset) - x)
    lower_term = k2 / (offset - x)
    return upper_term + lower_term + k3
######### recruit_unit
# Simple exponential pulmonary PV model (Salazar 1964):
#   Volume = Vmax * (1 - e^(-K * Paw))
# where Paw is the airway pressure and K is the exponential 'time constant'.
def expsalazar(x, Vo, K):
    """Salazar (1964) single-exponential PV model: Vo * (1 - exp(-K*x))."""
    return Vo * (1 - np.exp(-K * x))
# recruited-units model based on erf()
# vectorised for array inputs (as required by curve_fit)
def meu_erf_vec(Paw, mi, sigma):
    """Vectorised erf-based CDF mapping pressures to a fraction in [0, 1].

    Each pressure is standardised as (x - mi) / (1.5 * sigma) — the 1.5
    widening factor is part of the original model — and passed through
    erf, rescaled from [-1, 1] to [0, 1].

    Args:
        Paw: iterable of airway pressures.
        mi: centre (pressure at which half the units are recruited).
        sigma: spread of the recruitment distribution.

    Returns:
        np.ndarray of values in [0, 1], one per input pressure.
    """
    # idiom fix: comprehension instead of the manual loop-and-append
    return np.array([math.erf((x - mi) / (sigma * 1.5)) / 2 + 0.5 for x in Paw])
# model proposed by our group
def sigmoid_recruit_units(Paw, K, Vmax, mi, sigma, offset):
    """Recruitment-units PV model: erf-recruited capacity times a Salazar exponential, plus offset."""
    recruited_capacity = Vmax * meu_erf_vec(Paw, mi, sigma)
    return recruited_capacity * (1 - np.exp(-K * Paw)) + offset
############################################################### FUNÇÕES:
'''
Carrega os arquivos .pickle das subpastas da pasta './porquinhos/'
e retorna um DataFrame com os dados.
As manobras C contém apenas 4 passos, e as D, apenas 5 passos.
'''
def carrega_pickles(folder = 'porquinhos'):
    """Load every *.pickle under ``folder`` into a single DataFrame.

    Each pickle holds one animal with one or more maneuvers; maneuver
    "B" has 3 steps, "C" 4 steps and "D" 5 steps.  The returned frame
    has columns Animal, Manobra, Dados (raw N x 2 matrix), n_steps,
    Pressoes, Volumes and a ``volume_esperado`` column initialised to 0.
    """
    dataframes_lst = [] # one DataFrame per animal/maneuver/PV data
    for file_name in Path(folder).rglob('*.pickle'):
        print(f"\rLendo {file_name.name}\t\t\t")
        # SECURITY NOTE: pickle.load executes arbitrary code — only load trusted files.
        with open(file_name, "rb") as file: # open the .pickle file
            porquinho = pickle.load(file)
            for manobra in porquinho: # for each maneuver
                if manobra == "D": # can run 3, 4 or 5 steps
                    n_steps = 5
                elif manobra == "C": # can run 3 or 4 steps
                    n_steps = 4
                elif manobra == "B": # can run 3 steps
                    n_steps = 3
                # format the input data: interleave inspiration/expiration samples
                format_data = []
                for pi, pe, wi, we in zip(porquinho[manobra]["p_i"], porquinho[manobra]["p_e"],
                                          porquinho[manobra]["w_i"], porquinho[manobra]["w_e"]):
                    format_data.extend([pi,wi,pe,we])
                format_data = np.array(format_data).reshape(-1,2) # build a matrix of N rows x 2 columns
                ##########################################################
                caso = []
                caso.append(porquinho.name)
                caso.append(manobra)
                caso.append(format_data)
                caso.append(n_steps)
                casodf = pd.DataFrame(caso, index = ['Animal', 'Manobra', 'Dados', 'n_steps']).T
                dataframes_lst.append(casodf)
    # join all per-maneuver frames into a single DataFrame:
    dadosdf = pd.concat(dataframes_lst, ignore_index=True)
    # extract pressure and volume vectors from the raw pickle data:
    pv_lst = []
    for idx,caso in dadosdf.iterrows():
        pv = []
        ps,vs = Data2PV(caso.Dados)
        pv.append(ps)
        pv.append(vs)
        pvdf = pd.DataFrame([pv], columns = ['Pressoes', 'Volumes'])
        pv_lst.append(pvdf)
    pvdf_all = pd.concat(pv_lst, ignore_index=True)
    dadosdf_completo = pd.concat((dadosdf,pvdf_all),axis=1)
    # placeholder column for the reference (expected) volume...
    dadosdf_completo["volume_esperado"] = 0
    return dadosdf_completo
'''
Retorna os vetores de pressão e volume a partir dos dados raw disponíveis nos pickles
'''
def Data2PV(data):
    """Split the raw maneuver matrix into (pressures, volumes).

    Every second row of ``data`` is kept (rows come in pairs in the raw
    pickle layout); column 0 holds pressure, column 1 holds volume.
    """
    sampled = data[0::2, :]
    return sampled[:, 0], sampled[:, 1]
def encontra_volumes_limites_Murphy(parameters, modelo=sigmoidmurphy, pmax=100, pmin=0):  ### in this family of models, P = f(V)!
    """Find the volume interval over which ``modelo`` (P = f(V)) stays in [pmin, pmax].

    Scans volumes upward (1..9999) for the first one whose predicted
    pressure exceeds ``pmax``, and downward (1..-9999) for the first one
    whose pressure drops below ``pmin``.

    Args:
        parameters: fitted parameter vector unpacked into ``modelo``.
        modelo: model with signature modelo(v, *parameters) returning pressure.
        pmax: upper pressure bound.
        pmin: lower pressure bound.

    Returns:
        (v_min, v_max) as ints; defaults (0, 1000) when no crossing is found.
    """
    v_max = 1000
    v_min = 0
    # upper bound: first volume whose predicted pressure exceeds pmax
    for v in range(1, 10000):
        if modelo(v, *parameters) > pmax:
            v_max = v
            break
    # lower bound: first (negative-going) volume whose pressure drops below pmin
    # BUG FIX: this loop previously called sigmoidmurphy directly,
    # ignoring the `modelo` argument.
    for v in range(1, -10000, -1):
        if modelo(v, *parameters) < pmin:
            v_min = v
            break
    return int(v_min), int(v_max)
# methods: lm, dogbox, trf
def testa_modelo(df, modelo, meu_p0 = [], metodo = 'lm', n_colunas = 4, texto = '', TLC_index = 0, meus_bounds = [], n_points_interp=0, debug=True, invert_PV = False):
    """Fit ``modelo`` to every PV case in ``df`` and plot each fit.

    Args:
        df: DataFrame with columns Pressoes, Volumes, Animal, volume_esperado.
        modelo: model function; V = f(P) by default, P = f(V) when invert_PV.
        meu_p0: initial guess for curve_fit ([] = let curve_fit choose).
        metodo: curve_fit method ('lm', 'trf' or 'dogbox').
        n_colunas: number of subplot columns.
        texto: extra text appended to the figure title.
        TLC_index: index of the TLC parameter in the fitted vector.
        meus_bounds: parameter bounds ([] = unbounded; 'lm' does not accept bounds).
        n_points_interp: intermediate points to interpolate between samples.
        debug: print the fitted parameters for each case.
        invert_PV: True when ``modelo`` computes pressure from volume.

    Returns:
        (norm of % errors, mean absolute % error, number of fitted cases);
        both error values are -1 when no case had a reference volume.
    """
    numero_de_casos = len(df)
    fig = plt.figure(figsize=(25,5*numero_de_casos/n_colunas))
    erro_vec = []   # percentage errors vs the reference (CT) volume
    n_fitted = 0
    for caso_teste in range(numero_de_casos):
        p_in = df.iloc[caso_teste].Pressoes
        v_in = df.iloc[caso_teste].Volumes
        # interpolate extra points (no-op when n_points_interp == 0)
        p, v = interpola_PV(p_in,v_in,n_points_interp)
        plt.subplot(int(numero_de_casos/n_colunas)+1,n_colunas,caso_teste+1)
        fig.tight_layout()
        if (n_points_interp > 0):
            plt.scatter(p,v,label='interp',c='k',marker='x')
        plt.scatter(p_in,v_in,label='raw')
        try:
            # four-way dispatch: with/without p0 and with/without bounds
            if (invert_PV == False): ################################### V = f(P)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            else: ###################################################### P = f(V)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            if debug:
                textop = ""
                # NOTE(review): this loop reuses the name `p`, clobbering the
                # interpolated pressure vector; harmless today because `p` is
                # reassigned on the next iteration, but fragile.
                for p in parameters:
                    if ( np.abs(p) > 1 ):
                        textop = textop + f'{p:7.1f}' + ' '
                    else:
                        textop = textop + f'{p:.3f}' + ' '
                print(f'Testando caso {caso_teste}: {df.iloc[caso_teste].Animal}: [{textop}]')
            # evaluate the fitted curve for plotting
            if (invert_PV == False): ################################### V = f(P)
                meu_p = range(1,100)
                meu_v = modelo(meu_p,*parameters)
            else: ###################################################### P = f(V)
                # pick a volume range whose pressures stay in a sane window
                v_min,v_max = encontra_volumes_limites_Murphy(parameters,modelo=modelo)
                meu_v = np.asarray(range(v_min,v_max))
                meu_p = modelo(meu_v,*parameters)
            plt.plot(meu_p,meu_v,'r',label='fit')
            n_fitted = n_fitted + 1
            if ( df.iloc[caso_teste]["volume_esperado"] == 0 ):
                plt.title(f'Case: {df.iloc[caso_teste].Animal}. TLC = {parameters[TLC_index]:.0f} mL')
            else:
                v_esperado = df.iloc[caso_teste]["volume_esperado"]
                # Murphy's TLC is VM - Vm rather than a single fitted parameter
                if (modelo.__name__ == 'sigmoidmurphy'):
                    TLC = parameters[0] - parameters[1]
                else:
                    TLC = parameters[TLC_index]
                erro = 100*(TLC-v_esperado)/v_esperado
                erro_vec.append(erro)
                plt.title(f'Case: {df.iloc[caso_teste].Animal}. TLC = {TLC:.0f} mL. Error: {erro:.1f}%')
        except Exception as e:
            print(f'\tCaso {caso_teste} ({df.iloc[caso_teste].Animal}) deu erro...')
            plt.title(f'Case: {df.iloc[caso_teste].Animal}. Error fitting.')
        plt.xlabel('Pressure [cmH2O]')
        plt.ylabel('Volume [mL]')
        plt.legend()
    fig.suptitle(f'PV Graph. Model: {modelo.__name__}. {texto}', fontsize=16, y=1.05)
    plt.show()
    if ( len(erro_vec) > 0 ):
        erro_medio = np.mean(np.abs(erro_vec))
        erro_norm = np.linalg.norm(erro_vec)
    else: # no reference volumes were available
        erro_medio = -1
        erro_norm = -1
    if debug:
        print(f'Norma(erro): {erro_norm:.1f}. Erro médio: {erro_medio:.2f}%. Ajustados: {n_fitted}.')
    return erro_norm, erro_medio, n_fitted
# same as the previous function, but without plots or messages... for use inside loops
# methods: lm, dogbox, trf
def testa_modelo_loop(df, modelo, meu_p0 = [], metodo = 'lm', TLC_index = 0, meus_bounds = [], n_points_interp=0, invert_PV = False):
    """Silent variant of ``testa_modelo``: fit every case, return summary stats only.

    Returns:
        (norm of % errors, mean absolute % error, number of fitted cases);
        both error values are -1 when no case had a reference volume.
    """
    numero_de_casos = len(df)
    erro_vec = []   # percentage errors vs the reference (CT) volume
    n_fitted = 0
    for caso_teste in range(numero_de_casos):
        p_in = df.iloc[caso_teste].Pressoes
        v_in = df.iloc[caso_teste].Volumes
        # interpolate extra points (no-op when n_points_interp == 0)
        p, v = interpola_PV(p_in,v_in,n_points_interp)
        try:
            # four-way dispatch: with/without p0 and with/without bounds
            if (invert_PV == False): ################################### V = f(P)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            else: ###################################################### P = f(V)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            if ( df.iloc[caso_teste]["volume_esperado"] == 0 ):
                pass
            else:
                v_esperado = df.iloc[caso_teste]["volume_esperado"]
                # Murphy's TLC is VM - Vm rather than a single fitted parameter
                if (modelo.__name__ == 'sigmoidmurphy'):
                    TLC = parameters[0] - parameters[1]
                else:
                    TLC = parameters[TLC_index]
                erro = 100*(TLC-v_esperado)/v_esperado
                erro_vec.append(erro)
            n_fitted = n_fitted + 1
            # NOTE(review): unbounded 'lm' fits with an absurd TLC (> 6 L) are
            # discounted; this reads parameters[TLC_index] even for the Murphy
            # model, where TLC is VM - Vm — confirm that is intended.
            if ( (metodo=='lm') & (parameters[TLC_index] > 6000) ): # did not fit...
                n_fitted = n_fitted - 1
        except Exception as e:
            pass  # curve_fit failed for this case; skip it silently
    if ( len(erro_vec) > 0 ):
        erro_medio = np.mean(np.abs(erro_vec))
        erro_norm = np.linalg.norm(erro_vec)
    else: # no reference volumes were available
        erro_medio = -1
        erro_norm = -1
    return erro_norm, erro_medio, n_fitted
# same as the previous function, but reports one result row per individual case
# methods: lm, dogbox, trf
def testa_modelo_indiv(df, modelo, meu_p0 = [], metodo = 'lm', TLC_index = 0, meus_bounds = [],
                       n_points_interp=0, limite_vol_max = 6000, limite_vol_min = 100,
                       erro_factor_limit = 70, invert_PV = False):
    """Fit ``modelo`` to each case of ``df`` and return one result row per case.

    A fit is accepted (``fitted == True``) when the estimated TLC falls
    inside [limite_vol_min, limite_vol_max] and the normalised residual
    ('error factor') does not exceed ``erro_factor_limit``.

    Returns:
        DataFrame with one row per case: animal, maneuver, data, model,
        fitted parameters, TLC estimate, % error vs the CT volume and
        residual metrics.
    """
    numero_de_casos = len(df)
    dfresult_lst = []
    for caso_teste in range(numero_de_casos):
        p_in = df.iloc[caso_teste].Pressoes
        v_in = df.iloc[caso_teste].Volumes
        # interpolate extra points (no-op when n_points_interp == 0)
        p, v = interpola_PV(p_in,v_in,n_points_interp)
        # defaults reported when fitting fails
        flag_fitted = False
        erro = 0
        parameters = []
        erro_fit = 0
        erro_factor = 0
        tlc_eit = 0
        try:
            # four-way dispatch: with/without p0 and with/without bounds
            if (invert_PV == False): ################################### V = f(P)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, p, v, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            else: ###################################################### P = f(V)
                if (meu_p0 == []): # no p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, bounds=meus_bounds)
                else: # with p0
                    if (meus_bounds == []): # no bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0)
                    else: # with bounds
                        parameters, pcov = curve_fit(modelo, v, p, method=metodo, p0 = meu_p0, bounds=meus_bounds)
            # Murphy's TLC is VM - Vm rather than a single fitted parameter
            if (modelo.__name__ == 'sigmoidmurphy'):
                TLC = parameters[0] - parameters[1]
            else:
                TLC = parameters[TLC_index]
            tlc_eit = TLC
            # percentage error vs the reference (CT) volume, when available
            if ( df.iloc[caso_teste]["volume_esperado"] == 0 ):
                pass
            else:
                v_esperado = df.iloc[caso_teste]["volume_esperado"]
                erro = 100*(TLC-v_esperado)/v_esperado
            # residual of the fit, evaluated on the raw (non-interpolated) samples
            if (invert_PV == False): ################################### V = f(P)
                v_fit = modelo(p_in,*parameters)
                erro_fit = np.linalg.norm(v_fit-v_in)
                erro_factor = erro_fit/np.power(len(v_in),0.5)
            else: ###################################################### P = f(V)
                p_fit = modelo(v_in,*parameters)
                erro_fit = np.linalg.norm(p_fit-p_in)
                # rescale pressure residuals to the volume axis for comparability
                erro_factor = (erro_fit/np.power(len(v_in),0.5))*(( max(v_in)-min(v_in) )/( max(p_in)-min(p_in) ))
            # reject implausible TLC values or poor fits
            if ( limite_vol_min <= TLC <= limite_vol_max ): # fitted something plausible...
                flag_fitted = True
            if ( erro_factor > erro_factor_limit ):
                flag_fitted = False
        except Exception as e:
            pass  # curve_fit failed: keep the default "not fitted" row
        # assemble the result row (index holds the column names) for this case
        index = []
        caso = []
        index.append('Animal')
        caso.append(df.iloc[caso_teste].Animal)
        index.append('Maneuver')
        caso.append(df.iloc[caso_teste].Manobra)
        index.append('n_steps')
        caso.append(df.iloc[caso_teste].n_steps)
        index.append('Pressures')
        caso.append(df.iloc[caso_teste].Pressoes)
        index.append('Volumes')
        caso.append(df.iloc[caso_teste].Volumes)
        index.append('Model')
        caso.append(modelo.__name__)
        index.append('Method')
        caso.append(metodo)
        index.append('TLC_index')
        caso.append(TLC_index)
        index.append('TLC_eit')
        caso.append(tlc_eit)
        index.append('N_points_interp')
        caso.append(n_points_interp)
        index.append('p0')
        caso.append(meu_p0)
        index.append('bounds')
        caso.append(meus_bounds)
        index.append('fitted')
        caso.append(flag_fitted)
        index.append('parameters')
        caso.append(parameters)
        index.append('Vol_CT')
        caso.append(df.iloc[caso_teste]["volume_esperado"])
        index.append('error')
        caso.append(erro)
        index.append('fit error')
        caso.append(erro_fit)
        index.append('error factor')
        caso.append(erro_factor)
        index.append('Raw data')
        caso.append(df.iloc[caso_teste].Dados)
        casodf = pd.DataFrame(caso, index).T
        dfresult_lst.append(casodf)
    dfresult = pd.concat(dfresult_lst, ignore_index=True)
    # make sure these columns are treated as float
    dfresult[['Vol_CT', 'error']] = dfresult[['Vol_CT', 'error']].astype(float)
    return dfresult
# interpolate PV vectors
# n_points = number of intermediate points between each pair of samples
def interpola_PV(pressoes, volumes, n_points=0):
    """Interpolate a PV curve onto a denser, evenly spaced pressure grid.

    The interpolation order grows with the number of samples (linear for
    fewer than 3 points, quadratic for exactly 3, cubic otherwise).
    With ``n_points == 0`` the original points are returned unchanged.
    """
    n = len(pressoes)
    if n < 3:
        kind = "linear"
    elif n == 3:
        kind = "quadratic"
    else:
        kind = "cubic"
    total_points = n * (n_points + 1) - n_points
    grid = np.linspace(pressoes[0], pressoes[-1], total_points, endpoint=True)
    interp_func = interp1d(pressoes, volumes, kind=kind)
    return grid, interp_func(grid)
# Class used to bundle per-model settings for the testa_varios* runners
class dados_modelos:
    """Configuration record for one candidate PV model.

    The class-level values are placeholders; callers assign real values
    on instances before passing them to ``testa_varios``/``testa_varios_indiv``.
    """
    model_function = ''  # model callable handed to curve_fit
    TLC_index = ''       # index of the TLC parameter in the fitted vector
    p0 = ''              # initial guess for curve_fit ([] means "none")
    bounds = ''          # parameter bounds ([] means unbounded)
    invert_PV = False    # True when the model computes P = f(V) instead of V = f(P)
def testa_varios_indiv(dadosdf, modelos, metodos = ('lm','trf','dogbox'), vec_interp = [0, 1, 2, 10, 20]):
    """Run ``testa_modelo_indiv`` for every model x method x interpolation combination.

    Args:
        dadosdf: DataFrame of PV cases (as built by ``carrega_pickles``).
        modelos: iterable of ``dados_modelos`` configuration records.
        metodos: curve_fit methods to try.
        vec_interp: interpolation point counts to try.

    Returns:
        Concatenated per-case results from all combinations.
    """
    df_lst = []
    for mod in modelos:
        print(f'Rodando {mod.model_function.__name__}')
        for n_points_interp in vec_interp:
            for metodo in metodos:
                if (metodo == 'lm'): # 'lm' does not accept bounds
                    dfresult = testa_modelo_indiv(dadosdf, mod.model_function, metodo = metodo, meu_p0 = mod.p0,
                                                  TLC_index=mod.TLC_index, n_points_interp=n_points_interp, invert_PV=mod.invert_PV)
                else:
                    dfresult = testa_modelo_indiv(dadosdf, mod.model_function, metodo = metodo, meu_p0 = mod.p0,
                                                  TLC_index=mod.TLC_index, meus_bounds=mod.bounds,
                                                  n_points_interp=n_points_interp, invert_PV=mod.invert_PV)
                df_lst.append(dfresult)
    dadosdf = pd.concat(df_lst, ignore_index=True)
    return dadosdf
def testa_varios(dadosdf, modelos, metodos = ('lm','trf','dogbox'), vec_interp = [0, 1, 2, 10, 20]):
    """Sweep every model x method x interpolation setting; collect summary metrics.

    For each combination, ``testa_modelo_loop`` is run and one summary row
    (model name, method, interpolation points, error norm, mean error and
    fit count) is produced.
    """
    df_lst = []
    for mod in modelos:
        print(f'Rodando {mod.model_function.__name__}')
        for n_points_interp in vec_interp:
            for metodo in metodos:
                if (metodo == 'lm'): # 'lm' does not accept bounds
                    erro_norm, erro_medio, n_fitted = testa_modelo_loop(dadosdf, mod.model_function, metodo = metodo, meu_p0 = mod.p0,
                                                                        TLC_index=mod.TLC_index, n_points_interp=n_points_interp, invert_PV=mod.invert_PV)
                else:
                    erro_norm, erro_medio, n_fitted = testa_modelo_loop(dadosdf, mod.model_function, metodo = metodo, meu_p0 = mod.p0,
                                                                        TLC_index=mod.TLC_index, meus_bounds=mod.bounds,
                                                                        n_points_interp=n_points_interp, invert_PV=mod.invert_PV)
                # one summary row per combination
                caso = []
                caso.append(mod.model_function.__name__)
                caso.append(metodo)
                caso.append(n_points_interp)
                caso.append(erro_norm)
                caso.append(erro_medio)
                caso.append(n_fitted)
                casodf = pd.DataFrame(caso, index = ['Modelo', 'Método', 'N_points_interp', 'Norma erro', 'Erro médio', 'n_fitted']).T
                df_lst.append(casodf)
    dadosdf = pd.concat(df_lst, ignore_index=True)
    # make sure the error columns are numeric and add the absolute mean error
    dadosdf[['Norma erro', 'Erro médio']] = dadosdf[['Norma erro', 'Erro médio']].astype(float)
    dadosdf['|Erro médio|'] = dadosdf['Erro médio'].abs()
return dadosdf | [
"matplotlib.pyplot.title",
"math.erf",
"numpy.abs",
"matplotlib.pyplot.figure",
"pathlib.Path",
"numpy.linalg.norm",
"numpy.exp",
"pickle.load",
"scipy.interpolate.interp1d",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"scipy.optimize.curve_fi... | [((2876, 2895), 'numpy.array', 'np.array', (['saida_lst'], {}), '(saida_lst)\n', (2884, 2895), True, 'import numpy as np\n'), ((5006, 5050), 'pandas.concat', 'pd.concat', (['dataframes_lst'], {'ignore_index': '(True)'}), '(dataframes_lst, ignore_index=True)\n', (5015, 5050), True, 'import pandas as pd\n'), ((5406, 5442), 'pandas.concat', 'pd.concat', (['pv_lst'], {'ignore_index': '(True)'}), '(pv_lst, ignore_index=True)\n', (5415, 5442), True, 'import pandas as pd\n'), ((5471, 5509), 'pandas.concat', 'pd.concat', (['(dadosdf, pvdf_all)'], {'axis': '(1)'}), '((dadosdf, pvdf_all), axis=1)\n', (5480, 5509), True, 'import pandas as pd\n'), ((6621, 6678), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 5 * numero_de_casos / n_colunas)'}), '(figsize=(25, 5 * numero_de_casos / n_colunas))\n', (6631, 6678), True, 'import matplotlib.pyplot as plt\n'), ((10930, 10940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10938, 10940), True, 'import matplotlib.pyplot as plt\n'), ((19989, 20031), 'pandas.concat', 'pd.concat', (['dfresult_lst'], {'ignore_index': '(True)'}), '(dfresult_lst, ignore_index=True)\n', (19998, 20031), True, 'import pandas as pd\n'), ((20586, 20624), 'scipy.interpolate.interp1d', 'interp1d', (['pressoes', 'volumes'], {'kind': 'kind'}), '(pressoes, volumes, kind=kind)\n', (20594, 20624), False, 'from scipy.interpolate import interp1d\n'), ((21890, 21926), 'pandas.concat', 'pd.concat', (['df_lst'], {'ignore_index': '(True)'}), '(df_lst, ignore_index=True)\n', (21899, 21926), True, 'import pandas as pd\n'), ((23429, 23465), 'pandas.concat', 'pd.concat', (['df_lst'], {'ignore_index': '(True)'}), '(df_lst, ignore_index=True)\n', (23438, 23465), True, 'import pandas as pd\n'), ((2814, 2825), 'math.erf', 'math.erf', (['x'], {}), '(x)\n', (2822, 2825), False, 'import math\n'), ((5300, 5351), 'pandas.DataFrame', 'pd.DataFrame', (['[pv]'], {'columns': "['Pressoes', 'Volumes']"}), "([pv], columns=['Pressoes', 
'Volumes'])\n", (5312, 5351), True, 'import pandas as pd\n'), ((7200, 7236), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p_in', 'v_in'], {'label': '"""raw"""'}), "(p_in, v_in, label='raw')\n", (7211, 7236), True, 'import matplotlib.pyplot as plt\n'), ((10749, 10779), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pressure [cmH2O]"""'], {}), "('Pressure [cmH2O]')\n", (10759, 10779), True, 'import matplotlib.pyplot as plt\n'), ((10788, 10813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Volume [mL]"""'], {}), "('Volume [mL]')\n", (10798, 10813), True, 'import matplotlib.pyplot as plt\n'), ((10822, 10834), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10832, 10834), True, 'import matplotlib.pyplot as plt\n'), ((11043, 11067), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_vec'], {}), '(erro_vec)\n', (11057, 11067), True, 'import numpy as np\n'), ((14400, 14424), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_vec'], {}), '(erro_vec)\n', (14414, 14424), True, 'import numpy as np\n'), ((1672, 1692), 'numpy.exp', 'np.exp', (['(-(x - c) / d)'], {}), '(-(x - c) / d)\n', (1678, 1692), True, 'import numpy as np\n'), ((1993, 2013), 'numpy.exp', 'np.exp', (['(-(x - c) / d)'], {}), '(-(x - c) / d)\n', (1999, 2013), True, 'import numpy as np\n'), ((2576, 2590), 'numpy.exp', 'np.exp', (['(-K * x)'], {}), '(-K * x)\n', (2582, 2590), True, 'import numpy as np\n'), ((3546, 3558), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (3550, 3558), False, 'from pathlib import Path\n'), ((3722, 3739), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3733, 3739), False, 'import pickle\n'), ((7143, 7195), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p', 'v'], {'label': '"""interp"""', 'c': '"""k"""', 'marker': '"""x"""'}), "(p, v, label='interp', c='k', marker='x')\n", (7154, 7195), True, 'import matplotlib.pyplot as plt\n'), ((9777, 9817), 'matplotlib.pyplot.plot', 'plt.plot', (['meu_p', 'meu_v', '"""r"""'], {'label': '"""fit"""'}), "(meu_p, meu_v, 
'r', label='fit')\n", (9785, 9817), True, 'import matplotlib.pyplot as plt\n'), ((11005, 11021), 'numpy.abs', 'np.abs', (['erro_vec'], {}), '(erro_vec)\n', (11011, 11021), True, 'import numpy as np\n'), ((14362, 14378), 'numpy.abs', 'np.abs', (['erro_vec'], {}), '(erro_vec)\n', (14368, 14378), True, 'import numpy as np\n'), ((19901, 19926), 'pandas.DataFrame', 'pd.DataFrame', (['caso', 'index'], {}), '(caso, index)\n', (19913, 19926), True, 'import pandas as pd\n'), ((1182, 1202), 'numpy.exp', 'np.exp', (['(-(x - c) / d)'], {}), '(-(x - c) / d)\n', (1188, 1202), True, 'import numpy as np\n'), ((1271, 1286), 'numpy.exp', 'np.exp', (['(-k2 * x)'], {}), '(-k2 * x)\n', (1277, 1286), True, 'import numpy as np\n'), ((1652, 1666), 'numpy.exp', 'np.exp', (['(-k * x)'], {}), '(-k * x)\n', (1658, 1666), True, 'import numpy as np\n'), ((1817, 1837), 'numpy.exp', 'np.exp', (['(-(x - c) / d)'], {}), '(-(x - c) / d)\n', (1823, 1837), True, 'import numpy as np\n'), ((1973, 1987), 'numpy.exp', 'np.exp', (['(-k * x)'], {}), '(-k * x)\n', (1979, 1987), True, 'import numpy as np\n'), ((3065, 3081), 'numpy.exp', 'np.exp', (['(-K * Paw)'], {}), '(-K * Paw)\n', (3071, 3081), True, 'import numpy as np\n'), ((9931, 10027), 'matplotlib.pyplot.title', 'plt.title', (['f"""Case: {df.iloc[caso_teste].Animal}. TLC = {parameters[TLC_index]:.0f} mL"""'], {}), "(\n f'Case: {df.iloc[caso_teste].Animal}. TLC = {parameters[TLC_index]:.0f} mL'\n )\n", (9940, 10027), True, 'import matplotlib.pyplot as plt\n'), ((10396, 10494), 'matplotlib.pyplot.title', 'plt.title', (['f"""Case: {df.iloc[caso_teste].Animal}. TLC = {TLC:.0f} mL. Error: {erro:.1f}%"""'], {}), "(\n f'Case: {df.iloc[caso_teste].Animal}. TLC = {TLC:.0f} mL. Error: {erro:.1f}%'\n )\n", (10405, 10494), True, 'import matplotlib.pyplot as plt\n'), ((10613, 10677), 'matplotlib.pyplot.title', 'plt.title', (['f"""Case: {df.iloc[caso_teste].Animal}. Error fitting."""'], {}), "(f'Case: {df.iloc[caso_teste].Animal}. 
Error fitting.')\n", (10622, 10677), True, 'import matplotlib.pyplot as plt\n'), ((17770, 17798), 'numpy.linalg.norm', 'np.linalg.norm', (['(v_fit - v_in)'], {}), '(v_fit - v_in)\n', (17784, 17798), True, 'import numpy as np\n'), ((18018, 18046), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_fit - p_in)'], {}), '(p_fit - p_in)\n', (18032, 18046), True, 'import numpy as np\n'), ((1393, 1408), 'numpy.exp', 'np.exp', (['(-k2 * x)'], {}), '(-k2 * x)\n', (1399, 1408), True, 'import numpy as np\n'), ((1552, 1567), 'numpy.exp', 'np.exp', (['(-k2 * x)'], {}), '(-k2 * x)\n', (1558, 1567), True, 'import numpy as np\n'), ((1797, 1811), 'numpy.exp', 'np.exp', (['(-k * x)'], {}), '(-k * x)\n', (1803, 1811), True, 'import numpy as np\n'), ((4801, 4868), 'pandas.DataFrame', 'pd.DataFrame', (['caso'], {'index': "['Animal', 'Manobra', 'Dados', 'n_steps']"}), "(caso, index=['Animal', 'Manobra', 'Dados', 'n_steps'])\n", (4813, 4868), True, 'import pandas as pd\n'), ((23267, 23376), 'pandas.DataFrame', 'pd.DataFrame', (['caso'], {'index': "['Modelo', 'Método', 'N_points_interp', 'Norma erro', 'Erro médio', 'n_fitted']"}), "(caso, index=['Modelo', 'Método', 'N_points_interp',\n 'Norma erro', 'Erro médio', 'n_fitted'])\n", (23279, 23376), True, 'import pandas as pd\n'), ((4439, 4460), 'numpy.array', 'np.array', (['format_data'], {}), '(format_data)\n', (4447, 4460), True, 'import numpy as np\n'), ((7499, 7537), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo'}), '(modelo, p, v, method=metodo)\n', (7508, 7537), False, 'from scipy.optimize import curve_fit\n'), ((7638, 7696), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'bounds': 'meus_bounds'}), '(modelo, p, v, method=metodo, bounds=meus_bounds)\n', (7647, 7696), False, 'from scipy.optimize import curve_fit\n'), ((7866, 7915), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'p0': 'meu_p0'}), '(modelo, p, v, method=metodo, 
p0=meu_p0)\n', (7875, 7915), False, 'from scipy.optimize import curve_fit\n'), ((8018, 8087), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'p0': 'meu_p0', 'bounds': 'meus_bounds'}), '(modelo, p, v, method=metodo, p0=meu_p0, bounds=meus_bounds)\n', (8027, 8087), False, 'from scipy.optimize import curve_fit\n'), ((8341, 8379), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo'}), '(modelo, v, p, method=metodo)\n', (8350, 8379), False, 'from scipy.optimize import curve_fit\n'), ((8480, 8538), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'bounds': 'meus_bounds'}), '(modelo, v, p, method=metodo, bounds=meus_bounds)\n', (8489, 8538), False, 'from scipy.optimize import curve_fit\n'), ((8708, 8757), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'p0': 'meu_p0'}), '(modelo, v, p, method=metodo, p0=meu_p0)\n', (8717, 8757), False, 'from scipy.optimize import curve_fit\n'), ((8860, 8929), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'p0': 'meu_p0', 'bounds': 'meus_bounds'}), '(modelo, v, p, method=metodo, p0=meu_p0, bounds=meus_bounds)\n', (8869, 8929), False, 'from scipy.optimize import curve_fit\n'), ((9064, 9073), 'numpy.abs', 'np.abs', (['p'], {}), '(p)\n', (9070, 9073), True, 'import numpy as np\n'), ((12176, 12214), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo'}), '(modelo, p, v, method=metodo)\n', (12185, 12214), False, 'from scipy.optimize import curve_fit\n'), ((12315, 12373), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'bounds': 'meus_bounds'}), '(modelo, p, v, method=metodo, bounds=meus_bounds)\n', (12324, 12373), False, 'from scipy.optimize import curve_fit\n'), ((12543, 12592), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'p0': 'meu_p0'}), '(modelo, p, v, 
method=metodo, p0=meu_p0)\n', (12552, 12592), False, 'from scipy.optimize import curve_fit\n'), ((12695, 12764), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'p0': 'meu_p0', 'bounds': 'meus_bounds'}), '(modelo, p, v, method=metodo, p0=meu_p0, bounds=meus_bounds)\n', (12704, 12764), False, 'from scipy.optimize import curve_fit\n'), ((13018, 13056), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo'}), '(modelo, v, p, method=metodo)\n', (13027, 13056), False, 'from scipy.optimize import curve_fit\n'), ((13157, 13215), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'bounds': 'meus_bounds'}), '(modelo, v, p, method=metodo, bounds=meus_bounds)\n', (13166, 13215), False, 'from scipy.optimize import curve_fit\n'), ((13385, 13434), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'p0': 'meu_p0'}), '(modelo, v, p, method=metodo, p0=meu_p0)\n', (13394, 13434), False, 'from scipy.optimize import curve_fit\n'), ((13537, 13606), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'p0': 'meu_p0', 'bounds': 'meus_bounds'}), '(modelo, v, p, method=metodo, p0=meu_p0, bounds=meus_bounds)\n', (13546, 13606), False, 'from scipy.optimize import curve_fit\n'), ((15636, 15674), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo'}), '(modelo, p, v, method=metodo)\n', (15645, 15674), False, 'from scipy.optimize import curve_fit\n'), ((15775, 15833), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'bounds': 'meus_bounds'}), '(modelo, p, v, method=metodo, bounds=meus_bounds)\n', (15784, 15833), False, 'from scipy.optimize import curve_fit\n'), ((16003, 16052), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'p0': 'meu_p0'}), '(modelo, p, v, method=metodo, p0=meu_p0)\n', (16012, 16052), False, 'from 
scipy.optimize import curve_fit\n'), ((16155, 16224), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'p', 'v'], {'method': 'metodo', 'p0': 'meu_p0', 'bounds': 'meus_bounds'}), '(modelo, p, v, method=metodo, p0=meu_p0, bounds=meus_bounds)\n', (16164, 16224), False, 'from scipy.optimize import curve_fit\n'), ((16478, 16516), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo'}), '(modelo, v, p, method=metodo)\n', (16487, 16516), False, 'from scipy.optimize import curve_fit\n'), ((16617, 16675), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'bounds': 'meus_bounds'}), '(modelo, v, p, method=metodo, bounds=meus_bounds)\n', (16626, 16675), False, 'from scipy.optimize import curve_fit\n'), ((16845, 16894), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'p0': 'meu_p0'}), '(modelo, v, p, method=metodo, p0=meu_p0)\n', (16854, 16894), False, 'from scipy.optimize import curve_fit\n'), ((16997, 17066), 'scipy.optimize.curve_fit', 'curve_fit', (['modelo', 'v', 'p'], {'method': 'metodo', 'p0': 'meu_p0', 'bounds': 'meus_bounds'}), '(modelo, v, p, method=metodo, p0=meu_p0, bounds=meus_bounds)\n', (17006, 17066), False, 'from scipy.optimize import curve_fit\n')] |
import torch
import tables
import os
import pickle
import numpy as np
import math
import datetime
import torchvision
import cv2
import glob
from pathlib import Path
def CheckPaths(paths,dataset_name):
    """Validate that the SuperPoint checkpoint path is configured and exists.

    Args:
        paths: configuration dict; must contain 'path_to_superpoint_checkpoint'.
        dataset_name: unused here.  # NOTE(review): kept for interface
            compatibility -- confirm no caller relies on it.

    Raises:
        AssertionError: when the path is missing or does not point to a file.
    """
    checkpoint = paths['path_to_superpoint_checkpoint']
    # 'is not None' is the idiomatic None test (was '!= None').
    assert checkpoint is not None , "Path missing!! Update 'path_to_superpoint_checkpoint' on paths/main.yaml (link for superpoint_v1.pth availiable on the github repo)"
    assert os.path.isfile(checkpoint), f"File {checkpoint} does not exists. Update 'path_to_superpoint_checkpoint' on paths/main.yaml (link for superpoint_v1.pth availiable on the github repo)"
def LogText(text,experiment_name,log_path):
    """Append a timestamped message to the experiment log file and echo it.

    Args:
        text: message to log.
        experiment_name: experiment whose log directory/file is used.
        log_path: root directory holding all experiment logs.
    """
    Experiment_Log_directory=GetLogsPath(experiment_name,log_path)
    Log_File=Experiment_Log_directory / (experiment_name + '.txt')
    # Build the stamped line once so stdout and the file carry the exact same
    # timestamp (previously datetime.now() was called twice).
    stamped = text + " (" + datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y") + ")"
    print(stamped)
    # 'with' guarantees the handle is closed even if the write raises.
    with open(Log_File, 'a') as f:
        f.write(stamped + '\n')
def GetCheckPointsPath(experiment_name,log_path):
    """Return the checkpoint directory for *experiment_name* under *log_path*."""
    root = Path(log_path)
    return root / experiment_name / "CheckPoints/"
def GetLogsPath(experiment_name,log_path):
    """Return the log directory for *experiment_name* under *log_path*."""
    root = Path(log_path)
    return root / experiment_name / "Logs/"
def GetPathsForClusterVisualisation(experiment_name,log_path):
    """Locate the newest keypoint pickle (Updated or SuperPoint) for visualisation."""
    checkpoint_dir = GetCheckPointsPath(experiment_name, log_path)
    candidates = [str(entry.resolve()) for entry in checkpoint_dir.glob('*')]
    # Operator precedence preserved from the original expression:
    # ('.pickle' in f and 'UpdatedKeypoints' in f) or ('Super' in f).
    matching = [
        f for f in candidates
        if ('.pickle' in f and 'UpdatedKeypoints' in f) or 'Super' in f
    ]
    # Most recently created match wins.
    path_to_keypoints = max(matching, key=os.path.getctime)
    LogText('Keypoints loaded from :' + str(path_to_keypoints), experiment_name, log_path)
    return path_to_keypoints
def GetPathsTrainSecondStep(experiment_name,log_path):
    """Return the newest first-step checkpoint and updated-keypoints pickle."""
    checkpoint_dir = GetCheckPointsPath(experiment_name, log_path)
    candidates = [str(entry.resolve()) for entry in checkpoint_dir.glob('*')]
    # Newest (by creation time) file of each kind.
    path_to_checkpoint = max(
        (f for f in candidates if '.pth' in f and 'FirstStep' in f),
        key=os.path.getctime)
    path_to_keypoints = max(
        (f for f in candidates if '.pickle' in f and 'UpdatedKeypoints' in f),
        key=os.path.getctime)
    LogText('Keypoints loaded from :' + str(path_to_keypoints), experiment_name, log_path)
    LogText('Checkpoint loaded from :' + str(path_to_checkpoint), experiment_name, log_path)
    return path_to_checkpoint, path_to_keypoints
def GetPathsResumeFirstStep(experiment_name,log_path):
    """Find the keypoint pickle and the newest earlier checkpoint for resuming step one.

    Returns:
        (path_to_checkpoint, path_to_keypoints); the checkpoint is None when
        no first-step checkpoint was created before the keypoint file.
    """
    CheckPointDirectory=GetCheckPointsPath(experiment_name,log_path)
    listoffiles=list(str(f.resolve()) for f in CheckPointDirectory.glob('*'))
    path_to_keypoints=max([f for f in listoffiles if '.pickle' in f and ('SuperPoint' in f or 'Updated' in f) ] , key=os.path.getctime)
    sortedlistoffiles=sorted(listoffiles,key=os.path.getctime)
    # Only consider checkpoints created strictly before the keypoint file.
    indexofcheckpoint=sortedlistoffiles.index(path_to_keypoints)
    sortedlistoffiles=sortedlistoffiles[:indexofcheckpoint]
    try:
        path_to_checkpoint=max([f for f in sortedlistoffiles if '.pth' in f and 'FirstStep' in f ], key=os.path.getctime)
    except ValueError:
        # max() raises ValueError on an empty sequence; the previous bare
        # 'except:' also swallowed KeyboardInterrupt/SystemExit.
        path_to_checkpoint=None
        LogText('Checkpoint was not found',experiment_name,log_path)
    LogText('Keypoints loaded from :'+(str(path_to_keypoints)),experiment_name,log_path)
    LogText('Checkpoint loaded from :'+(str(path_to_checkpoint)),experiment_name,log_path)
    return path_to_checkpoint,path_to_keypoints
def GetPathsResumeSecondStep(experiment_name,log_path):
    """Find the newest second-step checkpoint plus the keypoint pickle that precedes it."""
    checkpoint_dir = GetCheckPointsPath(experiment_name, log_path)
    candidates = [str(entry.resolve()) for entry in checkpoint_dir.glob('*')]
    by_ctime = sorted(candidates, key=os.path.getctime)
    path_to_checkpoint = max(
        (f for f in candidates if '.pth' in f and 'SecondStep' in f),
        key=os.path.getctime)
    # Only keypoint files created before that checkpoint are eligible.
    earlier = by_ctime[:by_ctime.index(path_to_checkpoint)]
    path_to_keypoints = max(
        (f for f in earlier
         if '.pickle' in f and ('Merged' in f or 'Updated' in f)),
        key=os.path.getctime)
    LogText('Keypoints loaded from :' + str(path_to_keypoints), experiment_name, log_path)
    LogText('Checkpoint loaded from :' + str(path_to_checkpoint), experiment_name, log_path)
    return path_to_checkpoint, path_to_keypoints
def GetPathsEval(experiment_name,log_path):
    """Return the newest second-step checkpoint for evaluation."""
    checkpoint_dir = GetCheckPointsPath(experiment_name, log_path)
    candidates = [str(entry.resolve()) for entry in checkpoint_dir.glob('*')]
    path_to_checkpoint = max(
        (f for f in candidates if '.pth' in f and 'SecondStep' in f),
        key=os.path.getctime)
    LogText('Checkpoint loaded from :' + str(path_to_checkpoint), experiment_name, log_path)
    return path_to_checkpoint
def initialize_log_dirs(experiment_name,log_path):
    """Create the checkpoint and log directories for an experiment if absent."""
    CheckPointDirectory=GetCheckPointsPath(experiment_name,log_path)
    Experiment_Log_directory=GetLogsPath(experiment_name,log_path)
    # exist_ok avoids the check-then-create race of the previous
    # exists()/makedirs() pair; parents are created as before.
    os.makedirs(Experiment_Log_directory, exist_ok=True)
    os.makedirs(CheckPointDirectory, exist_ok=True)
def load_keypoints(filename):
    """Load and return the pickled image-keypoints mapping stored at *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def Cuda(model):
    """Move *model* to the GPU when CUDA is available; otherwise return it unchanged."""
    return model.cuda() if torch.cuda.is_available() else model
def CreateFileArray(name,columns):
    """Create (or overwrite) an extendable on-disk float64 array with *columns* columns."""
    filename = name + '.npy'
    # Start from a clean slate: drop any previous array with this name.
    if os.path.exists(filename):
        os.remove(filename)
    handle = tables.open_file(filename, mode='w')
    handle.create_earray(handle.root, 'data', tables.Float64Atom(), (0, columns))
    handle.close()
def AppendFileArray(array,name):
    """Append *array* rows to the on-disk array created by CreateFileArray."""
    handle = tables.open_file(name + '.npy', mode='a')
    handle.root.data.append(array)
    handle.close()
def OpenreadFileArray(name):
    """Open the on-disk array read-only.

    Returns:
        (data_node, file_handle); the caller must keep the handle alive while
        using the data and release it via ClosereadFileArray.
    """
    handle = tables.open_file(name + '.npy', mode='r')
    return handle.root.data, handle
def ClosereadFileArray(f,name):
    """Close the file handle *f* and delete the backing '<name>.npy' file if present."""
    filename = name + '.npy'
    f.close()
    if os.path.exists(filename):
        os.remove(filename)
# NOTE(review): duplicate definition -- an identical load_keypoints appears
# earlier in this module; this later one shadows it at import time.
def load_keypoints(filename):
    """Load and return the pickled keypoints mapping stored at *filename*."""
    with open(filename, 'rb') as handle:
        Image_Keypoints=pickle.load( handle)
    return Image_Keypoints
def save_keypoints(Image_Keypoints,filename,experiment):
    """Pickle *Image_Keypoints* to *filename*.

    Bug fix: the previous body referenced an undefined global 'checkPointdir'
    and therefore raised NameError on every call; the pickle is now written
    to *filename* directly.

    Args:
        Image_Keypoints: keypoints mapping to serialise.
        filename: destination path for the pickle.
        experiment: unused.  # NOTE(review): presumably meant to select a
            checkpoint directory -- confirm against callers.
    """
    with open(filename, 'wb') as handle:
        pickle.dump(Image_Keypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)
def BuildMultiChannelGaussians(outputChannels,keypoints,resolution=64,size=3):
    """Render each keypoint as a small Gaussian stamp into its own heatmap channel.

    Args:
        outputChannels: number of channels in the returned heatmap stack.
        keypoints: array-like of rows whose columns 0/1 are x/y in heatmap
            coordinates and column 2 selects the target channel.
            # NOTE(review): column-2-as-channel is inferred from the indexing
            # below -- confirm against callers.
        resolution: side length of each (square) heatmap.
        size: Gaussian stamp size forwarded to fastDrawGaussian (1 or 3).

    Returns:
        torch.Tensor of shape (outputChannels, resolution, resolution).
    """
    points = keypoints.copy()
    # Shift to the 1-based coordinates fastDrawGaussian expects.
    points[:, 0] = points[:, 0] + 1
    points[:, 1] = points[:, 1] + 1
    numberOfAnnotationsPoints = points.shape[0]
    heatMaps=torch.zeros(outputChannels, resolution, resolution)
    for i in range(numberOfAnnotationsPoints):
        p=np.asarray(points[i])
        try:
            heatMaps[int(p[2])] = fastDrawGaussian(heatMaps[int(p[2])], p, size)
        except:
            # Best-effort: points whose channel index or coordinates fall
            # outside the tensor are silently skipped.
            # NOTE(review): the bare except also hides unrelated errors.
            pass
    return heatMaps
def MergeScales(points,thres):
    """Deduplicate detections across scales via NMS over (2*thres)-sized boxes."""
    transposed = points.T
    xs, ys, scores = transposed[0:1, :], transposed[1:2, :], transposed[2:3, :]
    # Each point becomes a square box centred on it, scored by its
    # confidence; overlapping boxes collapse onto the strongest point.
    boxes = torch.cat((xs - thres, ys - thres, xs + thres, ys + thres, scores), 0).T
    keep = torchvision.ops.nms(boxes[:, 0:4], boxes[:, 4], 0.01)
    return transposed[:, keep].T
def BuildGaussians(keypoints,resolution=64,size=1):
    """Render all keypoints into a single heatmap (pixel-wise max of per-point stamps).

    Args:
        keypoints: array-like of rows with x/y in columns 0/1 (heatmap coords).
        resolution: side length of the (square) output heatmap.
        size: Gaussian stamp size forwarded to fastDrawGaussian (1 or 3).

    Returns:
        torch.Tensor of shape (resolution, resolution); all zeros when no
        keypoints are given.
    """
    points = keypoints.copy()
    # Shift to the 1-based coordinates fastDrawGaussian expects.
    points[:, 0] = points[:, 0] + 1
    points[:, 1] = points[:, 1] + 1
    numberOfAnnotationsPoints = points.shape[0]
    if (numberOfAnnotationsPoints == 0):
        heatMaps=torch.zeros(1, resolution, resolution)
    else:
        heatMaps = torch.zeros(numberOfAnnotationsPoints, resolution, resolution)
        for i in range(numberOfAnnotationsPoints):
            p=np.asarray(points[i])
            try:
                heatMaps[i] = fastDrawGaussian(heatMaps[i], p, size)
            except:
                # Best-effort: out-of-bounds points are silently skipped.
                # NOTE(review): the bare except also hides unrelated errors.
                pass
    # Collapse the per-point stack into one map by taking the pixel-wise max.
    heatmap = torch.max(heatMaps, 0)[0]
    return heatmap
def MergePoints(current_points,oldPoints):
    """Merge two point sets, preferring the old point when two lie within 1px.

    Note: overwrites the confidence column (index 2) of BOTH input tensors in
    place -- new points get 0.1, old points 0.2, so NMS keeps the old point
    wherever the two sets coincide.
    """
    current_points[:, 2] = 0.1
    oldPoints[:, 2] = 0.2
    radius = 1
    merged = torch.cat((current_points, oldPoints), dim=0).T
    xs, ys, scores = merged[0:1, :], merged[1:2, :], merged[2:3, :]
    boxes = torch.cat((xs - radius, ys - radius, xs + radius, ys + radius, scores), 0).T
    keep = torchvision.ops.nms(boxes[:, 0:4], boxes[:, 4], 0.01)
    return merged[:, keep].T
def GetBatchMultipleHeatmap(confidenceMap,threshold,NMSthes=1,mode='batchLevel'):
    """Extract thresholded peaks from a batch of confidence maps and NMS them.

    Args:
        confidenceMap: 4-D tensor; nonzero indices are read as
            [sample, channel, y, x] (dims 2/3 are spatial).
        threshold: minimum confidence for a location to become a candidate.
        NMSthes: suppression diameter; boxes have half-side ceil(NMSthes/2).
        mode: 'batchLevel' suppresses within each sample,
            'clustersLevel' within each (sample, channel) pair.

    Returns:
        (K, 5) tensor of rows [sample_idx, x, y, confidence, channel_idx],
        sorted by sample index.
    """
    mask=confidenceMap>threshold
    prob =confidenceMap[mask]
    pred=torch.nonzero(mask)
    points = pred[:, 2:4]
    # nonzero yields (y, x); flip to (x, y).
    points=points.flip(1)
    if mode=='clustersLevel':
        # Fuse sample and channel indices into one NMS group id.
        # NOTE(review): assumes fewer than 100 channels per sample -- confirm.
        idx=100*pred[:,0]+pred[:,1]
    elif mode=='batchLevel':
        idx =pred[:, 0]
    nmsPoints=torch.cat((points.float(),prob.unsqueeze(1)),1).T
    thres = math.ceil(NMSthes / 2)
    # Turn each point into a square box so batched_nms can suppress neighbours.
    newpoints = torch.cat((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[0:1, :] + thres,
                           nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0).T
    res = torchvision.ops.boxes.batched_nms(newpoints[:, 0:4], newpoints[:, 4],idx, 0.01)
    p=torch.cat((pred[res,:1].float(),nmsPoints[:,res].T,pred[res,1:2].float()),dim=1)
    # Re-sort the surviving points by sample index.
    value, indices = p[:,0].sort()
    p=p[indices]
    return p
def GetDescriptors(descriptor_volume, points, W, H):
    """Bilinearly sample unit-length descriptors at the given pixel locations.

    Args:
        descriptor_volume: (D, Hc, Wc) dense descriptor map.
        points: (N, 2) tensor of (x, y) pixel coordinates in a W x H image.
        W: image width, used to map x into grid_sample's [-1, 1] range.
        H: image height, used the same way for y.

    Returns:
        (N, D) tensor of L2-normalised descriptors; empty when N == 0.
    """
    D = descriptor_volume.shape[0]
    if points.shape[0] == 0:
        descriptors = torch.zeros((0, D))
    else:
        coarse_desc = descriptor_volume.unsqueeze(0)
        samp_pts = points.clone().T
        # Normalise pixel coordinates into grid_sample's [-1, 1] space.
        samp_pts[0, :] = (samp_pts[0, :] / (float(W) / 2.)) - 1.
        samp_pts[1, :] = (samp_pts[1, :] / (float(H) / 2.)) - 1.
        samp_pts = samp_pts.transpose(0, 1).contiguous()
        samp_pts = samp_pts.view(1, 1, -1, 2)
        samp_pts = samp_pts.float()
        densedesc = torch.nn.functional.grid_sample(coarse_desc, samp_pts)
        densedesc = densedesc.view(D, -1)
        # L2-normalise each sampled descriptor column.
        densedesc /= torch.norm(densedesc, dim=0).unsqueeze(0)
        descriptors = densedesc.T
    return descriptors
def GetPointsFromHeatmaps(heatmapOutput):
    # get max for each batch sample
    """For each heatmap channel, return [x, y, confidence, channel_index] of its argmax.

    Args:
        heatmapOutput: (C, H, W) tensor; one point is extracted per channel.

    Returns:
        (C, 4) tensor.  Coordinates are multiplied by 4 -- presumably the
        heatmaps are at 1/4 of the input resolution (TODO confirm).
    """
    keypoints = torch.zeros(heatmapOutput.size(0), 4)
    val, idx = torch.max(heatmapOutput.view(heatmapOutput.shape[0], -1), 1)
    keypoints[:, 2] = val
    # Unravel the flat argmax index back to (x, y) via 1-based intermediates.
    # NOTE(review): repeat(1, 1, 2) yields a 3-D tensor assigned into a 2-D
    # slice -- verify this broadcasts as intended on the torch version in use.
    keypoints[:, :2] = idx.view(idx.size(0), 1).repeat(1, 1, 2).float()
    keypoints[..., 0] = (keypoints[..., 0] - 1) % heatmapOutput.size(2) + 1
    # NOTE(review): y divides by size(1) (height) while the flat-index stride
    # is size(2) (width); correct only for square heatmaps -- confirm.
    keypoints[..., 1] = keypoints[..., 1].add_(-1).div_(heatmapOutput.size(1)).floor()
    keypoints[:, 3] = torch.arange(heatmapOutput.size(0))
    keypoints[:, :2] = 4 * keypoints[:, :2]
    return keypoints
def fastDrawGaussian(img,pt,size):
    """Add a precomputed Gaussian stamp (gaussian1 or gaussian3) to *img* at *pt*.

    Args:
        img: 2-D tensor to draw into (modified in place and returned).
        pt: point whose 1-based x/y live in pt[0]/pt[1].
        size: 3 selects the 3x3 stamp, 1 the single-pixel stamp.
            # NOTE(review): any other value leaves ``g`` unbound (NameError).

    Returns:
        The input tensor with the (clipped) stamp added.
    """
    if (size == 3):
        g = gaussian3
    elif (size == 1):
        g = gaussian1
    s = 1
    # Upper-left / bottom-right corners of the 3x3 window, 1-based coords.
    ul = torch.tensor([[math.floor(pt[0] - s)], [math.floor(pt[1] -s)]])
    br = torch.tensor([[math.floor(pt[0] + s)], [math.floor(pt[1] +s)]])
    if (ul[0] > img.shape[1] or ul[1] > img.shape[0] or br[0] < 1 or br[1] < 1):
        # Stamp window lies fully outside the image: nothing to draw.
        return img
    # Clip the stamp range (g_*) and image range (img_*) so both slices below
    # cover exactly the same extent.
    g_x = torch.tensor([[max(1, -ul[0])], [min(br[0], img.shape[1]) - max(1, ul[0]) + max(1, -ul[0])]])
    g_y = torch.tensor([[max(1, -ul[1])], [min(br[1], img.shape[0]) - max(1, ul[1]) + max(1, -ul[1])]])
    img_x = torch.tensor([[max(1, ul[0])], [min(br[0], img.shape[1])]])
    img_y = torch.tensor([[max(1, ul[1])], [min(br[1], img.shape[0])]])
    assert (g_x[0] > 0 and g_y[0] > 0)
    img[int(img_y[0])-1:int(img_y[1]), int(img_x[0])-1:int(img_x[1])] += g[int(g_y[0])-1:int(g_y[1]), int(g_x[0])-1:int(g_x[1])]
    return img
def gaussian(size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
             height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5, mean_vert=0.5):
    """Build a 2-D Gaussian kernel as a float32 NumPy array of shape (height, width).

    Width/height default to *size*; the horizontal/vertical sigmas default to
    *sigma*; the peak sits at (mean_horz, mean_vert) in relative coordinates.
    """
    if width is None:
        width = size
    if height is None:
        height = size
    if sigma_horz is None:
        sigma_horz = sigma
    if sigma_vert is None:
        sigma_vert = sigma
    center_x = mean_horz * width + 0.5
    center_y = mean_vert * height + 0.5
    gauss = np.empty((height, width), dtype=np.float32)
    for row in range(height):
        dy = (row + 1 - center_y) / (sigma_vert * height)
        for col in range(width):
            dx = (col + 1 - center_x) / (sigma_horz * width)
            gauss[row][col] = amplitude * math.exp(-(math.pow(dx, 2) / 2.0 + math.pow(dy, 2) / 2.0))
    if normalize:
        # Scale so the kernel sums to 1.
        gauss = gauss / np.sum(gauss)
    return gauss
# Precomputed 3x3 Gaussian stamp (values match gaussian(size=3, sigma=0.25));
# used by fastDrawGaussian when size == 3.
gaussian3=torch.tensor([[0.16901332, 0.41111228, 0.16901332],
                        [0.41111228, 1. , 0.41111228],
                        [0.16901332, 0.41111228, 0.16901332]])
# Degenerate 1-pixel stamp (a single unit impulse); used when size == 1.
gaussian1=torch.tensor([[0.0, 0.0, 0.0],
                        [0.0, 1. , 0.0],
                        [0.0, 0.0, 0.0]])
colorlist = [
"#ffdd41",
"#0043db",
"#62ef00",
"#ff34ff",
"#00ff5e",
"#ef00de",
"#00bd00",
"#8f00c3",
"#e5f700",
"#a956ff",
"#4bba00",
"#ee00c2",
"#4cbb0e",
"#ff00c2",
"#00ffa8",
"#fe60f9",
"#55b200",
"#0052e5",
"#ffe000",
"#001e96",
"#f1e215",
"#336dff",
"#e9d800",
"#6056e7",
"#ffd910",
"#0070ff",
"#8cbb00",
"#0041c2",
"#61bf2c",
"#2a007b",
"#00b64b",
"#8237c0",
"#00c87c",
"#750091",
"#00ffd3",
"#f50000",
"#00ffff",
"#ff003a",
"#00ffff",
"#d40000",
"#00ffff",
"#c70000",
"#00ffff",
"#cb0008",
"#00ffff",
"#ed4000",
"#00ffff",
"#ff005d",
"#00e8c5",
"#d20022",
"#00ffff",
"#b00000",
"#00f2f6",
"#d50031",
"#00edd2",
"#ce004e",
"#00c47f",
"#7f56e3",
"#ffa900",
"#0046c7",
"#d0b325",
"#001175",
"#ff8600",
"#0080fb",
"#ca9700",
"#0060d9",
"#ac9a00",
"#006de2",
"#f2a533",
"#0088ff",
"#d74900",
"#0066d8",
"#618a00",
"#d679ff",
"#077200",
"#ff88ff",
"#008a2d",
"#590077",
"#00d19a",
"#c8005b",
"#00f0e1",
"#ac000f",
"#00f0ff",
"#a10000",
"#00ecff",
"#c12d0c",
"#00e9ff",
"#d34f00",
"#005fd0",
"#6a8600",
"#0060cd",
"#c67900",
"#0066d1",
"#9b8700",
"#210052",
"#ffe585",
"#1a0045",
"#00b87b",
"#d665cf",
"#005600",
"#ff97ff",
"#005100",
"#ff9dff",
"#005100",
"#e685e5",
"#1a5800",
"#ffa6ff",
"#004900",
"#ff6eb9",
"#00540e",
"#d49dff",
"#004500",
"#ffb1ff",
"#004300",
"#dd337b",
"#00e3d0",
"#940000",
"#00e8ff",
"#ae191d",
"#00e3f8",
"#860000",
"#70faff",
"#860000",
"#69f6fa",
"#790000",
"#00d6ff",
"#a21221",
"#00d6e0",
"#bf1b3d",
"#00d5dd",
"#780002",
"#00d3ff",
"#730000",
"#00d2ff",
"#ac4f00",
"#00a6ff",
"#f37e3b",
"#0081e3",
"#496000",
"#533d9a",
"#138b53",
"#7c005e",
"#00814b",
"#60005b",
"#005919",
"#ffb7ff",
"#004000",
"#ffb9ff",
"#003800",
"#cca8ff",
"#003200",
"#ff7bab",
"#003200",
"#ff7ca1",
"#002f00",
"#ff84a8",
"#002900",
"#cebbff",
"#324d00",
"#0094f1",
"#765400",
"#00aaff",
"#693600",
"#00adff",
"#6b4400",
"#0094eb",
"#595900",
"#006bc4",
"#dbb670",
"#001f6a",
"#ffc49b",
"#000432",
"#eef6e3",
"#230028",
"#bef9ff",
"#a90041",
"#00bfc1",
"#85002e",
"#63dbe9",
"#750024",
"#00c7ff",
"#480900",
"#21c6ff",
"#4c1400",
"#00bdff",
"#655400",
"#0096e6",
"#ffa078",
"#001d57",
"#ffac96",
"#382b7f",
"#374900",
"#0091e2",
"#3f4900",
"#00bdff",
"#360000",
"#6adcff",
"#350000",
"#61d9ff",
"#6c002f",
"#00c4f8",
"#813529",
"#7cdbfd",
"#1e0a00",
"#ede4ff",
"#001400",
"#ece4ff",
"#001b00",
"#ffd9ef",
"#002900",
"#e1e1ff",
"#002500",
"#aeadee",
"#002400",
"#eaa6a5",
"#002200",
"#b0709f",
"#004822",
"#003a7d",
"#a4965a",
"#004b86",
"#8c7d42",
"#005b99",
"#483000",
"#008cc3",
"#9a633e",
"#005083",
"#a68c61",
"#001e3d",
"#82764a",
"#003054",
"#1c3100",
"#4d2c57",
"#009b9e",
"#2b1500",
"#00929e",
"#441c30",
"#127c72",
"#261700",
"#005b88",
"#2a2800",
"#00456f",
"#003c19",
"#7f6788",
"#062300",
"#505880",
"#004c29",
"#00496e",
"#2b2500",
"#628687",
"#020d14",
"#005e6b",
"#271b00",
"#00546d",
"#1d1617",
"#264133",
"#252737",
"#002f24",
"#002f3d",
"#001919"
] | [
"os.remove",
"pickle.dump",
"numpy.sum",
"numpy.empty",
"torch.cat",
"os.path.isfile",
"pathlib.Path",
"pickle.load",
"torch.nn.functional.grid_sample",
"math.pow",
"os.path.exists",
"tables.Float64Atom",
"torch.zeros",
"datetime.datetime.now",
"math.ceil",
"torch.norm",
"numpy.asarr... | [((12861, 12987), 'torch.tensor', 'torch.tensor', (['[[0.16901332, 0.41111228, 0.16901332], [0.41111228, 1.0, 0.41111228], [\n 0.16901332, 0.41111228, 0.16901332]]'], {}), '([[0.16901332, 0.41111228, 0.16901332], [0.41111228, 1.0, \n 0.41111228], [0.16901332, 0.41111228, 0.16901332]])\n', (12873, 12987), False, 'import torch\n'), ((13016, 13081), 'torch.tensor', 'torch.tensor', (['[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])\n', (13028, 13081), False, 'import torch\n'), ((408, 462), 'os.path.isfile', 'os.path.isfile', (["paths['path_to_superpoint_checkpoint']"], {}), "(paths['path_to_superpoint_checkpoint'])\n", (422, 462), False, 'import os\n'), ((1137, 1151), 'pathlib.Path', 'Path', (['log_path'], {}), '(log_path)\n', (1141, 1151), False, 'from pathlib import Path\n'), ((1308, 1322), 'pathlib.Path', 'Path', (['log_path'], {}), '(log_path)\n', (1312, 1322), False, 'from pathlib import Path\n'), ((5436, 5461), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5459, 5461), False, 'import torch\n'), ((5594, 5618), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5608, 5618), False, 'import os\n'), ((5656, 5692), 'tables.open_file', 'tables.open_file', (['filename'], {'mode': '"""w"""'}), "(filename, mode='w')\n", (5672, 5692), False, 'import tables\n'), ((5704, 5724), 'tables.Float64Atom', 'tables.Float64Atom', ([], {}), '()\n', (5722, 5724), False, 'import tables\n'), ((5865, 5901), 'tables.open_file', 'tables.open_file', (['filename'], {'mode': '"""a"""'}), "(filename, mode='a')\n", (5881, 5901), False, 'import tables\n'), ((6013, 6049), 'tables.open_file', 'tables.open_file', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (6029, 6049), False, 'import tables\n'), ((6165, 6189), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (6179, 6189), False, 'import os\n'), ((6836, 6887), 'torch.zeros', 
'torch.zeros', (['outputChannels', 'resolution', 'resolution'], {}), '(outputChannels, resolution, resolution)\n', (6847, 6887), False, 'import torch\n'), ((7472, 7533), 'torchvision.ops.nms', 'torchvision.ops.nms', (['newpoints[:, 0:4]', 'newpoints[:, 4]', '(0.01)'], {}), '(newpoints[:, 0:4], newpoints[:, 4], 0.01)\n', (7491, 7533), False, 'import torchvision\n'), ((8358, 8403), 'torch.cat', 'torch.cat', (['(current_points, oldPoints)'], {'dim': '(0)'}), '((current_points, oldPoints), dim=0)\n', (8367, 8403), False, 'import torch\n'), ((8630, 8691), 'torchvision.ops.nms', 'torchvision.ops.nms', (['newpoints[:, 0:4]', 'newpoints[:, 4]', '(0.01)'], {}), '(newpoints[:, 0:4], newpoints[:, 4], 0.01)\n', (8649, 8691), False, 'import torchvision\n'), ((8897, 8916), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (8910, 8916), False, 'import torch\n'), ((9164, 9186), 'math.ceil', 'math.ceil', (['(NMSthes / 2)'], {}), '(NMSthes / 2)\n', (9173, 9186), False, 'import math\n'), ((9386, 9471), 'torchvision.ops.boxes.batched_nms', 'torchvision.ops.boxes.batched_nms', (['newpoints[:, 0:4]', 'newpoints[:, 4]', 'idx', '(0.01)'], {}), '(newpoints[:, 0:4], newpoints[:, 4], idx, 0.01\n )\n', (9419, 9471), False, 'import torchvision\n'), ((12426, 12469), 'numpy.empty', 'np.empty', (['(height, width)'], {'dtype': 'np.float32'}), '((height, width), dtype=np.float32)\n', (12434, 12469), True, 'import numpy as np\n'), ((5111, 5148), 'os.makedirs', 'os.makedirs', (['Experiment_Log_directory'], {}), '(Experiment_Log_directory)\n', (5122, 5148), False, 'import os\n'), ((5199, 5231), 'os.makedirs', 'os.makedirs', (['CheckPointDirectory'], {}), '(CheckPointDirectory)\n', (5210, 5231), False, 'import os\n'), ((5363, 5382), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (5374, 5382), False, 'import pickle\n'), ((5628, 5647), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (5637, 5647), False, 'import os\n'), ((6199, 6218), 'os.remove', 'os.remove', 
(['filename'], {}), '(filename)\n', (6208, 6218), False, 'import os\n'), ((6316, 6335), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (6327, 6335), False, 'import pickle\n'), ((6520, 6590), 'pickle.dump', 'pickle.dump', (['Image_Keypoints', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(Image_Keypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (6531, 6590), False, 'import pickle\n'), ((6946, 6967), 'numpy.asarray', 'np.asarray', (['points[i]'], {}), '(points[i])\n', (6956, 6967), True, 'import numpy as np\n'), ((7291, 7437), 'torch.cat', 'torch.cat', (['(nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[0:1, :] +\n thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :])', '(0)'], {}), '((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[\n 0:1, :] + thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0)\n', (7300, 7437), False, 'import torch\n'), ((7850, 7888), 'torch.zeros', 'torch.zeros', (['(1)', 'resolution', 'resolution'], {}), '(1, resolution, resolution)\n', (7861, 7888), False, 'import torch\n'), ((7918, 7980), 'torch.zeros', 'torch.zeros', (['numberOfAnnotationsPoints', 'resolution', 'resolution'], {}), '(numberOfAnnotationsPoints, resolution, resolution)\n', (7929, 7980), False, 'import torch\n'), ((8038, 8059), 'numpy.asarray', 'np.asarray', (['points[i]'], {}), '(points[i])\n', (8048, 8059), True, 'import numpy as np\n'), ((8186, 8208), 'torch.max', 'torch.max', (['heatMaps', '(0)'], {}), '(heatMaps, 0)\n', (8195, 8208), False, 'import torch\n'), ((8449, 8595), 'torch.cat', 'torch.cat', (['(nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[0:1, :] +\n thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :])', '(0)'], {}), '((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[\n 0:1, :] + thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0)\n', (8458, 8595), False, 'import torch\n'), ((9203, 9349), 'torch.cat', 'torch.cat', (['(nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] 
- thres, nmsPoints[0:1, :] +\n thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :])', '(0)'], {}), '((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[\n 0:1, :] + thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0)\n', (9212, 9349), False, 'import torch\n'), ((9759, 9778), 'torch.zeros', 'torch.zeros', (['(0, D)'], {}), '((0, D))\n', (9770, 9778), False, 'import torch\n'), ((10167, 10221), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['coarse_desc', 'samp_pts'], {}), '(coarse_desc, samp_pts)\n', (10198, 10221), False, 'import torch\n'), ((12814, 12827), 'numpy.sum', 'np.sum', (['gauss'], {}), '(gauss)\n', (12820, 12827), True, 'import numpy as np\n'), ((10285, 10313), 'torch.norm', 'torch.norm', (['densedesc'], {'dim': '(0)'}), '(densedesc, dim=0)\n', (10295, 10313), False, 'import torch\n'), ((11137, 11158), 'math.floor', 'math.floor', (['(pt[0] - s)'], {}), '(pt[0] - s)\n', (11147, 11158), False, 'import math\n'), ((11162, 11183), 'math.floor', 'math.floor', (['(pt[1] - s)'], {}), '(pt[1] - s)\n', (11172, 11183), False, 'import math\n'), ((11210, 11231), 'math.floor', 'math.floor', (['(pt[0] + s)'], {}), '(pt[0] + s)\n', (11220, 11231), False, 'import math\n'), ((11235, 11256), 'math.floor', 'math.floor', (['(pt[1] + s)'], {}), '(pt[1] + s)\n', (11245, 11256), False, 'import math\n'), ((868, 891), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (889, 891), False, 'import datetime\n'), ((988, 1011), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1009, 1011), False, 'import datetime\n'), ((12616, 12670), 'math.pow', 'math.pow', (['((j + 1 - center_x) / (sigma_horz * width))', '(2)'], {}), '((j + 1 - center_x) / (sigma_horz * width), 2)\n', (12624, 12670), False, 'import math\n'), ((12679, 12734), 'math.pow', 'math.pow', (['((i + 1 - center_y) / (sigma_vert * height))', '(2)'], {}), '((i + 1 - center_y) / (sigma_vert * height), 2)\n', (12687, 12734), False, 'import math\n')] |
import numpy as np
def create_D(Nx, Ny):
    """Assemble the 2-D finite-difference (gradient) operator for an Nx-by-Ny grid."""
    # 1-D difference matrix: identity minus sub-diagonal, plus a closing
    # row [0, ..., 0, -1].
    forward = np.eye(Nx, Nx, k=0) - np.eye(Nx, Nx, k=-1)
    last_row = np.hstack([np.zeros(Nx - 1), -1])
    diff = np.vstack([forward, last_row])
    # Stack the x- and y-direction differences via Kronecker products.
    return np.vstack([np.kron(np.eye(Ny), diff), np.kron(diff, np.eye(Nx))])
def laplacian(Nx, Ny):
    """Return the Laplacian D^T D of the finite-difference operator on the grid."""
    diff_op = create_D(Nx, Ny)
    return diff_op.T.dot(diff_op)
| [
"numpy.eye",
"numpy.zeros"
] | [((65, 84), 'numpy.eye', 'np.eye', (['Nx', 'Nx'], {'k': '(0)'}), '(Nx, Nx, k=0)\n', (71, 84), True, 'import numpy as np\n'), ((87, 107), 'numpy.eye', 'np.eye', (['Nx', 'Nx'], {'k': '(-1)'}), '(Nx, Nx, k=-1)\n', (93, 107), True, 'import numpy as np\n'), ((172, 182), 'numpy.eye', 'np.eye', (['Ny'], {}), '(Ny)\n', (178, 182), True, 'import numpy as np\n'), ((205, 215), 'numpy.eye', 'np.eye', (['Nx'], {}), '(Nx)\n', (211, 215), True, 'import numpy as np\n'), ((120, 136), 'numpy.zeros', 'np.zeros', (['(Nx - 1)'], {}), '(Nx - 1)\n', (128, 136), True, 'import numpy as np\n')] |
import numpy as np
import cProfile
def list_add_two(l, iterations):
    """Add 2 to every element of *l*, repeated *iterations* times (pure Python)."""
    result = l
    for _ in range(iterations):
        result = [item + 2 for item in result]
    return result
def array_add_two(a, iterations):
    """Add 2 to a NumPy array, repeated *iterations* times (vectorised)."""
    result = a
    for _ in range(iterations):
        result = result + 2
    return result
def test():
    """Run the list vs. NumPy addition benchmark on a million elements."""
    data = list(range(1000000))
    arr = np.array(data)
    reps = 100
    list_add_two(data, reps)
    array_add_two(arr, reps)
# Profile the benchmark; cProfile prints per-function timing statistics.
cProfile.run('test()')
"numpy.array",
"cProfile.run"
] | [((429, 451), 'cProfile.run', 'cProfile.run', (['"""test()"""'], {}), "('test()')\n", (441, 451), False, 'import cProfile\n'), ((309, 326), 'numpy.array', 'np.array', (['my_list'], {}), '(my_list)\n', (317, 326), True, 'import numpy as np\n')] |
import numpy as np
from cost_functions import trajectory_cost_fn
import time
class Controller():
    """Abstract base class for controllers; subclasses override get_action."""

    def __init__(self):
        pass

    def get_action(self, state):
        """Return the appropriate action(s) for state(s); the base is a no-op."""
        pass
class RandomController(Controller):
    """Controller that ignores the state and samples a uniformly random action."""

    def __init__(self, env):
        self.env = env

    def get_action(self, state):
        # The state is irrelevant here: draw directly from the action space.
        return self.env.action_space.sample()
class MPCcontroller(Controller):
    """ Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
    def __init__(self,
                 env,
                 dyn_model,
                 horizon=5,
                 cost_fn=None,
                 num_simulated_paths=10,
                 ):
        self.env = env
        self.dyn_model = dyn_model
        self.horizon = horizon
        self.cost_fn = cost_fn
        self.num_simulated_paths = num_simulated_paths

    def get_action(self, state):
        """Return the first action of the random rollout with the lowest cost.

        Samples num_simulated_paths random action sequences of length
        horizon, rolls them out with the learned dynamics model, scores each
        trajectory with cost_fn via trajectory_cost_fn, and returns the first
        action of the cheapest trajectory.
        """
        # One copy of the current state per simulated path.
        obs = [state for _ in range(self.num_simulated_paths)]
        obs_list, obs_next_list, act_list = [], [], []
        for _ in range(self.horizon):
            obs_list.append(obs)
            # Plain loops/comprehensions replace the previous side-effecting
            # list comprehensions, which built and discarded throwaway lists.
            actions = [self.env.action_space.sample()
                       for _ in range(self.num_simulated_paths)]
            act_list.append(actions)
            obs = self.dyn_model.predict(np.array(obs), np.array(actions))
            obs_next_list.append(obs)
        trajectory_cost_list = trajectory_cost_fn(
            self.cost_fn, np.array(obs_list), np.array(act_list), np.array(obs_next_list))
        j = np.argmin(trajectory_cost_list)
        return act_list[0][j]
| [
"numpy.array",
"numpy.argmin"
] | [((1383, 1414), 'numpy.argmin', 'np.argmin', (['trajectory_cost_list'], {}), '(trajectory_cost_list)\n', (1392, 1414), True, 'import numpy as np\n'), ((1302, 1320), 'numpy.array', 'np.array', (['obs_list'], {}), '(obs_list)\n', (1310, 1320), True, 'import numpy as np\n'), ((1322, 1340), 'numpy.array', 'np.array', (['act_list'], {}), '(act_list)\n', (1330, 1340), True, 'import numpy as np\n'), ((1342, 1365), 'numpy.array', 'np.array', (['obs_next_list'], {}), '(obs_next_list)\n', (1350, 1365), True, 'import numpy as np\n'), ((1180, 1193), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1188, 1193), True, 'import numpy as np\n'), ((1195, 1212), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1203, 1212), True, 'import numpy as np\n')] |
"""
Run this script if you wish to save the images for any further use
and not load it as a MAT file
"""
import h5py
import os
import numpy as np
import cv2
# Input .mat file and output folders for the extracted RGB / depth images.
matPath = './data/nyu_depth_v2_labeled.mat'
img_folder = 'imgs'
dep_folder = 'deps'
if not os.path.exists(img_folder):
    os.makedirs(img_folder)
if not os.path.exists(dep_folder):
    os.makedirs(dep_folder)
# Keep the HDF5 handle open for the whole run; datasets are read lazily.
f = h5py.File(matPath)
# Target side length for the resized square images.
img_dim = 224
def save_image_dep(image_id):
    """Extract image *image_id* from the NYU .mat file and save RGB + depth JPEGs.

    Reads from the module-level HDF5 handle ``f``, resizes both the colour
    image and the depth map to img_dim x img_dim, and writes them into
    img_folder / dep_folder.
    """
    i = image_id
    # Bug fix: i is an int, so the previous '"Processing " + i' raised TypeError.
    print("Processing " + str(i))
    img = f['images'][i]
    depth = f['depths'][i]
    # The .mat layout is channel-first and transposed; reorder to HxWx3.
    # NOTE(review): channels are taken as [2,1,0] -- presumably RGB storage
    # converted to OpenCV's BGR order; confirm against the dataset spec.
    img_ = np.empty([img_dim, img_dim, 3])
    img_[:, :, 0] = cv2.resize(img[2, :, :].T, (img_dim, img_dim))
    img_[:, :, 1] = cv2.resize(img[1, :, :].T, (img_dim, img_dim))
    img_[:, :, 2] = cv2.resize(img[0, :, :].T, (img_dim, img_dim))
    # Resize the depth map once and replicate it into 3 channels
    # (previously the same resize ran three times).
    depth_ = np.empty([img_dim, img_dim, 3])
    resized_depth = cv2.resize(depth[:, :].T, (img_dim, img_dim))
    depth_[:, :, 0] = resized_depth
    depth_[:, :, 1] = resized_depth
    depth_[:, :, 2] = resized_depth
    print(np.amax(depth_))
    # NOTE(review): cv2.normalize's positional args are (src, dst, alpha,
    # beta, norm_type); passing (src, 0, 255, NORM_MINMAX) binds them oddly --
    # verify the intended scaling before relying on the output range.
    depth_ = 255. * cv2.normalize(depth_, 0, 255, cv2.NORM_MINMAX)
    cv2.imwrite(os.path.join(img_folder, 'img_{}.jpg'.format(i)), img_)
    cv2.imwrite(os.path.join(dep_folder, 'dep_{}.jpg'.format(i)), depth_)
# Bug fix: in Python 3 ``map`` is lazy, so the previous
# ``map(save_image_dep, range(...))`` never executed the extraction.
for image_id in range(len(f['images'])):
    save_image_dep(image_id)
| [
"h5py.File",
"os.makedirs",
"numpy.empty",
"os.path.exists",
"numpy.amax",
"cv2.normalize",
"cv2.resize"
] | [((375, 393), 'h5py.File', 'h5py.File', (['matPath'], {}), '(matPath)\n', (384, 393), False, 'import h5py\n'), ((251, 277), 'os.path.exists', 'os.path.exists', (['img_folder'], {}), '(img_folder)\n', (265, 277), False, 'import os\n'), ((283, 306), 'os.makedirs', 'os.makedirs', (['img_folder'], {}), '(img_folder)\n', (294, 306), False, 'import os\n'), ((314, 340), 'os.path.exists', 'os.path.exists', (['dep_folder'], {}), '(dep_folder)\n', (328, 340), False, 'import os\n'), ((346, 369), 'os.makedirs', 'os.makedirs', (['dep_folder'], {}), '(dep_folder)\n', (357, 369), False, 'import os\n'), ((545, 576), 'numpy.empty', 'np.empty', (['[img_dim, img_dim, 3]'], {}), '([img_dim, img_dim, 3])\n', (553, 576), True, 'import numpy as np\n'), ((593, 639), 'cv2.resize', 'cv2.resize', (['img[2, :, :].T', '(img_dim, img_dim)'], {}), '(img[2, :, :].T, (img_dim, img_dim))\n', (603, 639), False, 'import cv2\n'), ((654, 700), 'cv2.resize', 'cv2.resize', (['img[1, :, :].T', '(img_dim, img_dim)'], {}), '(img[1, :, :].T, (img_dim, img_dim))\n', (664, 700), False, 'import cv2\n'), ((715, 761), 'cv2.resize', 'cv2.resize', (['img[0, :, :].T', '(img_dim, img_dim)'], {}), '(img[0, :, :].T, (img_dim, img_dim))\n', (725, 761), False, 'import cv2\n'), ((771, 802), 'numpy.empty', 'np.empty', (['[img_dim, img_dim, 3]'], {}), '([img_dim, img_dim, 3])\n', (779, 802), True, 'import numpy as np\n'), ((823, 868), 'cv2.resize', 'cv2.resize', (['depth[:, :].T', '(img_dim, img_dim)'], {}), '(depth[:, :].T, (img_dim, img_dim))\n', (833, 868), False, 'import cv2\n'), ((886, 931), 'cv2.resize', 'cv2.resize', (['depth[:, :].T', '(img_dim, img_dim)'], {}), '(depth[:, :].T, (img_dim, img_dim))\n', (896, 931), False, 'import cv2\n'), ((949, 994), 'cv2.resize', 'cv2.resize', (['depth[:, :].T', '(img_dim, img_dim)'], {}), '(depth[:, :].T, (img_dim, img_dim))\n', (959, 994), False, 'import cv2\n'), ((1026, 1041), 'numpy.amax', 'np.amax', (['depth_'], {}), '(depth_)\n', (1033, 1041), True, 'import numpy as 
np\n'), ((1061, 1107), 'cv2.normalize', 'cv2.normalize', (['depth_', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(depth_, 0, 255, cv2.NORM_MINMAX)\n', (1074, 1107), False, 'import cv2\n')] |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
"""Computes the bounding sphere around sets of points."""
import argparse
from collections import Counter, OrderedDict
from itertools import chain, combinations
import logging
import os
# import miniball
import numpy as np
import pandas as pd
from cc_emergency.utils.vector_io import read_vectors
from cc_emergency.utils.vectors import angular_distance
def parse_arguments():
parser = argparse.ArgumentParser(
description='Computes the bounding sphere around sets of points.')
parser.add_argument('vector_file', help='the word vector file.')
parser.add_argument('--bev', '-b', action='append', default=[],
help='the BEV list file(s) (one word per line).')
parser.add_argument('--subsets', '-s', action='store_true',
help='also compute the statistics for the various '
'combinations and subsets of the specified '
'lists: intersections and items unique to a set.')
parser.add_argument('--write-real', '-w', action='append', default=[],
help='Write the points that "really belong" to these '
'sets (i.e. they are in the diagonals of the confusion '
'matrix) to file.')
parser.add_argument('--log-level', '-L', type=str, default='critical',
choices=['debug', 'info', 'warning', 'error', 'critical'],
help='the logging level.')
args = parser.parse_args()
if len(args.bev) == 0:
parser.error('At least one list file should be specified.')
if args.subsets and len(args.bev) == 1:
parser.error('--subsets doesn\'t make sense if there is only one list.')
return args
def filename_to_set(file_name):
"""Converts the file name to a set name."""
name = os.path.basename(file_name)
n, _, ext = name.rpartition('.')
return n if len(ext) == 3 else name
def read_stuff(vector_file, bev_files, normalize):
sets = OrderedDict()
if bev_files:
for bev_file in bev_files:
with open(bev_file) as inf:
sets[filename_to_set(bev_file)] = set(inf.read().strip().split('\n'))
words, vectors = read_vectors(
vector_file, normalize, keep_words=set(chain(*sets.values())))
return words, vectors, sets
def centroid_distribution(vectors):
"""
Momentums of a cluster based on the distances of its points from the
centriod.
"""
centroid = vectors.mean(axis=0)
centroid /= np.linalg.norm(centroid)
dists = np.squeeze(angular_distance(centroid[np.newaxis, :], vectors))
dists_mean = dists.mean()
dists_std = dists.std()
return OrderedDict([
('centroid', centroid),
('max', dists.max()),
('mean', dists_mean),
('std', dists_std),
('p_in_std', 100 * np.sum(
np.logical_and(dists_mean - dists_std < dists,
dists < dists_mean + dists_std)) / len(dists)),
('num_words', len(dists))
])
# def bounding_sphere(vectors):
# """Computes the bounding sphere of the vectors."""
# mb = miniball.Miniball(vectors)
# mb_center = mb.center()
# dists = np.squeeze(angular_distance(np.array(mb_center)[np.newaxis, :], vectors))
# dists_mean = dists.mean()
# dists_std = dists.std()
# return OrderedDict([
# ('center', mb_center),
# ('radius', np.sqrt(mb.squared_radius())),
# ('bs_max', dists.max()),
# ('bs_mean', dists_mean),
# ('bs_std', dists_std),
# ('bs_pinstd', np.sum(np.logical_and(dists_mean - dists_std < dists,
# dists < dists_mean + dists_std)) / len(dists))
# ])
def generate_subsets(set_indices, words):
unique_indices = {
s + '-unique': [i for i in indices if i not in
set(chain(*[vs for k, vs in set_indices.items() if k != s]))]
for s, indices in set_indices.items()
}
if len(set_indices) > 2:
common_indices = {
s + '-common': [i for i in indices if i not in unique_indices[s + '-unique']]
for s, indices in set_indices.items()
}
else:
common_indices = {}
intersections = {
'{}-{}'.format(s1, s2):
sorted(set(set_indices[s1]) & set(set_indices[s2]))
for s1, s2 in combinations(sorted(set_indices.keys()), 2)
}
intersections = {k: v for k, v in intersections.items() if v}
set_indices.update(unique_indices)
set_indices.update(common_indices)
set_indices.update(intersections)
def main():
args = parse_arguments()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='%(asctime)s - %(levelname)s - %(message)s')
words, vectors, sets = read_stuff(args.vector_file, args.bev, True)
vectors = np.asarray(vectors) # Miniball doesn't work on matrices
set_indices = OrderedDict((s, []) for s in sets.keys())
orig_sets = list(set_indices.keys())
print(orig_sets)
for i, word in enumerate(words):
for s, swords in sets.items():
if word in swords:
set_indices[s].append(i)
if args.subsets:
generate_subsets(set_indices, words)
centroids = np.zeros((len(orig_sets), vectors.shape[1]), dtype=vectors.dtype)
for s, indices in set_indices.items():
v = vectors[indices]
logging.debug('Computing centroid stats for set {}'.format(s))
stats = centroid_distribution(v)
logging.debug('Done computing centroid stats for set {}'.format(s))
# logging.debug('Computing bounding sphere for set {}'.format(s))
# bstats = bounding_sphere(v)
# logging.debug('Computed bounding sphere for set {}'.format(s))
centroid = stats.pop('centroid')
if s in orig_sets:
centroids[orig_sets.index(s)] = centroid
# center = bstats.pop('center')
# stats.update(bstats)
# stats['cdist'] = angular_distance(
# np.array([centroid, center / np.linalg.norm(center)]))[0, 1]
print('Stats for {}:\n {}'.format(
s, '\n '.join(': '.join(map(str, kv)) for kv in stats.items())))
# Which centroids do the points lie closest?
closest_centroid = angular_distance(centroids, vectors).argmin(axis=0)
closest_matrix = np.zeros((len(orig_sets), len(orig_sets)), dtype=int)
for i, s in enumerate(orig_sets):
for k, v in Counter(closest_centroid[set_indices[s]]).items():
closest_matrix[i, k] = v
closest_table = pd.DataFrame(data=closest_matrix, index=orig_sets,
columns=['->' + s for s in orig_sets])
print('Confusion matrix based on centroids:')
print(closest_table)
for set_to_write in args.write_real:
try:
c = orig_sets.index(set_to_write)
with open('real_' + set_to_write, 'wt') as outf:
words_to_print = [
words[i]
for i in set_indices[set_to_write]
if closest_centroid[i] == c
]
print('\n'.join(sorted(words_to_print)), file=outf)
except ValueError:
logging.warning('No such set: {}'.format(set_to_write))
if __name__ == '__main__':
main()
| [
"pandas.DataFrame",
"argparse.ArgumentParser",
"numpy.logical_and",
"os.path.basename",
"cc_emergency.utils.vectors.angular_distance",
"numpy.asarray",
"numpy.linalg.norm",
"collections.OrderedDict",
"collections.Counter"
] | [((450, 545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Computes the bounding sphere around sets of points."""'}), "(description=\n 'Computes the bounding sphere around sets of points.')\n", (473, 545), False, 'import argparse\n'), ((1903, 1930), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (1919, 1930), False, 'import os\n'), ((2072, 2085), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2083, 2085), False, 'from collections import Counter, OrderedDict\n'), ((2596, 2620), 'numpy.linalg.norm', 'np.linalg.norm', (['centroid'], {}), '(centroid)\n', (2610, 2620), True, 'import numpy as np\n'), ((4964, 4983), 'numpy.asarray', 'np.asarray', (['vectors'], {}), '(vectors)\n', (4974, 4983), True, 'import numpy as np\n'), ((6687, 6782), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'closest_matrix', 'index': 'orig_sets', 'columns': "[('->' + s) for s in orig_sets]"}), "(data=closest_matrix, index=orig_sets, columns=[('->' + s) for\n s in orig_sets])\n", (6699, 6782), True, 'import pandas as pd\n'), ((2644, 2694), 'cc_emergency.utils.vectors.angular_distance', 'angular_distance', (['centroid[np.newaxis, :]', 'vectors'], {}), '(centroid[np.newaxis, :], vectors)\n', (2660, 2694), False, 'from cc_emergency.utils.vectors import angular_distance\n'), ((6394, 6430), 'cc_emergency.utils.vectors.angular_distance', 'angular_distance', (['centroids', 'vectors'], {}), '(centroids, vectors)\n', (6410, 6430), False, 'from cc_emergency.utils.vectors import angular_distance\n'), ((6579, 6620), 'collections.Counter', 'Counter', (['closest_centroid[set_indices[s]]'], {}), '(closest_centroid[set_indices[s]])\n', (6586, 6620), False, 'from collections import Counter, OrderedDict\n'), ((2946, 3024), 'numpy.logical_and', 'np.logical_and', (['(dists_mean - dists_std < dists)', '(dists < dists_mean + dists_std)'], {}), '(dists_mean - dists_std < dists, dists < dists_mean + dists_std)\n', (2960, 3024), True, 
'import numpy as np\n')] |
from typing import Sequence, List, Optional
import numpy as np
import torch
from ..basic_typing import Tensor, TensorNCX
from ..transforms.stack import stack
def flip(array: Tensor, axis: int) -> Tensor:
"""
Flip an axis of an array
Args:
array: a :class:`numpy.ndarray` or :class:`torch.Tensor` n-dimensional array
axis: the xis to flip
Returns:
an array with specified axis flipped
"""
if isinstance(array, np.ndarray):
return np.flip(array, axis=axis)
elif isinstance(array, torch.Tensor):
return torch.flip(array, [axis])
else:
raise NotImplementedError()
def transform_batch_random_flip(
array: TensorNCX,
axis: int,
flip_probability: Optional[float] = 0.5,
flip_choices: Sequence[bool] = None) -> TensorNCX:
"""
Randomly flip an image with a given probability
Args:
array: a :class:`numpy.ndarray` or :class:`torch.Tensor` n-dimensional array. Samples are stored on axis 0
axis: the axis to flip
flip_probability: the probability that a sample is flipped
flip_choices: for each sample, `True` or `False` to indicate if the sample is flipped or not
Returns:
an array
"""
if flip_choices is None:
r = np.random.rand(array.shape[0])
flip_choices = r <= flip_probability
else:
assert len(flip_choices) == len(array)
samples = []
for flip_choice, sample in zip(flip_choices, array):
if flip_choice:
samples.append(flip(sample, axis=axis - 1))
else:
samples.append(sample)
return stack(samples)
def transform_batch_random_flip_joint(
arrays: List[TensorNCX],
axis: int,
flip_probability: float = 0.5) -> List[TensorNCX]:
"""
Randomly flip a joint images with a given probability
Args:
arrays: a list of a :class:`numpy.ndarray` or :class:`torch.Tensor` n-dimensional array. Samples for
each array are stored on axis 0
axis: the axis to flip
flip_probability: the probability that a sample is flipped
Returns:
an array
"""
assert isinstance(arrays, list), 'must be a list of arrays'
nb_samples = len(arrays[0])
for a in arrays[1:]:
assert len(a) == nb_samples
r = np.random.rand(nb_samples)
flip_choices = r <= flip_probability
transformed_arrays = [transform_batch_random_flip(a, axis=axis, flip_probability=None, flip_choices=flip_choices) for a in arrays]
return transformed_arrays
| [
"numpy.random.rand",
"torch.flip",
"numpy.flip"
] | [((2347, 2373), 'numpy.random.rand', 'np.random.rand', (['nb_samples'], {}), '(nb_samples)\n', (2361, 2373), True, 'import numpy as np\n'), ((490, 515), 'numpy.flip', 'np.flip', (['array'], {'axis': 'axis'}), '(array, axis=axis)\n', (497, 515), True, 'import numpy as np\n'), ((1298, 1328), 'numpy.random.rand', 'np.random.rand', (['array.shape[0]'], {}), '(array.shape[0])\n', (1312, 1328), True, 'import numpy as np\n'), ((573, 598), 'torch.flip', 'torch.flip', (['array', '[axis]'], {}), '(array, [axis])\n', (583, 598), False, 'import torch\n')] |
""" gradient and hessian readers
"""
import numpy
import autoread as ar
import autoparse.pattern as app
import autoparse.find as apf
def gradient(output_string):
""" read gradient from the output string
"""
# Grab a block of text containing the gradient
block_ptt = ('Molecular gradient' +
app.capturing(app.one_or_more(app.WILDCARD, greedy=False)) +
'Molecular gradient norm')
block = apf.last_capture(block_ptt, output_string)
# Trim the block to start it at the gradient lines
blank_count = 0
for i, line in enumerate(block.splitlines()):
if line.strip() == '':
blank_count += 1
if blank_count == 3:
grad_start = i
break
trim_block = '\n'.join(block.splitlines()[grad_start:])
# Grab the gradient from the trimmed block string
grad = ar.matrix.read(
trim_block,
line_start_ptt=app.LINESPACES.join([
app.LETTER,
app.escape('#') + app.UNSIGNED_INTEGER,
app.maybe(app.UNSIGNED_INTEGER)]))
print(grad)
assert numpy.shape(grad)[1] == 3
return grad
# def hessian(output_string):
# """ read hessian from the output string
# """
# try:
# comp_ptt = app.one_of_these(['X', 'Y', 'Z']) + app.UNSIGNED_INTEGER
# mat = ar.matrix.read(
# output_string,
# start_ptt=(app.escape('The second derivative matrix:') +
# app.lpadded(app.NEWLINE)),
# block_start_ptt=(app.series(comp_ptt, app.LINESPACES) +
# app.padded(app.NEWLINE)),
# line_start_ptt=comp_ptt,
# tril=True)
# except TypeError:
# comp_ptt = app.UNSIGNED_INTEGER
# mat = ar.matrix.read(
# output_string,
# val_ptt=app.EXPONENTIAL_FLOAT_D,
# start_ptt=(
# app.escape('Force constants in Cartesian coordinates:') +
# app.lpadded(app.NEWLINE)),
# block_start_ptt=(app.series(comp_ptt, app.LINESPACES) +
# app.padded(app.NEWLINE)),
# line_start_ptt=comp_ptt,
# tril=True)
#
# mat = [[_cast(apf.replace('d', 'e', dst, case=False)) for dst in row]
# for row in mat]
#
# mat = tuple(map(tuple, mat))
# return mat
| [
"autoparse.pattern.escape",
"autoparse.pattern.one_or_more",
"numpy.shape",
"autoparse.find.last_capture",
"autoparse.pattern.maybe"
] | [((443, 485), 'autoparse.find.last_capture', 'apf.last_capture', (['block_ptt', 'output_string'], {}), '(block_ptt, output_string)\n', (459, 485), True, 'import autoparse.find as apf\n'), ((1115, 1132), 'numpy.shape', 'numpy.shape', (['grad'], {}), '(grad)\n', (1126, 1132), False, 'import numpy\n'), ((340, 383), 'autoparse.pattern.one_or_more', 'app.one_or_more', (['app.WILDCARD'], {'greedy': '(False)'}), '(app.WILDCARD, greedy=False)\n', (355, 383), True, 'import autoparse.pattern as app\n'), ((1053, 1084), 'autoparse.pattern.maybe', 'app.maybe', (['app.UNSIGNED_INTEGER'], {}), '(app.UNSIGNED_INTEGER)\n', (1062, 1084), True, 'import autoparse.pattern as app\n'), ((1001, 1016), 'autoparse.pattern.escape', 'app.escape', (['"""#"""'], {}), "('#')\n", (1011, 1016), True, 'import autoparse.pattern as app\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 17:38:34 2020
@author: mlampert
"""
import os
import copy
import numpy as np
import pickle
import pandas
import time as time_module
import flap
import flap_nstx
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"../flap_nstx.cfg")
flap.config.read(file_name=fn)
flap_nstx.register()
from flap_nstx.gpi import calculate_nstx_gpi_avg_frame_velocity, calculate_nstx_gpi_smooth_velocity
from flap_nstx.thomson import flap_nstx_thomson_data, get_nstx_thomson_gradient, get_fit_nstx_thomson_profiles
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
def calculate_magnetics_spectrogram(exp_id=None,
time_range=None,
channel=1,
time_res=1e-3,
freq_res=None,
frange=None,
recalc=False,
plot=True,
pdf=False,
pdfobject=None,
):
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
filename=flap_nstx.analysis.filename(exp_id=exp_id,
working_directory=wd+'/processed_data',
time_range=time_range,
comment='magnetic_spectrogram_hf_ch'+str(channel)+'_tr_'+str(time_res)+'_frange_'+str(frange[0])+'_'+str(frange[1]),
extension='pickle')
if not recalc and not os.path.exists(filename):
print('File doesn\'t exist, needs to be calculated!')
recalc=True
if recalc or not os.path.exists(filename):
if freq_res is None:
freq_res=2/time_res
magnetics=flap.get_data('NSTX_MDSPlus',
name='\OPS_PC::\\BDOT_L1DMIVVHF'+str(channel)+'_RAW',
exp_id=139901,
object_name='MIRNOV')
magnetics.coordinates.append(copy.deepcopy(flap.Coordinate(name='Time equi',
unit='s',
mode=flap.CoordinateMode(equidistant=True),
shape = [],
start=magnetics.coordinate('Time')[0][0],
step=magnetics.coordinate('Time')[0][1]-magnetics.coordinate('Time')[0][0],
dimension_list=[0])))
n_time=int((time_range[1]-time_range[0])/time_res)
spectrum=[]
for i in range(n_time-1):
spectrum.append(flap.apsd('MIRNOV',
coordinate='Time equi',
intervals={'Time equi':flap.Intervals(time_range[0]+(i-0.5)*time_res,
time_range[0]+(i+1.5)*time_res)},
options={'Res':freq_res,
'Range':frange,
'Interval':1,
'Trend':None,
'Logarithmic':False,
'Hanning':True},
output_name='MIRNOV_TWIN_APSD').data)
time=np.arange(n_time-1)*time_res+time_range[0]
freq=flap.get_data_object_ref('MIRNOV_TWIN_APSD').coordinate('Frequency')[0]
data=np.asarray(spectrum).T
pickle.dump((time,freq,data), open(filename, 'wb'))
else:
time, freq, data = pickle.load(open(filename, 'rb'))
if plot:
import matplotlib
matplotlib.use('QT5Agg')
import matplotlib.pyplot as plt
else:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
if pdf:
filename=flap_nstx.analysis.filename(exp_id=exp_id,
working_directory=wd+'/plots',
time_range=time_range,
comment='magnetic_spectrogram_hf_ch'+str(channel)+'_tr_'+str(time_res)+'_frange_'+str(frange[0])+'_'+str(frange[1]),
extension='pdf')
spectrogram_pdf=PdfPages(filename)
plt.figure()
plt.contourf(time,
freq/1000.,
data,
locator=ticker.LogLocator(),
cmap='jet',
levels=101)
plt.title('BDOT_L1DMIVVHF'+str(channel)+' spectrogram for '+str(exp_id)+' with fres '+str(1/time_res/1000.)+'kHz')
plt.xlabel('Time [s]')
plt.ylabel('Frequency [kHz]')
plt.pause(0.001)
if pdf:
spectrogram_pdf.savefig()
spectrogram_pdf.close()
def calculate_elm_db_magnetics_spectrogram(channel=1,
recalc=False,
pdf=True,
plot=True):
database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'
db=pandas.read_csv(database_file, index_col=0)
elm_index=list(db.index)
shot_elm={'Shot':[],
'ELM':[]}
for index_elm in range(len(elm_index)):
elm_time=db.loc[index_elm]['ELM time']/1000.
shot=int(db.loc[index_elm]['Shot'])
shot_elm['Shot'].append(shot)
shot_elm['ELM'].append(elm_time)
unique_shots=np.unique(shot_elm['Shot'])
n_shot=unique_shots.shape[0]
ind_step=0.
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
pdfobject=PdfPages(wd+'/plots/all_elm_spectrograms.pdf')
for u_shot in unique_shots:
start_time=time_module.time()
ind=np.where(shot_elm['Shot'] == u_shot)[0]
spec=calculate_magnetics_spectrogram(exp_id=u_shot,
time_range=[shot_elm['ELM'][int(ind[0])]-10e-3,shot_elm['ELM'][int(ind[-1])]+10e-3],
time_res=0.1e-3,
frange=[20e3,1000e3],
channel=channel,
recalc=recalc,
pdf=pdf,
plot=plot,
pdfobject=pdfobject)
pdfobject.savefig()
spec=calculate_magnetics_spectrogram(exp_id=u_shot,
time_range=[shot_elm['ELM'][int(ind[0])]-10e-3,shot_elm['ELM'][int(ind[-1])]+10e-3],
time_res=1e-3,
frange=[2e3,1000e3],
channel=channel,
recalc=recalc,
pdf=pdf,
plot=plot,
pdfobject=pdfobject)
pdfobject.savefig()
print('Remaining time is: '+str((time_module.time()-start_time)*(n_shot-1-ind_step)))
ind_step+=1
pdfobject.close() | [
"matplotlib.backends.backend_pdf.PdfPages",
"pandas.read_csv",
"flap.CoordinateMode",
"matplotlib.pyplot.figure",
"numpy.arange",
"os.path.join",
"numpy.unique",
"flap.config.get_all_section",
"flap.get_data_object_ref",
"os.path.exists",
"flap.Intervals",
"matplotlib.pyplot.pause",
"os.path... | [((298, 339), 'os.path.join', 'os.path.join', (['thisdir', '"""../flap_nstx.cfg"""'], {}), "(thisdir, '../flap_nstx.cfg')\n", (310, 339), False, 'import os\n'), ((339, 369), 'flap.config.read', 'flap.config.read', ([], {'file_name': 'fn'}), '(file_name=fn)\n', (355, 369), False, 'import flap\n'), ((370, 390), 'flap_nstx.register', 'flap_nstx.register', ([], {}), '()\n', (388, 390), False, 'import flap_nstx\n'), ((265, 291), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (281, 291), False, 'import os\n'), ((4698, 4710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4708, 4710), True, 'import matplotlib.pyplot as plt\n'), ((5014, 5036), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (5024, 5036), True, 'import matplotlib.pyplot as plt\n'), ((5041, 5070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency [kHz]"""'], {}), "('Frequency [kHz]')\n", (5051, 5070), True, 'import matplotlib.pyplot as plt\n'), ((5075, 5091), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (5084, 5091), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5558), 'pandas.read_csv', 'pandas.read_csv', (['database_file'], {'index_col': '(0)'}), '(database_file, index_col=0)\n', (5530, 5558), False, 'import pandas\n'), ((5880, 5907), 'numpy.unique', 'np.unique', (["shot_elm['Shot']"], {}), "(shot_elm['Shot'])\n", (5889, 5907), True, 'import numpy as np\n'), ((6051, 6099), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (["(wd + '/plots/all_elm_spectrograms.pdf')"], {}), "(wd + '/plots/all_elm_spectrograms.pdf')\n", (6059, 6099), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((1236, 1282), 'flap.config.get_all_section', 'flap.config.get_all_section', (['"""Module NSTX_GPI"""'], {}), "('Module NSTX_GPI')\n", (1263, 1282), False, 'import flap\n'), ((4056, 4080), 'matplotlib.use', 'matplotlib.use', (['"""QT5Agg"""'], {}), "('QT5Agg')\n", (4070, 
4080), False, 'import matplotlib\n'), ((4165, 4186), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (4179, 4186), False, 'import matplotlib\n'), ((4675, 4693), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['filename'], {}), '(filename)\n', (4683, 4693), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((5969, 6015), 'flap.config.get_all_section', 'flap.config.get_all_section', (['"""Module NSTX_GPI"""'], {}), "('Module NSTX_GPI')\n", (5996, 6015), False, 'import flap\n'), ((6154, 6172), 'time.time', 'time_module.time', ([], {}), '()\n', (6170, 6172), True, 'import time as time_module\n'), ((1750, 1774), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1764, 1774), False, 'import os\n'), ((1879, 1903), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1893, 1903), False, 'import os\n'), ((3855, 3875), 'numpy.asarray', 'np.asarray', (['spectrum'], {}), '(spectrum)\n', (3865, 3875), True, 'import numpy as np\n'), ((4812, 4831), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {}), '()\n', (4829, 4831), False, 'from matplotlib import ticker\n'), ((6185, 6221), 'numpy.where', 'np.where', (["(shot_elm['Shot'] == u_shot)"], {}), "(shot_elm['Shot'] == u_shot)\n", (6193, 6221), True, 'import numpy as np\n'), ((3714, 3735), 'numpy.arange', 'np.arange', (['(n_time - 1)'], {}), '(n_time - 1)\n', (3723, 3735), True, 'import numpy as np\n'), ((3770, 3814), 'flap.get_data_object_ref', 'flap.get_data_object_ref', (['"""MIRNOV_TWIN_APSD"""'], {}), "('MIRNOV_TWIN_APSD')\n", (3794, 3814), False, 'import flap\n'), ((2398, 2435), 'flap.CoordinateMode', 'flap.CoordinateMode', ([], {'equidistant': '(True)'}), '(equidistant=True)\n', (2417, 2435), False, 'import flap\n'), ((7576, 7594), 'time.time', 'time_module.time', ([], {}), '()\n', (7592, 7594), True, 'import time as time_module\n'), ((3088, 3183), 'flap.Intervals', 'flap.Intervals', (['(time_range[0] + (i - 0.5) * time_res)', 
'(time_range[0] + (i + 1.5) * time_res)'], {}), '(time_range[0] + (i - 0.5) * time_res, time_range[0] + (i + \n 1.5) * time_res)\n', (3102, 3183), False, 'import flap\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 14:54:37 2021
@author: dv516
"""
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from test_functions import rosenbrock_constrained
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
def Problem_rosenbrock(x):
f1 = rosenbrock_constrained.rosenbrock_f
g1 = rosenbrock_constrained.rosenbrock_g1
g2 = rosenbrock_constrained.rosenbrock_g2
return f1(x), [g1(x), g2(x)]
def Problem_rosenbrockRand(x):
f1 = rosenbrock_constrained.rosenbrock_f
f_noise = np.random.normal(0, 0.05)
g1 = rosenbrock_constrained.rosenbrock_g1
g2 = rosenbrock_constrained.rosenbrock_g2
g_noise1 = np.random.normal(0, 0.02)
g_noise2 = np.random.normal(0, 0.02)
return f1(x) + f_noise, [g1(x) + g_noise1, g2(x) + g_noise2]
def Problem_rosenbrockSAA(x):
N_SAA = 5
f_SAA = 0
g_SAA1, g_SAA2 = - np.inf, -np.inf
f1 = rosenbrock_constrained.rosenbrock_f
g1 = rosenbrock_constrained.rosenbrock_g1
g2 = rosenbrock_constrained.rosenbrock_g2
for i in range(N_SAA):
f_SAA += (f1(x) + np.random.normal(0, 0.05))/N_SAA
g_SAA1 = max(g1(x) + np.random.normal(0, 0.02), g_SAA1)
g_SAA2 = max(g2(x) + np.random.normal(0, 0.02), g_SAA2)
return f_SAA, [g_SAA1, g_SAA2]
def Problem_rosenbrockNoise(x, noise_std, N_SAA):
f_SAA = 0
g_SAA1, g_SAA2 = - np.inf, -np.inf
f1 = rosenbrock_constrained.rosenbrock_f
g1 = rosenbrock_constrained.rosenbrock_g1
g2 = rosenbrock_constrained.rosenbrock_g2
for i in range(N_SAA):
f_SAA += (f1(x) + np.random.normal(0, noise_std[0]))/N_SAA
g_SAA1 = max(g1(x) + np.random.normal(0, noise_std[1]), g_SAA1)
g_SAA2 = max(g2(x) + np.random.normal(0, noise_std[2]), g_SAA2)
return f_SAA, [g_SAA1, g_SAA2]
bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
x0 = np.array([-0.5,1.5])
# max_f_eval = 100
# max_it = 50
nbr_feval = 30
N = 10
RB_Bayes_list = []
for i in range(N):
Bayes = BayesOpt()
pyro.set_rng_seed(i)
RB_Bayes = Bayes.solve(Problem_rosenbrock, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
RB_Bayes_list.append(RB_Bayes)
print('10 BayesOpt deterministic iterations completed')
with open('BayesRB_list.pickle', 'wb') as handle:
pickle.dump(RB_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
N = 10
RBRand_Bayes_list = []
for i in range(N):
Bayes = BayesOpt()
pyro.set_rng_seed(i)
RBRand_Bayes = Bayes.solve(Problem_rosenbrockRand, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
RBRand_Bayes_list.append(RBRand_Bayes)
print('10 BayesOpt random iterations completed')
with open('BayesRB_listRand.pickle', 'wb') as handle:
pickle.dump(RBRand_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
N = 10
RBSAA_Bayes_list = []
for i in range(N):
Bayes = BayesOpt()
pyro.set_rng_seed(i)
RBSAA_Bayes = Bayes.solve(Problem_rosenbrockSAA, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
RBSAA_Bayes_list.append(RBSAA_Bayes)
print('10 BayesOpt iterations completed')
with open('BayesRB_listRandSAA.pickle', 'wb') as handle:
pickle.dump(RBSAA_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
n_noise = 6
noise_matrix = np.zeros((n_noise, 3))
for i in range(n_noise):
noise_matrix[i] = np.array([0.05/3, 0.02/3, 0.02/3])*i
bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
x0 = np.array([-0.5,1.5])
max_f_eval = 50 ; N_SAA = 1
N_SAA = 1
N_samples = 20
RBnoise_list_Bayes = []
RBconstraint_list_Bayes = []
for i in range(n_noise):
print('Outer Iteration ', i+1, ' out of ', n_noise,' of BayesOpt')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: Problem_rosenbrockNoise(x, noise_matrix[i], N_SAA)
sol = Bayes.solve(f, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
best.append(sol['f_best_so_far'][-1])
_, g = Problem_rosenbrockNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RBnoise_list_Bayes.append(best)
RBconstraint_list_Bayes.append(best_constr)
with open('BayesRB_listNoiseConv.pickle', 'wb') as handle:
pickle.dump(RBnoise_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('BayesRB_listNoiseConstr.pickle', 'wb') as handle:
pickle.dump(RBconstraint_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
N_SAA = 2
nbr_feval = 30
N_samples = 20
RBnoiseSAA_list_Bayes = []
RBconstraintSAA_list_Bayes = []
for i in range(n_noise):
print('Outer Iteration ', i+1, ' out of ', n_noise,' of BayesOpt')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: Problem_rosenbrockNoise(x, noise_matrix[i], N_SAA)
sol = Bayes.solve(f, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
best.append(sol['f_best_so_far'][-1])
_, g = Problem_rosenbrockNoise(sol['x_best_so_far'][-1], [0, 0, 0], N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RBnoiseSAA_list_Bayes.append(best)
RBconstraintSAA_list_Bayes.append(best_constr)
with open('BayesRB_listNoiseConvSAA.pickle', 'wb') as handle:
pickle.dump(RBnoiseSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('BayesRB_listNoiseConstrSAA.pickle', 'wb') as handle:
pickle.dump(RBconstraintSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
| [
"pyro.enable_validation",
"pickle.dump",
"numpy.maximum",
"algorithms.Bayesian_opt_Pyro.utilities_full.BayesOpt",
"numpy.zeros",
"pyro.set_rng_seed",
"numpy.array",
"numpy.random.normal"
] | [((247, 275), 'pyro.enable_validation', 'pyro.enable_validation', (['(True)'], {}), '(True)\n', (269, 275), False, 'import pyro\n'), ((1869, 1905), 'numpy.array', 'np.array', (['[[-1.5, 1.5], [-1.5, 1.5]]'], {}), '([[-1.5, 1.5], [-1.5, 1.5]])\n', (1877, 1905), True, 'import numpy as np\n'), ((1908, 1929), 'numpy.array', 'np.array', (['[-0.5, 1.5]'], {}), '([-0.5, 1.5])\n', (1916, 1929), True, 'import numpy as np\n'), ((3690, 3712), 'numpy.zeros', 'np.zeros', (['(n_noise, 3)'], {}), '((n_noise, 3))\n', (3698, 3712), True, 'import numpy as np\n'), ((3807, 3843), 'numpy.array', 'np.array', (['[[-1.5, 1.5], [-1.5, 1.5]]'], {}), '([[-1.5, 1.5], [-1.5, 1.5]])\n', (3815, 3843), True, 'import numpy as np\n'), ((3846, 3867), 'numpy.array', 'np.array', (['[-0.5, 1.5]'], {}), '([-0.5, 1.5])\n', (3854, 3867), True, 'import numpy as np\n'), ((593, 618), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (609, 618), True, 'import numpy as np\n'), ((726, 751), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.02)'], {}), '(0, 0.02)\n', (742, 751), True, 'import numpy as np\n'), ((767, 792), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.02)'], {}), '(0, 0.02)\n', (783, 792), True, 'import numpy as np\n'), ((2038, 2048), 'algorithms.Bayesian_opt_Pyro.utilities_full.BayesOpt', 'BayesOpt', ([], {}), '()\n', (2046, 2048), False, 'from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt\n'), ((2053, 2073), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['i'], {}), '(i)\n', (2070, 2073), False, 'import pyro\n'), ((2458, 2526), 'pickle.dump', 'pickle.dump', (['RB_Bayes_list', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RB_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (2469, 2526), False, 'import pickle\n'), ((2589, 2599), 'algorithms.Bayesian_opt_Pyro.utilities_full.BayesOpt', 'BayesOpt', ([], {}), '()\n', (2597, 2599), False, 'from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt\n'), ((2604, 
2624), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['i'], {}), '(i)\n', (2621, 2624), False, 'import pyro\n'), ((3022, 3094), 'pickle.dump', 'pickle.dump', (['RBRand_Bayes_list', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RBRand_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (3033, 3094), False, 'import pickle\n'), ((3160, 3170), 'algorithms.Bayesian_opt_Pyro.utilities_full.BayesOpt', 'BayesOpt', ([], {}), '()\n', (3168, 3170), False, 'from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt\n'), ((3175, 3195), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['i'], {}), '(i)\n', (3192, 3195), False, 'import pyro\n'), ((3585, 3656), 'pickle.dump', 'pickle.dump', (['RBSAA_Bayes_list', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RBSAA_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (3596, 3656), False, 'import pickle\n'), ((4754, 4827), 'pickle.dump', 'pickle.dump', (['RBnoise_list_Bayes', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RBnoise_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (4765, 4827), False, 'import pickle\n'), ((4898, 4976), 'pickle.dump', 'pickle.dump', (['RBconstraint_list_Bayes', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RBconstraint_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (4909, 4976), False, 'import pickle\n'), ((5869, 5945), 'pickle.dump', 'pickle.dump', (['RBnoiseSAA_list_Bayes', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RBnoiseSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (5880, 5945), False, 'import pickle\n'), ((6019, 6105), 'pickle.dump', 'pickle.dump', (['RBconstraintSAA_list_Bayes', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(RBconstraintSAA_list_Bayes, handle, protocol=pickle.\n HIGHEST_PROTOCOL)\n', (6030, 6105), False, 'import pickle\n'), ((3760, 3800), 'numpy.array', 'np.array', (['[0.05 / 3, 0.02 / 3, 0.02 / 3]'], {}), '([0.05 / 3, 0.02 / 3, 0.02 / 3])\n', (3768, 3800), True, 'import numpy as 
np\n'), ((1147, 1172), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (1163, 1172), True, 'import numpy as np\n'), ((1209, 1234), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.02)'], {}), '(0, 0.02)\n', (1225, 1234), True, 'import numpy as np\n'), ((1273, 1298), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.02)'], {}), '(0, 0.02)\n', (1289, 1298), True, 'import numpy as np\n'), ((1638, 1671), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_std[0]'], {}), '(0, noise_std[0])\n', (1654, 1671), True, 'import numpy as np\n'), ((1708, 1741), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_std[1]'], {}), '(0, noise_std[1])\n', (1724, 1741), True, 'import numpy as np\n'), ((1780, 1813), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_std[2]'], {}), '(0, noise_std[2])\n', (1796, 1813), True, 'import numpy as np\n'), ((4587, 4603), 'numpy.maximum', 'np.maximum', (['g', '(0)'], {}), '(g, 0)\n', (4597, 4603), True, 'import numpy as np\n'), ((5693, 5709), 'numpy.maximum', 'np.maximum', (['g', '(0)'], {}), '(g, 0)\n', (5703, 5709), True, 'import numpy as np\n')] |
# Copyright 2020 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from argparse import ArgumentParser, Namespace
from datetime import datetime
from pathlib import Path
from typing import Optional, Callable, Union
import math
import numpy as np
from mtap import Pipeline, Event, EventsClient, RemoteProcessor, Document
from mtap.processing import PipelineResult, ProcessingResult, ProcessingSource
SERVICES = ['events', 'sentences', 'tagger']
class PipelineConf:
    """Configuration for the biomedicus default pipeline to connect to.

    Unless a ``SERVICE_address`` is set explicitly or ``use_discovery`` is
    true, each service address defaults to ``host:SERVICE_port``.
    """

    def __init__(self):
        self.id = ""
        self.use_discovery = False
        self.host = '127.0.0.1'
        # Per-service ports, optional explicit addresses and service ids.
        self.events_port = '10100'
        self.events_address = None
        self.sentences_port = '10102'
        self.sentences_address = None
        self.sentences_id = 'biomedicus-sentences'
        self.tagger_port = '10103'
        self.tagger_address = None
        self.tagger_id = 'biomedicus-tnt-tagger'
        # Runtime options.
        self.include_label_text = False
        self.workers = 1
        self.serializer = None
        self.input_directory = None
        self.output_directory = None
        self.limit = sys.maxsize

    def populate_addresses(self):
        """Fill any unset ``SERVICE_address`` from ``host`` and ``SERVICE_port``."""
        for name in SERVICES:
            address_attr = name + '_address'
            if getattr(self, address_attr) is None:
                port = getattr(self, name + '_port')
                setattr(self, address_attr, self.host + ':' + port)
class DefaultPipeline:
    """The biomedicus default pipeline for processing clinical documents.

    Attributes
        events_client (mtap.EventsClient): An MTAP events client used by the pipeline.
        pipeline (mtap.Pipeline): An MTAP pipeline to use to process documents.
    """

    def __init__(self, conf: PipelineConf, *, events_client: EventsClient = None):
        conf.populate_addresses()
        if events_client is not None:
            # Caller owns this client; do not close it on exit.
            self.close_client = False
            self.events_client = events_client
        elif conf.events_address is not None:
            self.close_client = True
            self.events_client = EventsClient(address=conf.events_address)
        else:
            raise ValueError("Events client or address not specified.")
        components = [
            (conf.sentences_id, conf.sentences_address),
            (conf.tagger_id, conf.tagger_address),
        ]
        if conf.use_discovery:
            # Addresses are ignored; processors are resolved via discovery.
            processors = [RemoteProcessor(name) for name, _ in components]
        else:
            processors = [RemoteProcessor(name, address=address)
                          for name, address in components]
        self.pipeline = Pipeline(*processors)

    def process_text(self, text: str, *, event_id: str = None) -> ProcessingResult:
        """Run the pipeline over *text* and return the processing result."""
        with Event(event_id=event_id, client=self.events_client) as event:
            document = event.create_document('plaintext', text=text)
            return self.pipeline.run(document)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.pipeline.close()
        if self.close_client:
            self.events_client.close()
def _add_address(parser: ArgumentParser, service: str, default_port: str,
service_id: Optional[str] = None):
mutex = parser.add_mutually_exclusive_group()
mutex.add_argument('--' + service + '-port', default=default_port,
help='The port for the ' + service + ' service to use in conjunction with '
'the default host.')
mutex.add_argument('--' + service + '-address', default=None,
help='A full address (host and port) to use instead of the default host '
'and --' + service + '-port.')
if service_id is not None:
parser.add_argument('--' + service + '-id', default=service_id,
help='A service ID to use instead of the default service ID.')
def run_default_pipeline(conf: PipelineConf):
    """Process every *.txt file under conf.input_directory with the default
    pipeline, printing throughput and writing timing stats (a CSV plus two
    .npy arrays) whose filenames are keyed by conf.id and conf.workers.
    """
    with DefaultPipeline(conf) as default_pipeline:
        input_dir = Path(conf.input_directory)
        # Document count for progress reporting, capped by conf.limit.
        total = min(sum(1 for _ in input_dir.rglob('*.txt')), conf.limit)
        times = []
        chars = []
        class Source(ProcessingSource):
            # Feeds documents into the pipeline and records per-document stats.
            def provide(self, consume: Callable[[Union[Document, Event]], None]):
                for i, path in enumerate(input_dir.rglob('*.txt'), start=1):
                    if i > conf.limit:
                        break
                    with path.open('r', errors='replace') as f:
                        txt = f.read()
                    # The path relative to input_dir serves as the event id.
                    relative = str(path.relative_to(input_dir))
                    with Event(event_id=relative, client=default_pipeline.events_client,
                              only_create_new=True) as e:
                        doc = e.create_document('plaintext', txt)
                        consume(doc)
            def receive_result(self, result: PipelineResult, event: Event):
                # Record elapsed time and document length for the stats below.
                times.append(result.elapsed_time)
                chars.append(len(event.documents['plaintext'].text))
        start = datetime.now()
        default_pipeline.pipeline.run_multithread(Source(), total=total, n_threads=conf.workers)
        duration = datetime.now() - start
        print('Total time elapsed:', duration)
        print('Per document time:', duration / total)
        # Dump pipeline-level and per-processor timing statistics as CSV.
        with open('{}-times-{}workers.csv'.format(conf.id, conf.workers), 'w') as f:
            f.write(default_pipeline.pipeline.pipeline_timer_stats().csv_header())
            for line in default_pipeline.pipeline.pipeline_timer_stats().timing_csv():
                f.write(line)
            for proc in default_pipeline.pipeline.processor_timer_stats():
                for line in proc.timing_csv():
                    f.write(line)
        # Save per-document elapsed seconds and character counts as .npy files.
        i = list(map(lambda x: x.total_seconds(), times))
        np.save('{}-times-series-{}-workers'.format(conf.id, conf.workers), np.array(i))
        np.save('{}-chars-per-doc-{}-workers'.format(conf.id, conf.workers), np.array(chars))
def main(args=None):
    """Parse command-line arguments for the biomedicus default pipeline and run it."""
    arg_parser = ArgumentParser(add_help=False)
    arg_parser.add_argument('input_directory', help="The input directory of text files to process.")
    arg_parser.add_argument('--host', default='127.0.0.1',
                            help='A hostname to connect to for all services.')
    # Per-service connection options (port/address mutually exclusive).
    _add_address(arg_parser, 'events', '10100')
    _add_address(arg_parser, 'sentences', '10102', 'biomedicus-sentences')
    _add_address(arg_parser, 'tagger', '10103', 'biomedicus-tnt-tagger')
    arg_parser.add_argument('--id', default="", help="An identifier for output files.")
    arg_parser.add_argument('--workers', default=1, type=int,
                            help="The number of workers to process with.")
    arg_parser.add_argument('--limit', default=math.inf, type=int,
                            help="The number of documents (at most) to process.")
    arg_parser.add_argument('--use-discovery', action='store_true',
                            help="If this flag is specified, all ports will be ignored and instead "
                                 "service discovery will be used to connect to services.")
    arg_parser.add_argument('--serializer', default='json', choices=['json', 'yml', 'pickle'],
                            help="The identifier for the serializer to use, see MTAP serializers.")
    arg_parser.add_argument('--include-label-text', action='store_true',
                            help="Flag to include the covered text for every label")
    # Parse directly into a PipelineConf so its defaults stay authoritative.
    conf = arg_parser.parse_args(args, namespace=PipelineConf())
    run_default_pipeline(conf)


if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"mtap.RemoteProcessor",
"mtap.Event",
"mtap.EventsClient",
"pathlib.Path",
"numpy.array",
"datetime.datetime.now"
] | [((6962, 6992), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (6976, 6992), False, 'from argparse import ArgumentParser, Namespace\n'), ((4839, 4865), 'pathlib.Path', 'Path', (['conf.input_directory'], {}), '(conf.input_directory)\n', (4843, 4865), False, 'from pathlib import Path\n'), ((5879, 5893), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5891, 5893), False, 'from datetime import datetime\n'), ((3485, 3536), 'mtap.Event', 'Event', ([], {'event_id': 'event_id', 'client': 'self.events_client'}), '(event_id=event_id, client=self.events_client)\n', (3490, 3536), False, 'from mtap import Pipeline, Event, EventsClient, RemoteProcessor, Document\n'), ((6010, 6024), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6022, 6024), False, 'from datetime import datetime\n'), ((6709, 6720), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (6717, 6720), True, 'import numpy as np\n'), ((6799, 6814), 'numpy.array', 'np.array', (['chars'], {}), '(chars)\n', (6807, 6814), True, 'import numpy as np\n'), ((2800, 2841), 'mtap.EventsClient', 'EventsClient', ([], {'address': 'conf.events_address'}), '(address=conf.events_address)\n', (2812, 2841), False, 'from mtap import Pipeline, Event, EventsClient, RemoteProcessor, Document\n'), ((3154, 3181), 'mtap.RemoteProcessor', 'RemoteProcessor', (['identifier'], {}), '(identifier)\n', (3169, 3181), False, 'from mtap import Pipeline, Event, EventsClient, RemoteProcessor, Document\n'), ((3297, 3338), 'mtap.RemoteProcessor', 'RemoteProcessor', (['identifier'], {'address': 'addr'}), '(identifier, address=addr)\n', (3312, 3338), False, 'from mtap import Pipeline, Event, EventsClient, RemoteProcessor, Document\n'), ((5440, 5529), 'mtap.Event', 'Event', ([], {'event_id': 'relative', 'client': 'default_pipeline.events_client', 'only_create_new': '(True)'}), '(event_id=relative, client=default_pipeline.events_client,\n only_create_new=True)\n', (5445, 5529), False, 
'from mtap import Pipeline, Event, EventsClient, RemoteProcessor, Document\n')] |
from dataclasses import dataclass
import os
import pathlib
import random
import numpy as np
import torch
from logger import logger
class Configuration(object):
    """Helpers that pin every RNG (torch, numpy, python stdlib) to one seed."""

    DEFAULT_RANDOM_SEED = 777

    @classmethod
    def apply(cls, random_seed=DEFAULT_RANDOM_SEED):
        """Seed all three RNG sources and log completion."""
        Configuration.set_torch_seed(random_seed=random_seed)
        Configuration.set_numpy_seed(random_seed=random_seed)
        Configuration.set_python_random_seed(random_seed=random_seed)
        logger.info(f"Complete to apply the random seed, RANDOM_SEED : {random_seed}")

    @classmethod
    def set_torch_seed(cls, random_seed):
        """Seed torch CPU/CUDA generators and force deterministic cuDNN."""
        torch.manual_seed(random_seed)
        torch.cuda.manual_seed(random_seed)
        cudnn = torch.backends.cudnn
        cudnn.deterministic = True
        cudnn.benchmark = False

    @classmethod
    def set_numpy_seed(cls, random_seed):
        """Seed numpy's global random state."""
        np.random.seed(random_seed)

    @classmethod
    def set_python_random_seed(cls, random_seed):
        """Seed the stdlib `random` module."""
        random.seed(random_seed)
@dataclass
class ApplicationProperties:
    """Central registry of project directory paths and runtime constants.

    Note: every attribute below is a plain class attribute (no annotations),
    so this dataclass declares zero fields; the decorator is used only so the
    generated __init__ invokes __post_init__ on instantiation.
    """
    # Absolute path of the directory containing this module.
    CURRENT_MODULE_PATH = pathlib.Path(__file__).parent.absolute()
    # Well-known project sub-directories, all rooted at the module directory.
    CONFIG_DIRECTORY_PATH = os.path.join(CURRENT_MODULE_PATH, "config")
    DATA_DIRECTORY_PATH = os.path.join(CURRENT_MODULE_PATH, "data")
    DATASET_DIRECTORY_PATH = os.path.join(CURRENT_MODULE_PATH, "dataset")
    MODEL_DIRECTORY_PATH = os.path.join(CURRENT_MODULE_PATH, "model")
    INFERENCE_DIRECTORY_PATH = os.path.join(CURRENT_MODULE_PATH, "inference")
    TRAINER_DIRECTORY_PATH = os.path.join(CURRENT_MODULE_PATH, "trainer")
    DEFAULT_RANDOM_SEED = 777
    # Device name strings ("cpu" / "cuda").
    DEVICE_CPU = "cpu"
    DEVICE_GPU = "cuda"
    def __post_init__(self):
        # Seed every RNG as soon as an instance is created.
        Configuration.apply(random_seed=self.DEFAULT_RANDOM_SEED)
# Module-level singleton; importing this module seeds the RNGs via __post_init__.
APPLICATION_PROPERTIES = ApplicationProperties()
| [
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"logger.logger.info",
"pathlib.Path",
"random.seed",
"os.path.join"
] | [((1124, 1167), 'os.path.join', 'os.path.join', (['CURRENT_MODULE_PATH', '"""config"""'], {}), "(CURRENT_MODULE_PATH, 'config')\n", (1136, 1167), False, 'import os\n'), ((1194, 1235), 'os.path.join', 'os.path.join', (['CURRENT_MODULE_PATH', '"""data"""'], {}), "(CURRENT_MODULE_PATH, 'data')\n", (1206, 1235), False, 'import os\n'), ((1265, 1309), 'os.path.join', 'os.path.join', (['CURRENT_MODULE_PATH', '"""dataset"""'], {}), "(CURRENT_MODULE_PATH, 'dataset')\n", (1277, 1309), False, 'import os\n'), ((1337, 1379), 'os.path.join', 'os.path.join', (['CURRENT_MODULE_PATH', '"""model"""'], {}), "(CURRENT_MODULE_PATH, 'model')\n", (1349, 1379), False, 'import os\n'), ((1411, 1457), 'os.path.join', 'os.path.join', (['CURRENT_MODULE_PATH', '"""inference"""'], {}), "(CURRENT_MODULE_PATH, 'inference')\n", (1423, 1457), False, 'import os\n'), ((1487, 1531), 'os.path.join', 'os.path.join', (['CURRENT_MODULE_PATH', '"""trainer"""'], {}), "(CURRENT_MODULE_PATH, 'trainer')\n", (1499, 1531), False, 'import os\n'), ((469, 547), 'logger.logger.info', 'logger.info', (['f"""Complete to apply the random seed, RANDOM_SEED : {random_seed}"""'], {}), "(f'Complete to apply the random seed, RANDOM_SEED : {random_seed}')\n", (480, 547), False, 'from logger import logger\n'), ((616, 646), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (633, 646), False, 'import torch\n'), ((655, 690), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['random_seed'], {}), '(random_seed)\n', (677, 690), False, 'import torch\n'), ((857, 884), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (871, 884), True, 'import numpy as np\n'), ((961, 985), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (972, 985), False, 'import random\n'), ((1054, 1076), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1066, 1076), False, 'import pathlib\n')] |
import numpy as np
import torch
from .distributions import CRP_Generator
from ..utils.graph_utils import shuffle_adj_matrix_batch_and_labels
from ..utils.graph_utils import create_torch_geom_batch, create_dgl_batch
from ..utils.graph_utils import create_torch_geom_single_graph, create_dgl_single_graph
from ..utils.graph_positional_encoding import laplacian_positional_encoding
def get_sbm_beta_crp_generator(params):
    """Build an SBM_BetaGenerator whose partitions come from a CRP prior.

    Args:
        params: dict providing 'alpha' and 'maxK' for the CRP, plus the keys
            SBM_BetaGenerator itself reads.
    """
    crp = CRP_Generator(alpha=params['alpha'], maxK=params['maxK'])
    return SBM_BetaGenerator(params, partition_generator=crp)
class SBM_BetaGenerator():
    """Generate graphs using stochastic block model (SBM) parameterized by Beta distributions.

    Cluster sizes come from the injected ``partition_generator``; within- and
    between-cluster edge probabilities are drawn from Beta distributions whose
    parameters are read from ``params``.
    """
    def __init__(self, params, partition_generator):
        # partition_generator.generate(N=...) must yield positive cluster sizes.
        self.partition_generator = partition_generator
        self.params = params
        self.random_embed_dim = params['random_embed_dim']
        self.pos_enc_dim = params['pos_enc_dim']
    def generate(self, N=None, batch_size=1):
        """Sample a batch of SBM graphs sharing one node partition.

        Returns:
            adj_matrix: float tensor of shape (batch_size, N, N), symmetric
                with a zero diagonal.
            labels: int tensor of length N with per-node cluster ids.
            node_features: output of build_node_features.
        """
        if N is None:
            N = np.random.randint(self.params['Nmin'], self.params['Nmax'])
        # Beta parameters: (alpha0, beta0) between clusters, (alpha1, beta1) within.
        alpha0 = self.params['between_alpha']
        beta0 = self.params['between_beta']
        alpha1 = self.params['within_alpha']
        beta1 = self.params['within_beta']
        clusters = self.partition_generator.generate(N=N)
        assert(np.all(clusters > 0))
        # Realized node count is the sum of cluster sizes.
        N = np.sum(clusters)
        K = len(clusters)
        # cumsum[i]:cumsum[i+1] are the node indices belonging to cluster i.
        cumsum = np.cumsum(np.insert(clusters, 0, [0]))
        adj_matrix = np.zeros([batch_size, N, N])
        labels = np.empty(N, dtype=np.int32)
        for i in range(K):
            for j in range(i, K):
                if j == i:
                    p = np.random.beta(alpha1, beta1, batch_size).reshape(
                        [batch_size, 1])
                else:
                    p = np.random.beta(alpha0, beta0, batch_size).reshape(
                        [batch_size, 1])
                # Broadcast the per-graph edge probability over the block,
                # then sample Bernoulli edges by thresholding uniforms.
                p = np.repeat(p, clusters[i]*clusters[j], axis=1).reshape(
                    [batch_size, clusters[i], clusters[j]])
                rands = np.random.rand(batch_size, clusters[i], clusters[j])
                adj_matrix[:, cumsum[i]:cumsum[i+1],
                           cumsum[j]:cumsum[j+1]] = (rands < p)
                labels[cumsum[i]:cumsum[i+1]] = i
        # make the matrix symmetric (copy upper triangle down, zero diagonal)
        i_lower = np.tril_indices(N, -1)
        idd = (np.arange(N), np.arange(N))
        for b in range(batch_size):
            adj_matrix[b, :, :][i_lower] = adj_matrix[b, :, :].T[i_lower]
            adj_matrix[b, :, :][idd] = 0
        # shuffle the assignment order and relabel clusters so that they appear in order
        adj_matrix, labels = shuffle_adj_matrix_batch_and_labels(
            adj_matrix, labels)
        adj_matrix = torch.from_numpy(adj_matrix).float()
        labels = torch.from_numpy(labels).int()
        # node features
        node_features = self.build_node_features(adj_matrix)
        return adj_matrix, labels, node_features
    def build_node_features(self, adj_matrix):
        """Build (batch, N, d) node features.

        Random Gaussian embeddings and/or Laplacian positional encodings,
        concatenated along the last axis (each enabled when its dim is set).
        """
        batch_size, N = adj_matrix.shape[0], adj_matrix.shape[1]
        node_features = []
        if self.random_embed_dim:
            node_features.append(
                torch.normal(0, 1, size=(batch_size, N, self.random_embed_dim)))
        if self.pos_enc_dim:
            pos_enc = torch.zeros(batch_size, N, self.pos_enc_dim)
            for b in range(batch_size):
                pos_enc[b] = laplacian_positional_encoding(
                    adj_matrix[b], self.pos_enc_dim)
            node_features.append(pos_enc)
        node_features = torch.cat(node_features, dim=-1)
        # append extra features if needed
        return node_features
    def generate_batch(self, batch_size, data_lib, device):
        """Sample a batch and wrap it for the given graph library.

        data_lib is 'torch_geom' or 'dgl'; returns (batch, labels).
        """
        # nodes are not sorted by labels, as we can sort the encoder output in the model
        adj_matrix, labels, node_features = self.generate(
            batch_size=batch_size)
        if data_lib == "torch_geom":
            batch = create_torch_geom_batch(adj_matrix, node_features, device)
        elif data_lib == "dgl":
            batch = create_dgl_batch(adj_matrix, node_features, device)
        else:
            raise ValueError("data_lib should be 'torch_geom' or 'dgl'")
        return batch, labels
    def generate_single(self, data_lib, device):
        """Sample a single graph and wrap it for the given graph library.

        The pre-squeeze (1, N, N) adjacency shape is stashed on ``data.shape``.
        """
        adj_matrix, labels, node_features = self.generate(batch_size=1)
        shape = adj_matrix.shape
        adj_matrix, node_features = adj_matrix[0], node_features[0]
        if data_lib == "torch_geom":
            data = create_torch_geom_single_graph(
                adj_matrix, node_features, device)
        elif data_lib == "dgl":
            data = create_dgl_single_graph(adj_matrix, node_features, device)
        else:
            raise ValueError("data_lib should be 'torch_geom' or 'dgl'")
        labels = labels.to(device)
        data.shape = shape
        return data, labels
| [
"numpy.tril_indices",
"numpy.sum",
"numpy.random.rand",
"numpy.empty",
"numpy.random.beta",
"numpy.zeros",
"torch.cat",
"numpy.insert",
"torch.normal",
"numpy.random.randint",
"numpy.arange",
"numpy.repeat",
"torch.zeros",
"numpy.all",
"torch.from_numpy"
] | [((1412, 1432), 'numpy.all', 'np.all', (['(clusters > 0)'], {}), '(clusters > 0)\n', (1418, 1432), True, 'import numpy as np\n'), ((1446, 1462), 'numpy.sum', 'np.sum', (['clusters'], {}), '(clusters)\n', (1452, 1462), True, 'import numpy as np\n'), ((1567, 1595), 'numpy.zeros', 'np.zeros', (['[batch_size, N, N]'], {}), '([batch_size, N, N])\n', (1575, 1595), True, 'import numpy as np\n'), ((1613, 1640), 'numpy.empty', 'np.empty', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (1621, 1640), True, 'import numpy as np\n'), ((2418, 2440), 'numpy.tril_indices', 'np.tril_indices', (['N', '(-1)'], {}), '(N, -1)\n', (2433, 2440), True, 'import numpy as np\n'), ((3672, 3704), 'torch.cat', 'torch.cat', (['node_features'], {'dim': '(-1)'}), '(node_features, dim=-1)\n', (3681, 3704), False, 'import torch\n'), ((1099, 1158), 'numpy.random.randint', 'np.random.randint', (["self.params['Nmin']", "self.params['Nmax']"], {}), "(self.params['Nmin'], self.params['Nmax'])\n", (1116, 1158), True, 'import numpy as np\n'), ((1517, 1544), 'numpy.insert', 'np.insert', (['clusters', '(0)', '[0]'], {}), '(clusters, 0, [0])\n', (1526, 1544), True, 'import numpy as np\n'), ((2456, 2468), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2465, 2468), True, 'import numpy as np\n'), ((2470, 2482), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2479, 2482), True, 'import numpy as np\n'), ((3408, 3452), 'torch.zeros', 'torch.zeros', (['batch_size', 'N', 'self.pos_enc_dim'], {}), '(batch_size, N, self.pos_enc_dim)\n', (3419, 3452), False, 'import torch\n'), ((2146, 2198), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'clusters[i]', 'clusters[j]'], {}), '(batch_size, clusters[i], clusters[j])\n', (2160, 2198), True, 'import numpy as np\n'), ((2845, 2873), 'torch.from_numpy', 'torch.from_numpy', (['adj_matrix'], {}), '(adj_matrix)\n', (2861, 2873), False, 'import torch\n'), ((2899, 2923), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (2915, 
2923), False, 'import torch\n'), ((3291, 3354), 'torch.normal', 'torch.normal', (['(0)', '(1)'], {'size': '(batch_size, N, self.random_embed_dim)'}), '(0, 1, size=(batch_size, N, self.random_embed_dim))\n', (3303, 3354), False, 'import torch\n'), ((2006, 2053), 'numpy.repeat', 'np.repeat', (['p', '(clusters[i] * clusters[j])'], {'axis': '(1)'}), '(p, clusters[i] * clusters[j], axis=1)\n', (2015, 2053), True, 'import numpy as np\n'), ((1755, 1796), 'numpy.random.beta', 'np.random.beta', (['alpha1', 'beta1', 'batch_size'], {}), '(alpha1, beta1, batch_size)\n', (1769, 1796), True, 'import numpy as np\n'), ((1893, 1934), 'numpy.random.beta', 'np.random.beta', (['alpha0', 'beta0', 'batch_size'], {}), '(alpha0, beta0, batch_size)\n', (1907, 1934), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import os
import time
import datetime
from joblib import Parallel, delayed
data_path = '../data/'
in_dir = os.path.join(data_path, 'backtest/')
### create order folders ####
def generate_order(df, start, end):
    """Build synthetic orders from a backtest frame.

    Takes rows [start, end) of each trading day, averages per
    (date, instrument), and sizes each order as a log-normal multiple of the
    average volume.

    Args:
        df: frame indexed by a MultiIndex whose level 0 is the instrument and
            whose level named 'date' (level 2) is the trading day; must carry
            '$volume0' and '$vwap0' columns.
        start, end: positional row bounds taken within each day.

    Returns:
        DataFrame indexed by (date, instrument) with 'amount' and
        'order_type' columns (order_type is always 0).
    """
    # Keep rows [start, end) of each day; droplevel removes the group key
    # level that groupby().take() prepends.
    df = df.groupby('date').take(range(start, end)).droplevel(level=0)
    # Average per (date, instrument): index levels 2 and 0 of the original
    # index.  (A dead rolling-mean computation of '$volume0' was removed
    # here; its result was never used.)
    order = df.groupby(level=(2, 0)).mean().dropna()
    # Order size: one log-normal draw scales the average volume.
    order['amount'] = np.random.lognormal(-3.28, 1.14) * order['$volume0']
    order['order_type'] = 0
    return order.drop(columns=["$volume0", "$vwap0"])
def w_order(f, start, end):
    """Generate orders for one instrument file and persist date-based splits.

    Splits: train < 2020-12-01 <= valid < 2021-01-01 <= test, plus an 'all'
    split holding every generated order.  Files are written under
    ``data_path/order/<split>/`` with the input's last 9 characters replaced
    by '.target'.  Returns 0 so callers can sum completions.
    """
    df = pd.read_pickle(in_dir + f)
    order = generate_order(df, start, end)
    order_train = order[order.index.get_level_values(0) < '2020-12-01']
    order_test = order[order.index.get_level_values(0) >= '2020-12-01']
    order_valid = order_test[order_test.index.get_level_values(0) < '2021-01-01']
    order_test = order_test[order_test.index.get_level_values(0) >= '2021-01-01']

    def _dump(frame, split):
        # Persist a non-empty split as <data_path>/order/<split>/<name>.target.
        if len(frame) == 0:
            return
        out_dir = os.path.join(data_path, 'order/{}/'.format(split))
        os.makedirs(out_dir, exist_ok=True)
        frame.to_pickle(out_dir + f[:-9] + '.target')

    _dump(order_train, 'train')
    _dump(order_valid, 'valid')
    _dump(order_test, 'test')
    # Bug fix: the 'all' split previously saved order_test instead of order.
    _dump(order, 'all')
    return 0
# Fan out over every instrument file in parallel (64 workers), building
# orders from the first 239 intraday rows of each trading day.  w_order
# always returns 0, so the printed sum just confirms all jobs completed.
res = Parallel(n_jobs=64)(delayed(w_order)(f, 0, 239) for f in os.listdir(in_dir))
print(sum(res))
| [
"pandas.DataFrame",
"os.listdir",
"os.makedirs",
"os.path.exists",
"joblib.Parallel",
"pandas.read_pickle",
"joblib.delayed",
"os.path.join",
"numpy.random.lognormal"
] | [((147, 183), 'os.path.join', 'os.path.join', (['data_path', '"""backtest/"""'], {}), "(data_path, 'backtest/')\n", (159, 183), False, 'import os\n'), ((605, 624), 'pandas.DataFrame', 'pd.DataFrame', (['order'], {}), '(order)\n', (617, 624), True, 'import pandas as pd\n'), ((838, 864), 'pandas.read_pickle', 'pd.read_pickle', (['(in_dir + f)'], {}), '(in_dir + f)\n', (852, 864), True, 'import pandas as pd\n'), ((2399, 2418), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(64)'}), '(n_jobs=64)\n', (2407, 2418), False, 'from joblib import Parallel, delayed\n'), ((647, 679), 'numpy.random.lognormal', 'np.random.lognormal', (['(-3.28)', '(1.14)'], {}), '(-3.28, 1.14)\n', (666, 679), True, 'import numpy as np\n'), ((1525, 1564), 'os.path.join', 'os.path.join', (['data_path', '"""order/train/"""'], {}), "(data_path, 'order/train/')\n", (1537, 1564), False, 'import os\n'), ((1757, 1796), 'os.path.join', 'os.path.join', (['data_path', '"""order/valid/"""'], {}), "(data_path, 'order/valid/')\n", (1769, 1796), False, 'import os\n'), ((1987, 2025), 'os.path.join', 'os.path.join', (['data_path', '"""order/test/"""'], {}), "(data_path, 'order/test/')\n", (1999, 2025), False, 'import os\n'), ((2206, 2243), 'os.path.join', 'os.path.join', (['data_path', '"""order/all/"""'], {}), "(data_path, 'order/all/')\n", (2218, 2243), False, 'import os\n'), ((1580, 1606), 'os.path.exists', 'os.path.exists', (['train_path'], {}), '(train_path)\n', (1594, 1606), False, 'import os\n'), ((1620, 1643), 'os.makedirs', 'os.makedirs', (['train_path'], {}), '(train_path)\n', (1631, 1643), False, 'import os\n'), ((1812, 1838), 'os.path.exists', 'os.path.exists', (['valid_path'], {}), '(valid_path)\n', (1826, 1838), False, 'import os\n'), ((1852, 1875), 'os.makedirs', 'os.makedirs', (['valid_path'], {}), '(valid_path)\n', (1863, 1875), False, 'import os\n'), ((2041, 2066), 'os.path.exists', 'os.path.exists', (['test_path'], {}), '(test_path)\n', (2055, 2066), False, 'import os\n'), ((2080, 2102), 
'os.makedirs', 'os.makedirs', (['test_path'], {}), '(test_path)\n', (2091, 2102), False, 'import os\n'), ((2259, 2283), 'os.path.exists', 'os.path.exists', (['all_path'], {}), '(all_path)\n', (2273, 2283), False, 'import os\n'), ((2297, 2318), 'os.makedirs', 'os.makedirs', (['all_path'], {}), '(all_path)\n', (2308, 2318), False, 'import os\n'), ((2419, 2435), 'joblib.delayed', 'delayed', (['w_order'], {}), '(w_order)\n', (2426, 2435), False, 'from joblib import Parallel, delayed\n'), ((2456, 2474), 'os.listdir', 'os.listdir', (['in_dir'], {}), '(in_dir)\n', (2466, 2474), False, 'import os\n')] |
import chainer
import chainer.functions as F
import chainer.initializers as I
import chainer.links as L
import chainer.optimizers as O
from chainer import reporter
import numpy as np
def toOneHot(n, n_participants):
    """Return float32 one-hot row(s) of width *n_participants* for index *n*.

    *n* may be a scalar or an index array; the result has one one-hot row per
    index.
    """
    identity = np.eye(n_participants, dtype=np.float32)
    return identity[n]
class BaselineClassifier(chainer.Chain):
    """Baseline classifier that always predicts the constant `mean` logits.

    Args:
        mean: value filled into every output logit.
        loss_func: callable(prediction, target) -> chainer loss variable.
    """

    def __init__(self, mean, loss_func):
        # Bug fix: super() previously referenced the undefined name
        # `Baseline`, which raised NameError on construction.
        super(BaselineClassifier, self).__init__()
        self.loss_func = loss_func
        self.mean = mean

    def __call__(self, inputs, target):
        """Report loss and accuracy of the constant prediction, return the loss."""
        o = np.full(target.shape, self.mean, dtype=np.float32)
        loss = self.loss_func(o, target)
        acc = F.accuracy(F.softmax(o), target)
        reporter.report({'loss': loss}, self)
        reporter.report({'accuracy':acc}, self)
        return loss
class EyetrackingClassifier(chainer.Chain):
    """Neural classifier of eye-tracking reading measures.

    Embeds a window of word ids plus optional side features (POS tags, word
    lengths, per-participant previous fixations, frequency, surprisal),
    passes the concatenation through ``n_layers`` hidden layers with
    activation ``out`` and predicts one of ``n_classes`` classes.
    """
    def __init__(self, n_vocab, n_units, n_participants, n_classes, loss_func, out, n_layers=0, window=0, wlen=False,
                 pos=False, prev_fix=False, freq=False, surprisal=False, n_pos=None, n_hidden=200, n_pos_units=50, loss_ratio=1.0):
        super(EyetrackingClassifier, self).__init__()
        self.n_units = n_units
        self.n_pos_units = n_pos_units
        # Feature toggles; each adds a section to the concatenated input.
        self.pos = pos
        self.wlen = wlen
        self.prev_fix = prev_fix
        self.freq = freq
        self.surprisal = surprisal
        self.loss_ratio = loss_ratio
        self.n_participants = n_participants
        self.n_layers = n_layers
        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            # n_inputs accumulates the feature width of one window position.
            n_inputs = n_units
            if self.pos:
                assert(n_pos)
                self.embed_pos = L.EmbedID(n_pos, n_pos_units, initialW=I.Uniform(1. / n_pos_units))
                n_inputs += n_pos_units
            if self.prev_fix:
                n_inputs += n_participants
            if self.wlen:
                n_inputs += 1
            if self.freq:
                n_inputs += 1
            if self.surprisal:
                n_inputs += 1
            # The input holds window + 1 positions' worth of features.
            n_inputs *= (window + 1)
            if n_layers > 0:
                self.layer0 = L.Linear(n_inputs, n_hidden, initialW=I.Uniform(1. / n_hidden))
                for i in range(1, n_layers):
                    setattr(self, 'layer{}'.format(i), L.Linear(n_hidden, n_hidden, initialW=I.Uniform(1. / n_hidden)))
                self.outlayer = L.Linear(n_hidden, n_classes, initialW=I.Uniform(1. / n_hidden))
            else:
                # No hidden layers: a single linear map from inputs to classes.
                self.outlayer = L.Linear(n_inputs, n_classes, initialW=I.Uniform(1. / n_inputs))
        self.out = out
        self.loss_func = loss_func
    def _embed_input(self, inputs):
        """Embed all enabled features and concatenate them into one batch matrix."""
        variables = []
        w = chainer.Variable(inputs['words'], name='words')
        # Flatten the window of word embeddings into one row per example.
        e_w = F.reshape(self.embed(w), (-1,w.shape[1]*self.n_units))
        variables.append(e_w)
        if self.pos:
            p = chainer.Variable(inputs['pos'], name='pos_tags')
            e_p = F.reshape(self.embed_pos(p), (-1,w.shape[1]*self.n_pos_units))
            e_p.name = 'pos_embeddings'
            variables.append(e_p)
        if self.wlen:
            l = chainer.Variable(inputs['wlen'], name='word_lengths')
            variables.append(l)
        if self.prev_fix:
            # Previous fixations arrive as participant indices; expand to one-hot.
            t = chainer.Variable(toOneHot(inputs['prev_fix'], self.n_participants), name='previous_fixations')
            t = F.reshape(t, (-1, w.shape[1]*self.n_participants))
            t.name = 'previous_fixations'
            variables.append(t)
        if self.freq:
            f = chainer.Variable(inputs['freq'], name='frequency')
            variables.append(f)
        if self.surprisal:
            s = chainer.Variable(inputs['surprisal'], name='surprisal')
            variables.append(s)
        h = F.concat(tuple(variables), axis=1)# * (1. / w.shape[1])
        h.name = 'concatenated_word_embeddings'
        return h
    def __call__(self, inputs, target):
        """Compute the (loss_ratio-scaled) loss and report loss/accuracy."""
        target = chainer.Variable(target, name='target')
        h = self._embed_input(inputs)
        for i in range(self.n_layers):
            h = self.out(getattr(self,'layer{}'.format(i))(h))
        o = self.outlayer(h)
        o.name = 'output_time_prediction'
        loss = self.loss_func(o, target)
        acc = F.accuracy(F.softmax(o), target)
        reporter.report({'loss': loss}, self)
        reporter.report({'accuracy':acc}, self)
        # The loss is scaled by loss_ratio before being returned.
        return self.loss_ratio * loss
    def inference(self, inputs):
        """Return softmax class probabilities for *inputs* (no loss/reporting)."""
        h = self._embed_input(inputs)
        for i in range(self.n_layers):
            h = self.out(getattr(self,'layer{}'.format(i))(h))
        o = self.outlayer(h)
        return F.softmax(o)
class BaselineLinreg(chainer.Chain):
    """Baseline regressor that always predicts the constant `mean` value.

    Args:
        mean: value predicted for every target.
        loss_func: callable(prediction, target) -> chainer loss variable.
    """

    def __init__(self, mean, loss_func):
        # Bug fix: super() previously referenced the undefined name
        # `Baseline`, which raised NameError on construction.
        super(BaselineLinreg, self).__init__()
        self.loss_func = loss_func
        self.mean = mean

    def __call__(self, inputs, target):
        """Report and return the loss of the constant-mean prediction."""
        loss = self.loss_func(np.full(target.shape, self.mean, dtype=np.float32), target)
        reporter.report({'loss': loss}, self)
        return loss
class EyetrackingLinreg(chainer.Chain):
    """Regression variant of the eye-tracking model.

    Predicts a single continuous reading measure from the same feature set as
    the classifier; POS tags are encoded one-hot here (the embedding variant
    is left commented out below).
    """
    def __init__(self, n_vocab, n_units, loss_func, out, window=0, n_layers=0, n_hidden=200, wlen=False,
                 pos=False, prev_fix=False, freq=False, surprisal=False, n_pos=None, n_pos_units=50, loss_ratio=1.0):
        super(EyetrackingLinreg, self).__init__()
        self.n_units = n_units
        self.n_pos_units = n_pos_units
        # Feature toggles; each adds a section to the concatenated input.
        self.pos = pos
        self.wlen = wlen
        self.prev_fix = prev_fix
        self.freq = freq
        self.surprisal = surprisal
        self.loss_ratio = loss_ratio
        self.n_layers = n_layers
        self.n_pos = n_pos
        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            # n_inputs accumulates the feature width of one window position.
            n_inputs = n_units
            if self.pos:
                assert(n_pos)
                ## embedding
                # self.embed_pos = L.EmbedID(n_pos, n_pos_units, initialW=I.Uniform(1. / n_pos_units))
                # n_inputs += n_pos_units
                ## one-hot
                n_inputs += n_pos
            if self.prev_fix:
                n_inputs += 1
            if self.wlen:
                n_inputs += 1
            if self.freq:
                n_inputs += 1
            if self.surprisal:
                n_inputs += 1
            # The input holds window + 1 positions' worth of features.
            n_inputs *= (window + 1)
            if n_layers > 0:
                self.layer0 = L.Linear(n_inputs, n_hidden, initialW=I.Uniform(1. / n_hidden))
                for i in range(1, n_layers):
                    setattr(self, 'layer{}'.format(i), L.Linear(n_hidden, n_hidden, initialW=I.Uniform(1. / n_hidden)))
                # Regression head: a single output unit.
                self.outlayer = L.Linear(n_hidden, 1, initialW=I.Uniform(1. / n_hidden))
            else:
                self.outlayer = L.Linear(n_inputs, 1, initialW=I.Uniform(1. / n_inputs))
        self.out = out
        self.loss_func = loss_func
    def _embed_input(self, inputs):
        """Embed all enabled features and concatenate them into one batch matrix."""
        variables = []
        w = chainer.Variable(inputs['words'], name='words')
        # Flatten the window of word embeddings into one row per example.
        e_w = F.reshape(self.embed(w), (-1,w.shape[1]*self.n_units))
        variables.append(e_w)
        if self.pos:
            ## embedding
            # p = chainer.Variable(inputs['pos'], name='pos_tags')
            # e_p = F.reshape(self.embed_pos(p), (-1,w.shape[1]*self.n_pos_units))
            # e_p.name = 'pos_embeddings'
            ## one-hot
            p = chainer.Variable(toOneHot(inputs['pos'],self.n_pos), name='pos_tags')
            e_p = F.reshape(p, (-1,w.shape[1]*self.n_pos))
            e_p.name = 'postags_onehot'
            variables.append(e_p)
        if self.wlen:
            l = chainer.Variable(inputs['wlen'], name='word_lengths')
            variables.append(l)
        if self.prev_fix:
            # Unlike the classifier, previous fixations are used as-is here.
            t = chainer.Variable(inputs['prev_fix'], name='previous_fixations')
            t.name = 'previous_fixations'
            variables.append(t)
        if self.freq:
            f = chainer.Variable(inputs['freq'], name='frequency')
            variables.append(f)
        if self.surprisal:
            s = chainer.Variable(inputs['surprisal'], name='surprisal')
            variables.append(s)
        h = F.concat(tuple(variables), axis=1)# * (1. / w.shape[1])
        h.name = 'concatenated_word_embeddings'
        return h
    def __call__(self, inputs, target):
        """Compute the (loss_ratio-scaled) regression loss and report it."""
        target = chainer.Variable(target, name='target')
        h = self._embed_input(inputs)
        for i in range(self.n_layers):
            h = self.out(getattr(self,'layer{}'.format(i))(h))
        # Note: the activation `out` is also applied after the output layer.
        o = self.out(self.outlayer(h))
        o.name = 'output_time_prediction'
        loss = self.loss_func(o, target)
        reporter.report({'loss': loss}, self)
        # The loss is scaled by loss_ratio before being returned.
        return self.loss_ratio * loss
    def inference(self, inputs):
        """Return raw predicted values for *inputs* (no loss/reporting)."""
        h = self._embed_input(inputs)
        for i in range(self.n_layers):
            h = self.out(getattr(self,'layer{}'.format(i))(h))
        o = self.out(self.outlayer(h))
        return o.data
def convert(batch, device):
    """Move a (features-dict, targets) batch to the given device.

    Args:
        batch: tuple ``(x, targets)`` where ``x`` maps feature names to arrays.
        device: GPU id; values >= 0 move all data to the GPU, negative values
            leave everything on the CPU.

    Returns:
        The (possibly GPU-resident) ``(x, targets)`` pair.
    """
    x, targets = batch
    if device >= 0:
        # Bug fix: `cuda` was referenced without ever being imported; import
        # it lazily so CPU-only runs never need chainer's CUDA support.
        from chainer import cuda
        for k in x:
            x[k] = cuda.to_gpu(x[k])
        targets = cuda.to_gpu(targets)
    return x, targets
"numpy.full",
"chainer.Variable",
"chainer.reporter.report",
"chainer.functions.reshape",
"chainer.functions.softmax",
"numpy.eye",
"chainer.initializers.Uniform"
] | [((227, 267), 'numpy.eye', 'np.eye', (['n_participants'], {'dtype': 'np.float32'}), '(n_participants, dtype=np.float32)\n', (233, 267), True, 'import numpy as np\n'), ((523, 573), 'numpy.full', 'np.full', (['target.shape', 'self.mean'], {'dtype': 'np.float32'}), '(target.shape, self.mean, dtype=np.float32)\n', (530, 573), True, 'import numpy as np\n'), ((670, 707), 'chainer.reporter.report', 'reporter.report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (685, 707), False, 'from chainer import reporter\n'), ((716, 756), 'chainer.reporter.report', 'reporter.report', (["{'accuracy': acc}", 'self'], {}), "({'accuracy': acc}, self)\n", (731, 756), False, 'from chainer import reporter\n'), ((2768, 2815), 'chainer.Variable', 'chainer.Variable', (["inputs['words']"], {'name': '"""words"""'}), "(inputs['words'], name='words')\n", (2784, 2815), False, 'import chainer\n'), ((4008, 4047), 'chainer.Variable', 'chainer.Variable', (['target'], {'name': '"""target"""'}), "(target, name='target')\n", (4024, 4047), False, 'import chainer\n'), ((4381, 4418), 'chainer.reporter.report', 'reporter.report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (4396, 4418), False, 'from chainer import reporter\n'), ((4427, 4467), 'chainer.reporter.report', 'reporter.report', (["{'accuracy': acc}", 'self'], {}), "({'accuracy': acc}, self)\n", (4442, 4467), False, 'from chainer import reporter\n'), ((4748, 4760), 'chainer.functions.softmax', 'F.softmax', (['o'], {}), '(o)\n', (4757, 4760), True, 'import chainer.functions as F\n'), ((5081, 5118), 'chainer.reporter.report', 'reporter.report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (5096, 5118), False, 'from chainer import reporter\n'), ((7148, 7195), 'chainer.Variable', 'chainer.Variable', (["inputs['words']"], {'name': '"""words"""'}), "(inputs['words'], name='words')\n", (7164, 7195), False, 'import chainer\n'), ((8542, 8581), 'chainer.Variable', 'chainer.Variable', (['target'], {'name': 
'"""target"""'}), "(target, name='target')\n", (8558, 8581), False, 'import chainer\n'), ((8878, 8915), 'chainer.reporter.report', 'reporter.report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (8893, 8915), False, 'from chainer import reporter\n'), ((640, 652), 'chainer.functions.softmax', 'F.softmax', (['o'], {}), '(o)\n', (649, 652), True, 'import chainer.functions as F\n'), ((2953, 3001), 'chainer.Variable', 'chainer.Variable', (["inputs['pos']"], {'name': '"""pos_tags"""'}), "(inputs['pos'], name='pos_tags')\n", (2969, 3001), False, 'import chainer\n'), ((3196, 3249), 'chainer.Variable', 'chainer.Variable', (["inputs['wlen']"], {'name': '"""word_lengths"""'}), "(inputs['wlen'], name='word_lengths')\n", (3212, 3249), False, 'import chainer\n'), ((3436, 3488), 'chainer.functions.reshape', 'F.reshape', (['t', '(-1, w.shape[1] * self.n_participants)'], {}), '(t, (-1, w.shape[1] * self.n_participants))\n', (3445, 3488), True, 'import chainer.functions as F\n'), ((3600, 3650), 'chainer.Variable', 'chainer.Variable', (["inputs['freq']"], {'name': '"""frequency"""'}), "(inputs['freq'], name='frequency')\n", (3616, 3650), False, 'import chainer\n'), ((3727, 3782), 'chainer.Variable', 'chainer.Variable', (["inputs['surprisal']"], {'name': '"""surprisal"""'}), "(inputs['surprisal'], name='surprisal')\n", (3743, 3782), False, 'import chainer\n'), ((4351, 4363), 'chainer.functions.softmax', 'F.softmax', (['o'], {}), '(o)\n', (4360, 4363), True, 'import chainer.functions as F\n'), ((5013, 5063), 'numpy.full', 'np.full', (['target.shape', 'self.mean'], {'dtype': 'np.float32'}), '(target.shape, self.mean, dtype=np.float32)\n', (5020, 5063), True, 'import numpy as np\n'), ((7674, 7717), 'chainer.functions.reshape', 'F.reshape', (['p', '(-1, w.shape[1] * self.n_pos)'], {}), '(p, (-1, w.shape[1] * self.n_pos))\n', (7683, 7717), True, 'import chainer.functions as F\n'), ((7828, 7881), 'chainer.Variable', 'chainer.Variable', (["inputs['wlen']"], {'name': 
'"""word_lengths"""'}), "(inputs['wlen'], name='word_lengths')\n", (7844, 7881), False, 'import chainer\n'), ((7957, 8020), 'chainer.Variable', 'chainer.Variable', (["inputs['prev_fix']"], {'name': '"""previous_fixations"""'}), "(inputs['prev_fix'], name='previous_fixations')\n", (7973, 8020), False, 'import chainer\n'), ((8134, 8184), 'chainer.Variable', 'chainer.Variable', (["inputs['freq']"], {'name': '"""frequency"""'}), "(inputs['freq'], name='frequency')\n", (8150, 8184), False, 'import chainer\n'), ((8261, 8316), 'chainer.Variable', 'chainer.Variable', (["inputs['surprisal']"], {'name': '"""surprisal"""'}), "(inputs['surprisal'], name='surprisal')\n", (8277, 8316), False, 'import chainer\n'), ((1548, 1572), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_units)'], {}), '(1.0 / n_units)\n', (1557, 1572), True, 'import chainer.initializers as I\n'), ((5858, 5882), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_units)'], {}), '(1.0 / n_units)\n', (5867, 5882), True, 'import chainer.initializers as I\n'), ((1733, 1761), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_pos_units)'], {}), '(1.0 / n_pos_units)\n', (1742, 1761), True, 'import chainer.initializers as I\n'), ((2224, 2249), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_hidden)'], {}), '(1.0 / n_hidden)\n', (2233, 2249), True, 'import chainer.initializers as I\n'), ((2486, 2511), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_hidden)'], {}), '(1.0 / n_hidden)\n', (2495, 2511), True, 'import chainer.initializers as I\n'), ((2601, 2626), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_inputs)'], {}), '(1.0 / n_inputs)\n', (2610, 2626), True, 'import chainer.initializers as I\n'), ((6620, 6645), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_hidden)'], {}), '(1.0 / n_hidden)\n', (6629, 6645), True, 'import chainer.initializers as I\n'), ((6874, 6899), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_hidden)'], {}), '(1.0 / 
n_hidden)\n', (6883, 6899), True, 'import chainer.initializers as I\n'), ((6981, 7006), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_inputs)'], {}), '(1.0 / n_inputs)\n', (6990, 7006), True, 'import chainer.initializers as I\n'), ((2388, 2413), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_hidden)'], {}), '(1.0 / n_hidden)\n', (2397, 2413), True, 'import chainer.initializers as I\n'), ((6784, 6809), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_hidden)'], {}), '(1.0 / n_hidden)\n', (6793, 6809), True, 'import chainer.initializers as I\n')] |
import glob
from time import time
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.optimizers import RMSprop
from numpy.random import seed
from sklearn.model_selection import StratifiedKFold
from keras.models import load_model
from keras. models import Model
from pyemd import emd_samples
from models.models import *
from models.SelfAttentionModule import *
from utils.configs import *
from utils.metric import *
from utils.ultilities import *
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ info/warning logs
seed(1)  # fix the NumPy RNG for reproducible runs
from tensorflow import set_random_seed
set_random_seed(2)  # fix the TensorFlow graph-level RNG as well
from keras.callbacks import LambdaCallback
import sys
def generator_test(b_s, imgs_test_path):
    """Yield batches of preprocessed test images forever.

    Args:
        b_s: batch size.
        imgs_test_path: directory containing the test images.

    Yields:
        A list of input tensors whose number of scales depends on the
        global ``net`` name: "ms*" nets get full + half resolution,
        "ts*" nets get full + half + quarter, anything else gets a
        single full-resolution input.
    """
    images = [imgs_test_path + f for f in os.listdir(imgs_test_path)
              if f.endswith(('.jpg', '.jpeg', '.png'))]
    images.sort()
    # (removed: a dead `gaussian = np.zeros(...)` buffer that was allocated
    # on every call but never used)
    counter = 0
    while True:
        if net.startswith("ms"):
            yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c),
                   preprocess_images(images[counter:counter + b_s], int(shape_r / 2), int(shape_c / 2))]
        elif net.startswith("ts"):
            yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c),
                   preprocess_images(images[counter:counter + b_s], int(shape_r / 2), int(shape_c / 2)),
                   preprocess_images(images[counter:counter + b_s], int(shape_r / 4), int(shape_c / 4))]
        else:
            yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c)]
        # wrap around so the generator never raises StopIteration
        counter = (counter + b_s) % len(images)
def load_data():
    """Load every training batch into memory and build 10 stratified folds.

    Returns:
        folds: list of (train_idx, val_idx) pairs from a seeded
            10-fold StratifiedKFold over the batches.
        X_train: list of per-batch input lists (scales depend on ``net``).
        Y_train: list of per-batch target lists [map, map, fixation map].
    """
    img_ext = ('.jpg', '.jpeg', '.png')
    images = sorted(imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(img_ext))
    maps = sorted(maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(img_ext))
    fixs = sorted(fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat'))
    X_train, Y_train = [], []
    offset = 0
    while True:
        batch_imgs = images[offset:offset + b_s]
        Y = preprocess_maps(maps[offset:offset + b_s], shape_r_out, shape_c_out)
        Y_fix = preprocess_fixmaps_salicon(fixs[offset:offset + b_s], shape_r_out, shape_c_out)
        if net.startswith("ms"):
            X = [preprocess_images(batch_imgs, shape_r, shape_c),
                 preprocess_images(batch_imgs, int(shape_r / 2), int(shape_c / 2))]
        elif net.startswith("ts"):
            X = [preprocess_images(batch_imgs, shape_r, shape_c),
                 preprocess_images(batch_imgs, int(shape_r / 2), int(shape_c / 2)),
                 preprocess_images(batch_imgs, int(shape_r / 4), int(shape_c / 4))]
        else:
            X = [preprocess_images(batch_imgs, shape_r, shape_c)]
        X_train.append(X)
        Y_train.append([Y, Y, Y_fix])
        # stop once we wrap around to the start of the image list
        offset = (offset + b_s) % len(images)
        if offset == 0:
            break
    # dummy single-class labels: stratification here only shuffles/splits
    y_dummy = np.zeros(shape=(len(X_train), 1))
    folds = list(StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(X_train, y_dummy))
    return folds, X_train, Y_train
def batch_generator(X, Y, batch_size=1):
    """Endlessly cycle through (x, y) pairs taken from the two parallel lists."""
    while True:
        yield from zip(X, Y)
def batch_generator_test(X):
    """Endlessly cycle through the samples of X, one at a time."""
    while True:
        yield from X
def create_model():
    """Build the network selected by the global ``net`` name.

    Returns:
        An uncompiled Keras ``Model`` wired to the globally defined input
        tensors ``x`` (full scale), ``x1`` (half) and ``x2`` (quarter).

    Raises:
        NotImplementedError: if ``net`` names an unknown architecture.
    """
    # Consistency fix: all branches now use the canonical `inputs`/`outputs`
    # keyword names; half of them used the deprecated `input`/`output` aliases.
    if net == 'msdensenet':
        print('Compiling multiscale densenet')
        m = Model(inputs=[x, x1], outputs=msdensenet([x, x1]))
    elif net == 'tsdensenet':
        print('Compiling multiscale(3) densenet')
        m = Model(inputs=[x, x1, x2], outputs=tsdensenet([x, x1, x2]))
    elif net == 'msdensenetnon':
        print('Compiling multiscale densenet without dilated block')
        m = Model(inputs=[x, x1], outputs=msdensenet_non([x, x1]))
    elif net == 'sdensenet':
        print('Compiling singlescale densenet')
        m = Model(inputs=[x], outputs=sdensenet([x]))
    elif net == 'msdensenet_att':
        print('Compiling multiscale densenet dilated with att')
        m = Model(inputs=[x, x1], outputs=msdensenet_att([x, x1]))
    elif net == 'dense':
        print('Compiling dense')
        m = Model(inputs=[x], outputs=dense([x]))
    else:
        raise NotImplementedError
    return m
def traning_process(path, model, batch_gen_train, nb_train, batch_gen_val, nb_val, fold, weight=None):
    """Train `model` with generators, optionally resuming from a checkpoint.

    Args:
        path: directory holding checkpoint files.
        model: compiled Keras model (replaced wholesale when resuming).
        batch_gen_train: training batch generator.
        nb_train: steps per training epoch.
        batch_gen_val: validation batch generator.
        nb_val: validation steps.
        fold: fold index, used in the checkpoint file name.
        weight: optional checkpoint file name to resume from; the initial
            epoch is parsed from the name (``weights.<net>f<fold>.<epoch>-<loss>.h5``).
    """
    print(weight)
    if weight is not None:
        import tensorflow as tf
        # epoch is embedded between the first '.' after position 9 and the
        # '-' that precedes the validation loss in the checkpoint name
        initepochstr = weight[weight.find(".", 9) + 1:weight.find("-")]
        initepoch = int(initepochstr)
        # Reload the whole model (architecture + weights + optimizer state).
        # The previous `model.load_weights(path + weight)` call here was
        # redundant: the object was discarded immediately afterwards.
        del model
        model = load_model(path + weight, custom_objects={"tf": tf, "kl_divergence": kl_divergence,
                                                          "correlation_coefficient": correlation_coefficient,
                                                          "nss": nss, "SelfAttention": SelfAttention})
        print(initepoch)
        print(path + weight)
    else:
        initepoch = 0
    model.fit_generator(batch_gen_train, nb_train,
                        initial_epoch=initepoch,
                        epochs=nb_epoch,
                        validation_data=batch_gen_val,
                        validation_steps=nb_val,
                        callbacks=[tensorboard,
                                   ModelCheckpoint(path + '/weights.' + net + 'f' + str(fold) + '.{epoch:02d}-{val_loss:.4f}.h5',
                                                   save_best_only=True)])
if __name__ == '__main__':
    # Entry point. Usage:
    #   train:   <script> train [resume_checkpoint_name]
    #   test:    <script> test <weights_path> <test_images_dir>
    #   foldcal: <script> foldcal <...>  (evaluates saved per-fold checkpoints)
    if len(sys.argv) == 1:
        raise NotImplementedError
    else:
        print("Version 1.2")
        K.set_image_data_format("channels_first")
        phase = sys.argv[1]
        # shared input tensors at full / half / quarter resolution
        x = Input((3, shape_r, shape_c))
        x1 = Input((3, shape_r / 2, shape_c / 2))
        x2 = Input((3, shape_r / 4, shape_c / 4))
        x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt))
        m = 0
        if phase == 'train':
            path = "weight/cv/" + net
            try:
                weight = sys.argv[2]
            except:  # NOTE(review): bare except; catching IndexError would suffice
                weight = None
            if not os.path.exists(path):
                os.makedirs(path)
            folds, X_train, Y_train = load_data()
            sum_nss = 0
            sum_cc = 0
            sum_kl = 0
            h = 0
            # 10-fold cross-validated training; a fresh model per fold
            for j, (train_idx, val_idx) in enumerate(folds):
                m = create_model()
                print("Fold: ", j)
                m.output_names = ['output_1', 'output_2', 'output_3']
                tensorboard = TensorBoard(log_dir="logs/{}_{}_{}".format(net, j, time()))
                m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss], metrics={'output_1': kl_divergence, 'output_2': correlation_coefficient,'output_3': nss})
                X_train_cv = [X_train[i] for i in train_idx]
                y_train_cv = [Y_train[i] for i in train_idx]
                X_valid_cv = [X_train[i] for i in val_idx]
                y_valid_cv = [Y_train[i] for i in val_idx]
                print("Number of train image ", len(X_train_cv))
                print("Number of validation image ", len(X_valid_cv))
                traning_process(path, m, batch_generator(X_train_cv, y_train_cv), len(X_train_cv),
                                batch_generator(X_valid_cv, y_valid_cv), len(X_valid_cv), j, weight=weight)
                # resume checkpoint applies to the first fold only
                weight=None
        elif phase == "test":
            # Output Folder Path
            output_folder = "pred/" + net + '/'
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
            path_test = sys.argv[3]
            file_names = [f for f in os.listdir(path_test) if f.endswith(('.jpg', '.jpeg', '.png'))]
            file_names.sort()
            nb_imgs_test = len(file_names)
            m = create_model()
            if nb_imgs_test % b_s != 0:
                print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.")
                exit()
            print("Loading weights")
            weight_path = sys.argv[2]
            m.load_weights(weight_path)
            print("Predicting saliency maps for " + path_test)
            # [0]: keep only the first model output (the saliency map head)
            predictions = m.predict_generator(generator_test(b_s=b_s, imgs_test_path=path_test), nb_imgs_test)[0]
            for pred, name in zip(predictions, file_names):
                original_image = cv2.imread(path_test + name, 0)
                res = postprocess_predictions(pred[0], original_image.shape[0], original_image.shape[1])
                cv2.imwrite(output_folder + '%s' % name, res.astype(int))
        elif phase == 'foldcal':
            # Re-evaluate each fold's latest checkpoint with the full metric suite
            folds, X_train, Y_train = load_data()
            path = "weight/cv/" + net + "/result"
            f = open('doc/'+net+'_salicon10f.csv', 'a')
            sum_aucjud = 0
            sum_sim = 0
            sum_emd = 0
            sum_aucbor = 0
            sum_sauc = 0
            sum_nss = 0
            sum_cc = 0
            sum_kl = 0
            m = create_model()
            m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
            # shuffle map used by the shuffled-AUC metric
            smap = cv2.imread("data/shuffle_map.png", 0)
            smap = cv2.resize(smap, (640, 480))
            for j, (train_idx, val_idx) in enumerate(folds):
                print("Fold: ", j)
                X_train_cv = [X_train[i] for i in train_idx]
                y_train_cv = [Y_train[i] for i in train_idx]
                X_valid_cv = [X_train[i] for i in val_idx]
                y_valid_cv = [Y_train[i] for i in val_idx]
                nb_val = len(X_valid_cv)
                lastest_file = glob.glob(path + '/weights.' + net + 'f' + str(j) + '*.*')
                if not lastest_file:
                    print("not found")
                    continue
                # most recently created checkpoint for this fold
                lastest_file = max(lastest_file, key=os.path.getctime)
                print(lastest_file)
                m.load_weights(lastest_file)
                predictions = m.predict_generator(batch_generator_test(X_valid_cv), nb_val)[0]
                nss_tmp = 0
                cc_tmp = 0
                kl_tmp = 0
                emd_tmp = 0
                aucjud_tmp = 0
                sim_tmp = 0
                aucbor_tmp = 0
                sauc_tmp = 0
                # gt layout per batch: gt[0] = saliency map, gt[2] = fixation map
                for pred, gt in zip(predictions, y_valid_cv):
                    res = postprocess_predictions(pred[0], shape_r_out, shape_c_out)
                    res = res/255
                    aucjud_tmp += auc_judd(res, gt[2][0, 0])
                    sim_tmp += similarity(res, gt[0][0, 0])
                    aucbor_tmp += auc_borji(res, gt[2][0, 0])
                    nss_tmp += nss_metric(gt[2][0, 0], res)
                    cc_tmp += cc(gt[0][0, 0], res)
                    kl_tmp += kldiv(gt[0][0, 0], res)
                    # EMD is computed on the 0-255 scale
                    emdgt = gt[0][0, 0]*255
                    emdres = res*255
                    emd_tmp += emd_samples(emdgt.flatten(), emdres.flatten(), bins=255)
                    sauc_tmp += auc_shuff(res, gt[2][0, 0], smap)
                print(emd_tmp/nb_val)
                sum_nss += nss_tmp / nb_val
                sum_cc += cc_tmp / nb_val
                sum_kl += kl_tmp / nb_val
                sum_emd += emd_tmp / nb_val
                sum_aucjud += aucjud_tmp / nb_val
                sum_sim += sim_tmp / nb_val
                sum_aucbor += aucbor_tmp / nb_val
                sum_sauc += sauc_tmp / nb_val
                # per-fold averages
                f.write("{},{},{},{},{},{},{},{}\n".format(aucjud_tmp / nb_val, sim_tmp / nb_val, emd_tmp / nb_val,
                                                           aucbor_tmp / nb_val, sauc_tmp / nb_val, cc_tmp / nb_val, nss_tmp / nb_val,
                                                           kl_tmp / nb_val))
            # overall averages over the 10 folds
            f.write("{},{},{},{},{},{},{},{}\n".format(sum_aucjud/10, sum_sim/10, sum_emd/10,
                                                       sum_aucbor/10, sum_sauc/10, sum_cc/10, sum_nss/10, sum_kl/10))
    # NOTE(review): `f` is only defined in the 'foldcal' branch, yet this
    # close runs for every phase — train/test will raise NameError here.
    f.close()
| [
"keras.models.load_model",
"numpy.random.seed",
"os.makedirs",
"os.path.exists",
"tensorflow.set_random_seed",
"time.time",
"sklearn.model_selection.StratifiedKFold",
"keras.optimizers.RMSprop",
"os.listdir"
] | [((558, 565), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (562, 565), False, 'from numpy.random import seed\n'), ((605, 623), 'tensorflow.set_random_seed', 'set_random_seed', (['(2)'], {}), '(2)\n', (620, 623), False, 'from tensorflow import set_random_seed\n'), ((4757, 4945), 'keras.models.load_model', 'load_model', (['(path + weight)'], {'custom_objects': "{'tf': tf, 'kl_divergence': kl_divergence, 'correlation_coefficient':\n correlation_coefficient, 'nss': nss, 'SelfAttention': SelfAttention}"}), "(path + weight, custom_objects={'tf': tf, 'kl_divergence':\n kl_divergence, 'correlation_coefficient': correlation_coefficient,\n 'nss': nss, 'SelfAttention': SelfAttention})\n", (4767, 4945), False, 'from keras.models import load_model\n'), ((763, 789), 'os.listdir', 'os.listdir', (['imgs_test_path'], {}), '(imgs_test_path)\n', (773, 789), False, 'import os\n'), ((1715, 1742), 'os.listdir', 'os.listdir', (['imgs_train_path'], {}), '(imgs_train_path)\n', (1725, 1742), False, 'import os\n'), ((1826, 1853), 'os.listdir', 'os.listdir', (['maps_train_path'], {}), '(maps_train_path)\n', (1836, 1853), False, 'import os\n'), ((1937, 1964), 'os.listdir', 'os.listdir', (['fixs_train_path'], {}), '(fixs_train_path)\n', (1947, 1964), False, 'import os\n'), ((3135, 3193), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(1)'}), '(n_splits=10, shuffle=True, random_state=1)\n', (3150, 3193), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((6233, 6253), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6247, 6253), False, 'import os\n'), ((6271, 6288), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6282, 6288), False, 'import os\n'), ((6747, 6765), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (6754, 6765), False, 'from keras.optimizers import RMSprop\n'), ((7649, 7678), 'os.path.exists', 'os.path.exists', 
(['output_folder'], {}), '(output_folder)\n', (7663, 7678), False, 'import os\n'), ((7696, 7722), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (7707, 7722), False, 'import os\n'), ((7796, 7817), 'os.listdir', 'os.listdir', (['path_test'], {}), '(path_test)\n', (7806, 7817), False, 'import os\n'), ((9213, 9231), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (9220, 9231), False, 'from keras.optimizers import RMSprop\n'), ((6711, 6717), 'time.time', 'time', ([], {}), '()\n', (6715, 6717), False, 'from time import time\n')] |
import unittest
import numpy as np
import spdivik.score
from spdivik.distance import ScipyDistance, KnownMetric
class TestDunn(unittest.TestCase):
    """Unit test for the Dunn index implementation in spdivik.score."""

    def test_computes_inter_to_intracluster_distances_rate(self):
        # Two 1-D clusters: {1, 3} around centroid 2 and {4, 6} around 5.
        observations = np.array([[1], [3], [4], [6]])
        cluster_centers = np.array([[2], [5]])
        assignments = np.array([1, 1, 2, 2], dtype=int)
        metric = ScipyDistance(KnownMetric.euclidean)
        score = spdivik.score.dunn(observations, assignments, cluster_centers, metric)
        # smallest inter-cluster separation over largest intra-cluster spread
        self.assertAlmostEqual(score, 3.)
| [
"numpy.array",
"spdivik.distance.ScipyDistance"
] | [((232, 262), 'numpy.array', 'np.array', (['[[1], [3], [4], [6]]'], {}), '([[1], [3], [4], [6]])\n', (240, 262), True, 'import numpy as np\n'), ((283, 303), 'numpy.array', 'np.array', (['[[2], [5]]'], {}), '([[2], [5]])\n', (291, 303), True, 'import numpy as np\n'), ((321, 354), 'numpy.array', 'np.array', (['[1, 1, 2, 2]'], {'dtype': 'int'}), '([1, 1, 2, 2], dtype=int)\n', (329, 354), True, 'import numpy as np\n'), ((374, 410), 'spdivik.distance.ScipyDistance', 'ScipyDistance', (['KnownMetric.euclidean'], {}), '(KnownMetric.euclidean)\n', (387, 410), False, 'from spdivik.distance import ScipyDistance, KnownMetric\n')] |
import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Tuple
from yacs.config import CfgNode
class FCHead(nn.Module):

    def __init__(self, cfg: CfgNode):
        """
        Fully connected head for camera and betas regression.
        Args:
            cfg (CfgNode): Model config as yacs CfgNode.
        """
        super(FCHead, self).__init__()
        self.cfg = cfg
        self.npose = 6 * (cfg.SMPL.NUM_BODY_JOINTS + 1)
        in_features = cfg.MODEL.FLOW.CONTEXT_FEATURES
        hidden_features = cfg.MODEL.FC_HEAD.NUM_FEATURES
        # 13 outputs = 10 shape (betas) offsets + 3 camera offsets
        self.layers = nn.Sequential(
            nn.Linear(in_features, hidden_features),
            nn.ReLU(inplace=False),
            nn.Linear(hidden_features, 13),
        )
        # small initial weights so predictions start near the mean params
        nn.init.xavier_uniform_(self.layers[2].weight, gain=0.02)

        mean_params = np.load(cfg.SMPL.MEAN_PARAMS)
        init_cam = torch.from_numpy(mean_params['cam'].astype(np.float32))[None, None]
        init_betas = torch.from_numpy(mean_params['shape'].astype(np.float32))[None, None]
        # buffers: move with the module but are not trained
        self.register_buffer('init_cam', init_cam)
        self.register_buffer('init_betas', init_betas)

    def forward(self, smpl_params: Dict, feats: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Run forward pass.
        Args:
            smpl_params (Dict): Dictionary containing predicted SMPL parameters.
            feats (torch.Tensor): Tensor of shape (N, C) containing the features computed by the backbone.
        Returns:
            pred_betas (torch.Tensor): Predicted SMPL betas.
            pred_cam (torch.Tensor): Predicted camera parameters.
        """
        batch_size = feats.shape[0]
        num_samples = smpl_params['body_pose'].shape[1]
        # one offset vector per image, broadcast to every sample
        raw = self.layers(feats)
        offset = raw.reshape(batch_size, 1, 13).repeat(1, num_samples, 1)
        # predictions are mean parameters plus a learned offset
        pred_betas = offset[:, :, :10] + self.init_betas
        pred_cam = offset[:, :, 10:] + self.init_cam
        return pred_betas, pred_cam
| [
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"numpy.load"
] | [((786, 843), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.layers[2].weight'], {'gain': '(0.02)'}), '(self.layers[2].weight, gain=0.02)\n', (809, 843), True, 'import torch.nn as nn\n'), ((867, 896), 'numpy.load', 'np.load', (['cfg.SMPL.MEAN_PARAMS'], {}), '(cfg.SMPL.MEAN_PARAMS)\n', (874, 896), True, 'import numpy as np\n'), ((493, 567), 'torch.nn.Linear', 'nn.Linear', (['cfg.MODEL.FLOW.CONTEXT_FEATURES', 'cfg.MODEL.FC_HEAD.NUM_FEATURES'], {}), '(cfg.MODEL.FLOW.CONTEXT_FEATURES, cfg.MODEL.FC_HEAD.NUM_FEATURES)\n', (502, 567), True, 'import torch.nn as nn\n'), ((661, 683), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (668, 683), True, 'import torch.nn as nn\n'), ((731, 776), 'torch.nn.Linear', 'nn.Linear', (['cfg.MODEL.FC_HEAD.NUM_FEATURES', '(13)'], {}), '(cfg.MODEL.FC_HEAD.NUM_FEATURES, 13)\n', (740, 776), True, 'import torch.nn as nn\n')] |
import numpy as np
def make_continuous_copy(alpha):
    """Unwrap a sequence of angles so consecutive values vary continuously.

    The input is first normalised to [-pi, pi); whenever the sign flips
    while the magnitude exceeds pi/2 (i.e. the angle crossed the +/-pi
    boundary), a 2*pi correction is applied so the cumulative track is
    free of jumps.
    """
    wrapped = (alpha + np.pi) % (2.0 * np.pi) - np.pi
    unwrapped = np.zeros_like(wrapped)
    unwrapped[0] = wrapped[0]
    for i in range(1, len(wrapped)):
        step = wrapped[i] - wrapped[i - 1]
        crossed_boundary = (np.sign(wrapped[i]) != np.sign(wrapped[i - 1])
                            and np.abs(wrapped[i]) > np.pi / 2)
        if crossed_boundary:
            step -= np.sign(step) * 2 * np.pi
        unwrapped[i] = unwrapped[i - 1] + step
    return unwrapped
def derivative_of(x, dt=1, radian=False):
    """Finite-difference derivative of x, ignoring NaN samples.

    NaN entries stay NaN in the output; if fewer than two valid samples
    exist, an all-zero array is returned instead. With radian=True the
    signal is unwrapped first so angle wrapping does not distort the
    derivative.
    """
    signal = make_continuous_copy(x) if radian else x
    valid = ~np.isnan(signal)
    if signal[valid].shape[-1] < 2:
        return np.zeros_like(signal)
    result = np.full_like(signal, np.nan)
    result[valid] = np.gradient(signal[valid], dt)
    return result
def min_angle_dist(a, b):
    """Signed angular difference a - b, with a single 2*pi correction
    applied when the magnitude reaches pi."""
    diff = a - b
    if np.abs(diff) >= np.pi:
        if diff < 0:
            diff += 2 * np.pi
        else:
            diff -= 2 * np.pi
    return diff
def interpolate_angle(x):
    """Fill NaN gaps in an angle sequence by stepping evenly, along the
    shortest angular path, from the last known value toward the next one.

    Returns a filled copy; the input array is not modified.
    """
    x = x.copy()
    for i, a in enumerate(x):
        if np.isnan(a):
            exist_next = False
            # find the index j of the next non-NaN sample, if any
            for j in range(i, len(x)):
                if not np.isnan(x[j]):
                    exist_next = True
                    break
            if i == 0:
                # leading NaNs: backfill with the first valid value
                # NOTE(review): if the whole array is NaN, x[j] is NaN too,
                # so the entry simply stays NaN
                x[i] = x[j]
                continue
            last = x[i - 1]
            if exist_next:
                next = x[j]
                # one evenly sized step toward the next valid angle
                x[i] = last + (min_angle_dist(next, last)) / (j - i + 1)
                # re-wrap into [-pi, pi)
                x[i] = (x[i] + np.pi) % (2 * np.pi) - np.pi
            else:
                # trailing NaNs: hold the last valid value
                x[i] = last
    return x | [
"numpy.full_like",
"numpy.zeros_like",
"numpy.abs",
"numpy.isnan",
"numpy.sign"
] | [((125, 145), 'numpy.zeros_like', 'np.zeros_like', (['alpha'], {}), '(alpha)\n', (138, 145), True, 'import numpy as np\n'), ((764, 787), 'numpy.full_like', 'np.full_like', (['x', 'np.nan'], {}), '(x, np.nan)\n', (776, 787), True, 'import numpy as np\n'), ((737, 753), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (750, 753), True, 'import numpy as np\n'), ((908, 917), 'numpy.abs', 'np.abs', (['c'], {}), '(c)\n', (914, 917), True, 'import numpy as np\n'), ((1081, 1092), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (1089, 1092), True, 'import numpy as np\n'), ((796, 807), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (804, 807), True, 'import numpy as np\n'), ((276, 292), 'numpy.abs', 'np.abs', (['alpha[i]'], {}), '(alpha[i])\n', (282, 292), True, 'import numpy as np\n'), ((826, 837), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (834, 837), True, 'import numpy as np\n'), ((228, 245), 'numpy.sign', 'np.sign', (['alpha[i]'], {}), '(alpha[i])\n', (235, 245), True, 'import numpy as np\n'), ((249, 270), 'numpy.sign', 'np.sign', (['alpha[i - 1]'], {}), '(alpha[i - 1])\n', (256, 270), True, 'import numpy as np\n'), ((1187, 1201), 'numpy.isnan', 'np.isnan', (['x[j]'], {}), '(x[j])\n', (1195, 1201), True, 'import numpy as np\n'), ((407, 439), 'numpy.sign', 'np.sign', (['(alpha[i] - alpha[i - 1])'], {}), '(alpha[i] - alpha[i - 1])\n', (414, 439), True, 'import numpy as np\n'), ((694, 705), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (702, 705), True, 'import numpy as np\n')] |
##### file path
# Input/output locations for part-3 feature generation of the
# Tianchi fresh-comp dataset.
# input
path_df_D = "tianchi_fresh_comp_train_user.csv"
path_df_part_1 = "df_part_1.csv"
path_df_part_2 = "df_part_2.csv"
path_df_part_3 = "df_part_3.csv"
path_df_part_1_tar = "df_part_1_tar.csv"
path_df_part_2_tar = "df_part_2_tar.csv"
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# output
# presumably U/I/C = user/item/category feature tables and IC/UI/UC the
# pairwise ones — confirm against the downstream feature-merge step
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
import pandas as pd
import numpy as np
##========================================================##
##======================== Part 3 ========================##
##========================================================##
###########################################
'''Step 1.1 feature data set U of df_part_3
    (1)
    u_b1_count_in_6
    u_b2_count_in_6
    u_b3_count_in_6
    u_b4_count_in_6
    u_b_count_in_6
    (2)
    u_b1_count_in_3
    u_b2_count_in_3
    u_b3_count_in_3
    u_b4_count_in_3
    u_b_count_in_3
    (2)
    u_b1_count_in_1
    u_b2_count_in_1
    u_b3_count_in_1
    u_b4_count_in_1
    u_b_count_in_1
    (3)
    u_b4_rate (in_6)
    u_b4_diff_hours (in_6)
'''
# loading data
# explicit file handle so it can be closed deterministically in `finally`
path_df = open(path_df_part_3, 'r')
try:
    # parse_dates=[0]: the first column holds the event timestamp
    df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
    df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
    path_df.close()
# u_b_count_in_6
# Behaviour counts per user over the whole part-3 window. cumcount numbers
# each (user, behavior) occurrence from 0, so the last row of every group
# carries (count - 1).
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
    ['user_id', 'behavior_type', 'cumcount']]
# one-hot encode behavior_type so each count can be routed to its own column
df_part_3_u_b_count_in_6 = pd.get_dummies(df_part_3_u_b_count_in_6['behavior_type']).join(
    df_part_3_u_b_count_in_6[['user_id', 'cumcount']])
df_part_3_u_b_count_in_6.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# cumcount is 0-based, hence the +1 when turning it into a count
df_part_3_u_b_count_in_6['u_b1_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_1'] * (
        df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b2_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_2'] * (
        df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b3_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_3'] * (
        df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b4_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_4'] * (
        df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6 = df_part_3_u_b_count_in_6.groupby('user_id').agg({'u_b1_count_in_6': np.sum,
                                                                            'u_b2_count_in_6': np.sum,
                                                                            'u_b3_count_in_6': np.sum,
                                                                            'u_b4_count_in_6': np.sum})
df_part_3_u_b_count_in_6.reset_index(inplace=True)
# total interactions = sum of the four per-behaviour counts
df_part_3_u_b_count_in_6['u_b_count_in_6'] = df_part_3_u_b_count_in_6[['u_b1_count_in_6',
                                                                       'u_b2_count_in_6',
                                                                       'u_b3_count_in_6',
                                                                       'u_b4_count_in_6']].apply(lambda x: x.sum(),
                                                                                                 axis=1)
# u_b_count_in_3
# Behaviour counts per user over the last 3 days (time >= 2014-12-16).
# .copy() so the cumcount assignment below does not write through a slice view.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].copy()
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'behavior_type']).cumcount()
# BUG FIX: de-duplicate the 3-day slice (df_part_3_in_3), not the full
# frame — the original used df_part_3 here, so the "in_3" counts silently
# equalled the "in_6" counts (the in_1 section below shows the intended form).
df_part_3_u_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
    ['user_id', 'behavior_type', 'cumcount']]
# one-hot encode behavior_type so each count can be routed to its own column
df_part_3_u_b_count_in_3 = pd.get_dummies(df_part_3_u_b_count_in_3['behavior_type']).join(
    df_part_3_u_b_count_in_3[['user_id', 'cumcount']])
df_part_3_u_b_count_in_3.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# cumcount is 0-based, hence the +1 when turning it into a count
df_part_3_u_b_count_in_3['u_b1_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_1'] * (
        df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b2_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_2'] * (
        df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b3_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_3'] * (
        df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b4_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_4'] * (
        df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3 = df_part_3_u_b_count_in_3.groupby('user_id').agg({'u_b1_count_in_3': np.sum,
                                                                            'u_b2_count_in_3': np.sum,
                                                                            'u_b3_count_in_3': np.sum,
                                                                            'u_b4_count_in_3': np.sum})
df_part_3_u_b_count_in_3.reset_index(inplace=True)
# total interactions in the 3-day window
df_part_3_u_b_count_in_3['u_b_count_in_3'] = df_part_3_u_b_count_in_3[['u_b1_count_in_3',
                                                                       'u_b2_count_in_3',
                                                                       'u_b3_count_in_3',
                                                                       'u_b4_count_in_3']].apply(lambda x: x.sum(),
                                                                                                 axis=1)
# u_b_count_in_1
# Behaviour counts per user over the last day (time >= 2014-12-18).
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'behavior_type'], 'last')[
    ['user_id', 'behavior_type', 'cumcount']]
# one-hot encode behavior_type so each count can be routed to its own column
df_part_3_u_b_count_in_1 = pd.get_dummies(df_part_3_u_b_count_in_1['behavior_type']).join(
    df_part_3_u_b_count_in_1[['user_id', 'cumcount']])
df_part_3_u_b_count_in_1.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# cumcount is 0-based, hence the +1 when turning it into a count
df_part_3_u_b_count_in_1['u_b1_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_1'] * (
        df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b2_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_2'] * (
        df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b3_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_3'] * (
        df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b4_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_4'] * (
        df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1 = df_part_3_u_b_count_in_1.groupby('user_id').agg({'u_b1_count_in_1': np.sum,
                                                                            'u_b2_count_in_1': np.sum,
                                                                            'u_b3_count_in_1': np.sum,
                                                                            'u_b4_count_in_1': np.sum})
df_part_3_u_b_count_in_1.reset_index(inplace=True)
# total interactions in the 1-day window
df_part_3_u_b_count_in_1['u_b_count_in_1'] = df_part_3_u_b_count_in_1[['u_b1_count_in_1',
                                                                       'u_b2_count_in_1',
                                                                       'u_b3_count_in_1',
                                                                       'u_b4_count_in_1']].apply(lambda x: x.sum(),
                                                                                                 axis=1)
# merge the result of count_in_6, count_in_3, count_in_1
# left joins keyed on user_id; users absent from a shorter window get 0 counts
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count_in_6,
                               df_part_3_u_b_count_in_3, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count,
                               df_part_3_u_b_count_in_1, on=['user_id'], how='left').fillna(0)
# fillna(0) above produced floats; cast all count columns back to int
df_part_3_u_b_count[['u_b1_count_in_6',
                     'u_b2_count_in_6',
                     'u_b3_count_in_6',
                     'u_b4_count_in_6',
                     'u_b_count_in_6',
                     'u_b1_count_in_3',
                     'u_b2_count_in_3',
                     'u_b3_count_in_3',
                     'u_b4_count_in_3',
                     'u_b_count_in_3',
                     'u_b1_count_in_1',
                     'u_b2_count_in_1',
                     'u_b3_count_in_1',
                     'u_b4_count_in_1',
                     'u_b_count_in_1']] = df_part_3_u_b_count[['u_b1_count_in_6',
                                                               'u_b2_count_in_6',
                                                               'u_b3_count_in_6',
                                                               'u_b4_count_in_6',
                                                               'u_b_count_in_6',
                                                               'u_b1_count_in_3',
                                                               'u_b2_count_in_3',
                                                               'u_b3_count_in_3',
                                                               'u_b4_count_in_3',
                                                               'u_b_count_in_3',
                                                               'u_b1_count_in_1',
                                                               'u_b2_count_in_1',
                                                               'u_b3_count_in_1',
                                                               'u_b4_count_in_1',
                                                               'u_b_count_in_1']].astype(int)
# u_b4_rate
# fraction of a user's interactions (6-day window) that were behaviour 4
df_part_3_u_b_count['u_b4_rate'] = df_part_3_u_b_count['u_b4_count_in_6'] / df_part_3_u_b_count['u_b_count_in_6']
# u_b4_diff_time
# time between a user's first recorded action and their first behaviour-4 action
df_part_3 = df_part_3.sort_values(by=['user_id', 'time'])
df_part_3_u_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['user_id'], 'first')[
    ['user_id', 'time']]
df_part_3_u_b4_time.columns = ['user_id', 'b4_first_time']
df_part_3_u_b_time = df_part_3.drop_duplicates(['user_id'], 'first')[['user_id', 'time']]
df_part_3_u_b_time.columns = ['user_id', 'b_first_time']
df_part_3_u_b_b4_time = pd.merge(df_part_3_u_b_time, df_part_3_u_b4_time, on=['user_id'])
df_part_3_u_b_b4_time['u_b4_diff_time'] = df_part_3_u_b_b4_time['b4_first_time'] - df_part_3_u_b_b4_time['b_first_time']
df_part_3_u_b_b4_time = df_part_3_u_b_b4_time[['user_id', 'u_b4_diff_time']]
# convert the timedelta to whole hours
df_part_3_u_b_b4_time['u_b4_diff_hours'] = df_part_3_u_b_b4_time['u_b4_diff_time'].apply(
    lambda x: x.days * 24 + x.seconds // 3600)
# generating feature set U
# users without a behaviour-4 action have no row in df_part_3_u_b_b4_time,
# so u_b4_diff_hours is NaN for them after this left join
f_U_part_3 = pd.merge(df_part_3_u_b_count,
                      df_part_3_u_b_b4_time,
                      on=['user_id'], how='left')[['user_id',
                                                   'u_b1_count_in_6',
                                                   'u_b2_count_in_6',
                                                   'u_b3_count_in_6',
                                                   'u_b4_count_in_6',
                                                   'u_b_count_in_6',
                                                   'u_b1_count_in_3',
                                                   'u_b2_count_in_3',
                                                   'u_b3_count_in_3',
                                                   'u_b4_count_in_3',
                                                   'u_b_count_in_3',
                                                   'u_b1_count_in_1',
                                                   'u_b2_count_in_1',
                                                   'u_b3_count_in_1',
                                                   'u_b4_count_in_1',
                                                   'u_b_count_in_1',
                                                   'u_b4_rate',
                                                   'u_b4_diff_hours']]
# write to csv file
f_U_part_3 = f_U_part_3.round({'u_b4_rate': 3})
f_U_part_3.to_csv(path_df_part_3_U, index=False)
###########################################
'''Step 1.2 feature data set I of df_part_3
(1)
i_u_count_in_6
i_u_count_in_3
i_u_count_in_1
(2)
i_b1_count_in_6
i_b2_count_in_6
i_b3_count_in_6
i_b4_count_in_6
i_b_count_in_6
i_b1_count_in_3
i_b2_count_in_3
i_b3_count_in_3
i_b4_count_in_3
i_b_count_in_3
i_b1_count_in_1
i_b2_count_in_1
i_b3_count_in_1
i_b4_count_in_1
i_b_count_in_1
(3)
i_b4_rate (in_6)
i_b4_diff_hours (in_6)
'''
# loading data: the part-3 interaction log; column 0 holds the timestamps,
# parsed on read.  `with` replaces the try/finally pair and still guarantees
# the handle is closed even if read_csv raises.
with open(path_df_part_3, 'r') as path_df:
    df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
    df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
# i_u_count_in_6: number of distinct users who touched each item in the full
# (6-day) window.  Dedup (item, user) pairs, number them per item with
# cumcount (+1 to make it 1-based), then keep the last running number per
# item — i.e. the distinct-user count.
df_part_3_in_6 = df_part_3.drop_duplicates(['item_id', 'user_id'])
df_part_3_in_6['i_u_count_in_6'] = df_part_3_in_6.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_6']]
# i_u_count_in_3: same count restricted to records on/after 2014-12-16.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_3['i_u_count_in_3'] = df_part_3_in_3.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_3']]
# i_u_count_in_1: same count restricted to records on/after 2014-12-18.
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_1['i_u_count_in_1'] = df_part_3_in_1.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_1']]
# merge for generation of i_u_count: one row per item with the three
# windowed distinct-user counts; items missing from a shorter window had no
# activity there, hence 0.
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count_in_6, df_part_3_i_u_count_in_3,
                               on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count, df_part_3_i_u_count_in_1,
                               on=['item_id'], how='left').fillna(0)
# restore integer dtype after fillna upcast the counts to float
i_u_count_columns = ['i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1']
df_part_3_i_u_count[i_u_count_columns] = df_part_3_i_u_count[i_u_count_columns].astype(int)
# i_b_count_in_6: per-item count of each behaviour type over the full
# window.  NOTE: this mutates df_part_3 by adding a 'cumcount' column; the
# later window slices inherit (and overwrite) it.
df_part_3['cumcount'] = df_part_3.groupby(['item_id', 'behavior_type']).cumcount()
# keep the last row per (item, behaviour): its 0-based cumcount + 1 is the
# number of rows in that group
df_part_3_i_b_count_in_6 = df_part_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
    ['item_id', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_i_b_count_in_6 = pd.get_dummies(df_part_3_i_b_count_in_6['behavior_type']).join(
    df_part_3_i_b_count_in_6[['item_id', 'cumcount']])
df_part_3_i_b_count_in_6.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): the count lands in the matching behaviour column,
# 0 elsewhere
df_part_3_i_b_count_in_6['i_b1_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_1'] * (
        df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b2_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_2'] * (
        df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b3_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_3'] * (
        df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b4_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_4'] * (
        df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6[['item_id',
                                                     'i_b1_count_in_6',
                                                     'i_b2_count_in_6',
                                                     'i_b3_count_in_6',
                                                     'i_b4_count_in_6']]
# collapse the one-row-per-(item, behaviour) layout into one row per item
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6.groupby('item_id').agg({'i_b1_count_in_6': np.sum,
                                                                            'i_b2_count_in_6': np.sum,
                                                                            'i_b3_count_in_6': np.sum,
                                                                            'i_b4_count_in_6': np.sum})
df_part_3_i_b_count_in_6.reset_index(inplace=True)
# total actions of any type on the item in the window
df_part_3_i_b_count_in_6['i_b_count_in_6'] = df_part_3_i_b_count_in_6['i_b1_count_in_6'] + \
                                             df_part_3_i_b_count_in_6['i_b2_count_in_6'] + \
                                             df_part_3_i_b_count_in_6['i_b3_count_in_6'] + \
                                             df_part_3_i_b_count_in_6['i_b4_count_in_6']
# i_b_count_in_3: per-item count of each behaviour type within the 3-day
# window (records on/after 2014-12-16).
# BUG FIX: the original deduplicated from the full frame (df_part_3), whose
# 'cumcount' holds the 6-day running index — so every *_in_3 item count
# silently reproduced the 6-day count.  Count from the 3-day slice
# df_part_3_in_3 instead, mirroring the in_1 branch below and the
# category-level in_3 branch.  `.copy()` avoids chained assignment on a view.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].copy()
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
    ['item_id', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_i_b_count_in_3 = pd.get_dummies(df_part_3_i_b_count_in_3['behavior_type']).join(
    df_part_3_i_b_count_in_3[['item_id', 'cumcount']])
df_part_3_i_b_count_in_3.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): the group size lands in the matching behaviour
# column, 0 elsewhere
df_part_3_i_b_count_in_3['i_b1_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_1'] * (
        df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b2_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_2'] * (
        df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b3_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_3'] * (
        df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b4_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_4'] * (
        df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3[['item_id',
                                                     'i_b1_count_in_3',
                                                     'i_b2_count_in_3',
                                                     'i_b3_count_in_3',
                                                     'i_b4_count_in_3']]
# collapse to one row per item
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3.groupby('item_id').agg({'i_b1_count_in_3': np.sum,
                                                                            'i_b2_count_in_3': np.sum,
                                                                            'i_b3_count_in_3': np.sum,
                                                                            'i_b4_count_in_3': np.sum})
df_part_3_i_b_count_in_3.reset_index(inplace=True)
# total actions of any type on the item within the 3-day window
df_part_3_i_b_count_in_3['i_b_count_in_3'] = df_part_3_i_b_count_in_3['i_b1_count_in_3'] + \
                                             df_part_3_i_b_count_in_3['i_b2_count_in_3'] + \
                                             df_part_3_i_b_count_in_3['i_b3_count_in_3'] + \
                                             df_part_3_i_b_count_in_3['i_b4_count_in_3']
# i_b_count_in_1: per-item count of each behaviour type within the 1-day
# window (records on/after 2014-12-18).
# NOTE(review): df_part_3_in_1 is a boolean-mask slice; assigning 'cumcount'
# onto it triggers SettingWithCopyWarning on some pandas versions — a .copy()
# would make this robust; confirm target pandas version.
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_id', 'behavior_type']).cumcount()
# last row per (item, behaviour): cumcount + 1 is that group's size
df_part_3_i_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id', 'behavior_type'], 'last')[
    ['item_id', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_i_b_count_in_1 = pd.get_dummies(df_part_3_i_b_count_in_1['behavior_type']).join(
    df_part_3_i_b_count_in_1[['item_id', 'cumcount']])
df_part_3_i_b_count_in_1.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_i_b_count_in_1['i_b1_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_1'] * (
        df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b2_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_2'] * (
        df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b3_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_3'] * (
        df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b4_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_4'] * (
        df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1[['item_id',
                                                     'i_b1_count_in_1',
                                                     'i_b2_count_in_1',
                                                     'i_b3_count_in_1',
                                                     'i_b4_count_in_1']]
# collapse to one row per item
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1.groupby('item_id').agg({'i_b1_count_in_1': np.sum,
                                                                            'i_b2_count_in_1': np.sum,
                                                                            'i_b3_count_in_1': np.sum,
                                                                            'i_b4_count_in_1': np.sum})
df_part_3_i_b_count_in_1.reset_index(inplace=True)
# total actions of any type on the item within the 1-day window
df_part_3_i_b_count_in_1['i_b_count_in_1'] = df_part_3_i_b_count_in_1['i_b1_count_in_1'] + \
                                             df_part_3_i_b_count_in_1['i_b2_count_in_1'] + \
                                             df_part_3_i_b_count_in_1['i_b3_count_in_1'] + \
                                             df_part_3_i_b_count_in_1['i_b4_count_in_1']
# merge for generation of i_b_count: assemble all windowed behaviour counts
# on one row per item; items absent from a shorter window had no activity
# there, hence 0.
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count_in_6, df_part_3_i_b_count_in_3,
                               on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count, df_part_3_i_b_count_in_1,
                               on=['item_id'], how='left').fillna(0)
# restore integer dtype after fillna upcast the counts to float
i_b_count_columns = ['i_b%s_count_in_%d' % (behavior, window)
                     for window in (6, 3, 1)
                     for behavior in ('1', '2', '3', '4', '')]
df_part_3_i_b_count[i_b_count_columns] = df_part_3_i_b_count[i_b_count_columns].astype(int)
# i_b4_rate: share of type-4 actions among all item actions in 6 days.
df_part_3_i_b_count['i_b4_rate'] = (df_part_3_i_b_count['i_b4_count_in_6']
                                    / df_part_3_i_b_count['i_b_count_in_6'])
# i_b4_diff_time: hours between an item's first recorded action of any kind
# and its first type-4 action.
df_part_3 = df_part_3.sort_values(by=['item_id', 'time'])
first_buy = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_id'], 'first')[
    ['item_id', 'time']]
first_buy.columns = ['item_id', 'b4_first_time']
first_any = df_part_3.drop_duplicates(['item_id'], 'first')[['item_id', 'time']]
first_any.columns = ['item_id', 'b_first_time']
# inner join: only items with at least one type-4 action keep this feature
df_part_3_i_b_b4_time = pd.merge(first_any, first_buy, on=['item_id'])
df_part_3_i_b_b4_time['i_b4_diff_time'] = (
    df_part_3_i_b_b4_time['b4_first_time'] - df_part_3_i_b_b4_time['b_first_time'])
df_part_3_i_b_b4_time['i_b4_diff_hours'] = df_part_3_i_b_b4_time['i_b4_diff_time'].apply(
    lambda td: td.days * 24 + td.seconds // 3600)
df_part_3_i_b_b4_time = df_part_3_i_b_b4_time[['item_id', 'i_b4_diff_hours']]
# generating feature set I: join behaviour counts, distinct-user counts and
# the first-action-to-first-buy gap into one row per item.
f_I_part_3 = pd.merge(df_part_3_i_b_count, df_part_3_i_b_b4_time,
                      on=['item_id'], how='left')
f_I_columns = (['item_id', 'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1']
               + ['i_b%s_count_in_%d' % (behavior, window)
                  for window in (6, 3, 1)
                  for behavior in ('1', '2', '3', '4', '')]
               + ['i_b4_rate', 'i_b4_diff_hours'])
f_I_part_3 = pd.merge(f_I_part_3, df_part_3_i_u_count,
                      on=['item_id'], how='left')[f_I_columns]
# round the rate for compact csv output, then persist feature set I
f_I_part_3 = f_I_part_3.round({'i_b4_rate': 3})
f_I_part_3.to_csv(path_df_part_3_I, index=False)
###########################################
'''Step 1.3 feature data set C of df_part_3
(1)
c_u_count_in_6
c_u_count_in_3
c_u_count_in_1
(2)
c_b1_count_in_6
c_b2_count_in_6
c_b3_count_in_6
c_b4_count_in_6
c_b_count_in_6
c_b1_count_in_3
c_b2_count_in_3
c_b3_count_in_3
c_b4_count_in_3
c_b_count_in_3
c_b1_count_in_1
c_b2_count_in_1
c_b3_count_in_1
c_b4_count_in_1
c_b_count_in_1
(3)
c_b4_rate (in_6)
c_b4_diff_hours (in_6)
'''
# loading data: re-read the part-3 interaction log for the category features;
# `with` guarantees the handle is closed even if read_csv raises.
with open(path_df_part_3, 'r') as path_df:
    df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
    df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
# c_u_count_in_6: distinct users who touched each category in the full
# window — dedup (category, user), number per category, keep the last
# (largest) running number.
df_part_3_in_6 = df_part_3.drop_duplicates(['item_category', 'user_id'])
df_part_3_in_6['c_u_count_in_6'] = df_part_3_in_6.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_category'], 'last')[
    ['item_category', 'c_u_count_in_6']]
# c_u_count_in_3: same count restricted to records on/after 2014-12-16.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(
    ['item_category', 'user_id'])
df_part_3_in_3['c_u_count_in_3'] = df_part_3_in_3.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category'], 'last')[
    ['item_category', 'c_u_count_in_3']]
# c_u_count_in_1: same count restricted to records on/after 2014-12-18.
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(
    ['item_category', 'user_id'])
df_part_3_in_1['c_u_count_in_1'] = df_part_3_in_1.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category'], 'last')[
    ['item_category', 'c_u_count_in_1']]
# Combine the three windowed distinct-user counts per category; categories
# missing from a shorter window had no activity there, hence 0.
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count_in_6, df_part_3_c_u_count_in_3,
                               on=['item_category'], how='left').fillna(0)
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count, df_part_3_c_u_count_in_1,
                               on=['item_category'], how='left').fillna(0)
# restore integer dtype after fillna upcast the counts to float
c_u_count_columns = ['c_u_count_in_6', 'c_u_count_in_3', 'c_u_count_in_1']
df_part_3_c_u_count[c_u_count_columns] = df_part_3_c_u_count[c_u_count_columns].astype(int)
# c_b_count_in_6: per-category count of each behaviour type over the full
# window.  NOTE: mutates df_part_3 by (re)writing its 'cumcount' column.
df_part_3['cumcount'] = df_part_3.groupby(['item_category', 'behavior_type']).cumcount()
# last row per (category, behaviour): cumcount + 1 is that group's size
df_part_3_c_b_count_in_6 = df_part_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
    ['item_category', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_c_b_count_in_6 = pd.get_dummies(df_part_3_c_b_count_in_6['behavior_type']).join(
    df_part_3_c_b_count_in_6[['item_category', 'cumcount']])
df_part_3_c_b_count_in_6.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_c_b_count_in_6['c_b1_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_1'] * (
        df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b2_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_2'] * (
        df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b3_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_3'] * (
        df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b4_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_4'] * (
        df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6 = df_part_3_c_b_count_in_6[['item_category',
                                                     'c_b1_count_in_6',
                                                     'c_b2_count_in_6',
                                                     'c_b3_count_in_6',
                                                     'c_b4_count_in_6']]
# collapse to one row per category
df_part_3_c_b_count_in_6 = df_part_3_c_b_count_in_6.groupby('item_category').agg({'c_b1_count_in_6': np.sum,
                                                                                  'c_b2_count_in_6': np.sum,
                                                                                  'c_b3_count_in_6': np.sum,
                                                                                  'c_b4_count_in_6': np.sum})
df_part_3_c_b_count_in_6.reset_index(inplace=True)
# total actions of any type on the category in the window
df_part_3_c_b_count_in_6['c_b_count_in_6'] = df_part_3_c_b_count_in_6['c_b1_count_in_6'] + \
                                             df_part_3_c_b_count_in_6['c_b2_count_in_6'] + \
                                             df_part_3_c_b_count_in_6['c_b3_count_in_6'] + \
                                             df_part_3_c_b_count_in_6['c_b4_count_in_6']
# c_b_count_in_3: per-category count of each behaviour type within the 3-day
# window.  (Unlike the item-level in_3 branch, this correctly counts from
# the 3-day slice df_part_3_in_3.)
# NOTE(review): assigning 'cumcount' onto a boolean-mask slice can trigger
# SettingWithCopyWarning on some pandas versions — confirm target version.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_category', 'behavior_type']).cumcount()
# last row per (category, behaviour): cumcount + 1 is that group's size
df_part_3_c_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
    ['item_category', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_c_b_count_in_3 = pd.get_dummies(df_part_3_c_b_count_in_3['behavior_type']).join(
    df_part_3_c_b_count_in_3[['item_category', 'cumcount']])
df_part_3_c_b_count_in_3.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_c_b_count_in_3['c_b1_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_1'] * (
        df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b2_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_2'] * (
        df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b3_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_3'] * (
        df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b4_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_4'] * (
        df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3 = df_part_3_c_b_count_in_3[['item_category',
                                                     'c_b1_count_in_3',
                                                     'c_b2_count_in_3',
                                                     'c_b3_count_in_3',
                                                     'c_b4_count_in_3']]
# collapse to one row per category
df_part_3_c_b_count_in_3 = df_part_3_c_b_count_in_3.groupby('item_category').agg({'c_b1_count_in_3': np.sum,
                                                                                  'c_b2_count_in_3': np.sum,
                                                                                  'c_b3_count_in_3': np.sum,
                                                                                  'c_b4_count_in_3': np.sum})
df_part_3_c_b_count_in_3.reset_index(inplace=True)
# total actions of any type on the category within the 3-day window
df_part_3_c_b_count_in_3['c_b_count_in_3'] = df_part_3_c_b_count_in_3['c_b1_count_in_3'] + \
                                             df_part_3_c_b_count_in_3['c_b2_count_in_3'] + \
                                             df_part_3_c_b_count_in_3['c_b3_count_in_3'] + \
                                             df_part_3_c_b_count_in_3['c_b4_count_in_3']
# c_b_count_in_1: per-category count of each behaviour type within the 1-day
# window (records on/after 2014-12-18).
# NOTE(review): same chained-assignment hazard as the other window slices.
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_category', 'behavior_type']).cumcount()
# last row per (category, behaviour): cumcount + 1 is that group's size
df_part_3_c_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category', 'behavior_type'], 'last')[
    ['item_category', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_c_b_count_in_1 = pd.get_dummies(df_part_3_c_b_count_in_1['behavior_type']).join(
    df_part_3_c_b_count_in_1[['item_category', 'cumcount']])
df_part_3_c_b_count_in_1.rename(columns={1: 'behavior_type_1',
                                         2: 'behavior_type_2',
                                         3: 'behavior_type_3',
                                         4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_c_b_count_in_1['c_b1_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_1'] * (
        df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b2_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_2'] * (
        df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b3_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_3'] * (
        df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b4_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_4'] * (
        df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1 = df_part_3_c_b_count_in_1[['item_category',
                                                     'c_b1_count_in_1',
                                                     'c_b2_count_in_1',
                                                     'c_b3_count_in_1',
                                                     'c_b4_count_in_1']]
# collapse to one row per category
df_part_3_c_b_count_in_1 = df_part_3_c_b_count_in_1.groupby('item_category').agg({'c_b1_count_in_1': np.sum,
                                                                                  'c_b2_count_in_1': np.sum,
                                                                                  'c_b3_count_in_1': np.sum,
                                                                                  'c_b4_count_in_1': np.sum})
df_part_3_c_b_count_in_1.reset_index(inplace=True)
# total actions of any type on the category within the 1-day window
df_part_3_c_b_count_in_1['c_b_count_in_1'] = df_part_3_c_b_count_in_1['c_b1_count_in_1'] + \
                                             df_part_3_c_b_count_in_1['c_b2_count_in_1'] + \
                                             df_part_3_c_b_count_in_1['c_b3_count_in_1'] + \
                                             df_part_3_c_b_count_in_1['c_b4_count_in_1']
# Assemble all windowed behaviour counts on one row per category; categories
# absent from a shorter window had no activity there, hence 0.
df_part_3_c_b_count = pd.merge(df_part_3_c_b_count_in_6, df_part_3_c_b_count_in_3,
                               on=['item_category'], how='left').fillna(0)
df_part_3_c_b_count = pd.merge(df_part_3_c_b_count, df_part_3_c_b_count_in_1,
                               on=['item_category'], how='left').fillna(0)
# restore integer dtype after fillna upcast the counts to float
c_b_count_columns = ['c_b%s_count_in_%d' % (behavior, window)
                     for window in (6, 3, 1)
                     for behavior in ('1', '2', '3', '4', '')]
df_part_3_c_b_count[c_b_count_columns] = df_part_3_c_b_count[c_b_count_columns].astype(int)
# c_b4_rate: share of type-4 actions among all category actions in 6 days.
df_part_3_c_b_count['c_b4_rate'] = (df_part_3_c_b_count['c_b4_count_in_6']
                                    / df_part_3_c_b_count['c_b_count_in_6'])
# c_b4_diff_time: hours between a category's first recorded action of any
# kind and its first type-4 action.
df_part_3 = df_part_3.sort_values(by=['item_category', 'time'])
first_buy = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_category'], 'first')[
    ['item_category', 'time']]
first_buy.columns = ['item_category', 'b4_first_time']
first_any = df_part_3.drop_duplicates(['item_category'], 'first')[['item_category', 'time']]
first_any.columns = ['item_category', 'b_first_time']
# inner join: only categories with at least one type-4 action keep the feature
df_part_3_c_b_b4_time = pd.merge(first_any, first_buy, on=['item_category'])
df_part_3_c_b_b4_time['c_b4_diff_time'] = (
    df_part_3_c_b_b4_time['b4_first_time'] - df_part_3_c_b_b4_time['b_first_time'])
df_part_3_c_b_b4_time['c_b4_diff_hours'] = df_part_3_c_b_b4_time['c_b4_diff_time'].apply(
    lambda td: td.days * 24 + td.seconds // 3600)
df_part_3_c_b_b4_time = df_part_3_c_b_b4_time[['item_category', 'c_b4_diff_hours']]
# generating feature set C: one row per category with distinct-user counts,
# behaviour counts and the first-action-to-first-buy gap.
f_C_part_3 = pd.merge(df_part_3_c_u_count, df_part_3_c_b_count,
                      on=['item_category'], how='left')
f_C_part_3 = pd.merge(f_C_part_3, df_part_3_c_b_b4_time,
                      on=['item_category'], how='left')
f_C_part_3 = f_C_part_3.round({'c_b4_rate': 3})
# write to csv file
f_C_part_3.to_csv(path_df_part_3_C, index=False)
############################################
'''Step 1.4 feature data set IC of df_part_3
ic_u_rank_in_c (in_6)
ic_b_rank_in_c (in_6)
ic_b4_rank_in_c (in_6)
'''
# get df_part_3_i_ub_count: per-item 6-day user/behaviour counts re-read from
# the I feature file written above.  `with` replaces try/finally and still
# guarantees the handle is closed on error.
with open(path_df_part_3_I, 'r') as path_df:
    df_part_3_I = pd.read_csv(path_df, index_col=False)
df_part_3_i_ub_count = df_part_3_I[['item_id', 'i_u_count_in_6', 'i_b_count_in_6', 'i_b4_count_in_6']]
del df_part_3_I  # drop the full frame; only the 4-column slice is kept alive
# get df_part_3_uic for merge i & c: the candidate (user, item, category) keys
with open(path_df_part_3_uic, 'r') as path_df:
    df_part_3_uic = pd.read_csv(path_df, index_col=False)
# items absent from the I file have no recorded activity: counts default to 0
df_part_3_ic_u_b_count = pd.merge(df_part_3_uic, df_part_3_i_ub_count, on=['item_id'], how='left').fillna(0)
df_part_3_ic_u_b_count = df_part_3_ic_u_b_count.drop_duplicates(['item_id', 'item_category'])
# Rank every item inside its category by 6-day distinct users, total
# behaviours and type-4 count (rank 1 = largest count; ties share the
# minimum rank).
rank_specs = (('i_u_count_in_6', 'ic_u_rank_in_c'),
              ('i_b_count_in_6', 'ic_b_rank_in_c'),
              ('i_b4_count_in_6', 'ic_b4_rank_in_c'))
for count_column, rank_column in rank_specs:
    df_part_3_ic_u_b_count[rank_column] = df_part_3_ic_u_b_count.groupby(
        'item_category')[count_column].rank(method='min', ascending=False).astype('int')
f_IC_part_3 = df_part_3_ic_u_b_count[['item_id',
                                      'item_category',
                                      'ic_u_rank_in_c',
                                      'ic_b_rank_in_c',
                                      'ic_b4_rank_in_c']]
# write to csv file
f_IC_part_3.to_csv(path_df_part_3_IC, index=False)
############################################
'''Step 1.5 feature data set UI of df_part_3
(1)
ui_b1_count_in_6
ui_b2_count_in_6
ui_b3_count_in_6
ui_b4_count_in_6
ui_b_count_in_6
ui_b1_count_in_3
ui_b2_count_in_3
ui_b3_count_in_3
ui_b4_count_in_3
ui_b_count_in_3
ui_b1_count_in_1
ui_b2_count_in_1
ui_b3_count_in_1
ui_b4_count_in_1
ui_b_count_in_1
(2)
ui_b_count_rank_in_u (in_6)
ui_b_count_rank_in_uc (in_6)
(3)
ui_b1_last_hours (in_6)
ui_b2_last_hours (in_6)
ui_b3_last_hours (in_6)
ui_b4_last_hours (in_6)
'''
# Re-read the part-3 interaction log for the (user, item) features; `with`
# guarantees the handle is closed even if read_csv raises.
with open(path_df_part_3, 'r') as path_df:
    df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
    df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
# ui_b_count_in_6: per-(user, item) count of each behaviour type over the
# full window.  NOTE: mutates df_part_3 by (re)writing its 'cumcount' column.
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'item_id', 'behavior_type']).cumcount()
# last row per (user, item, behaviour): cumcount + 1 is that group's size
df_part_3_ui_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
    ['user_id', 'item_id', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_ui_b_count_in_6 = pd.get_dummies(df_part_3_ui_b_count_in_6['behavior_type']).join(
    df_part_3_ui_b_count_in_6[['user_id', 'item_id', 'cumcount']])
df_part_3_ui_b_count_in_6.rename(columns={1: 'behavior_type_1',
                                          2: 'behavior_type_2',
                                          3: 'behavior_type_3',
                                          4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_ui_b_count_in_6['ui_b1_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_1'] * (
        df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6['ui_b2_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_2'] * (
        df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6['ui_b3_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_3'] * (
        df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6['ui_b4_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_4'] * (
        df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6 = df_part_3_ui_b_count_in_6[['user_id',
                                                       'item_id',
                                                       'ui_b1_count_in_6',
                                                       'ui_b2_count_in_6',
                                                       'ui_b3_count_in_6',
                                                       'ui_b4_count_in_6']]
# collapse to one row per (user, item)
df_part_3_ui_b_count_in_6 = df_part_3_ui_b_count_in_6.groupby(['user_id', 'item_id']).agg({'ui_b1_count_in_6': np.sum,
                                                                                           'ui_b2_count_in_6': np.sum,
                                                                                           'ui_b3_count_in_6': np.sum,
                                                                                           'ui_b4_count_in_6': np.sum})
df_part_3_ui_b_count_in_6.reset_index(inplace=True)
# total actions of any type on the pair in the window
df_part_3_ui_b_count_in_6['ui_b_count_in_6'] = df_part_3_ui_b_count_in_6['ui_b1_count_in_6'] + \
                                               df_part_3_ui_b_count_in_6['ui_b2_count_in_6'] + \
                                               df_part_3_ui_b_count_in_6['ui_b3_count_in_6'] + \
                                               df_part_3_ui_b_count_in_6['ui_b4_count_in_6']
# ui_b_count_in_3: per-(user, item) count of each behaviour type within the
# 3-day window (records on/after 2014-12-16).
# BUG FIX: the original deduplicated from the full frame (df_part_3), whose
# 'cumcount' holds the 6-day running index — so every ui_*_count_in_3
# silently reproduced the 6-day count.  Count from the 3-day slice
# df_part_3_in_3 instead, mirroring the in_1 branch below.  `.copy()`
# avoids chained assignment on a view.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].copy()
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'item_id', 'behavior_type']).cumcount()
df_part_3_ui_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
    ['user_id', 'item_id', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_ui_b_count_in_3 = pd.get_dummies(df_part_3_ui_b_count_in_3['behavior_type']).join(
    df_part_3_ui_b_count_in_3[['user_id', 'item_id', 'cumcount']])
df_part_3_ui_b_count_in_3.rename(columns={1: 'behavior_type_1',
                                          2: 'behavior_type_2',
                                          3: 'behavior_type_3',
                                          4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_ui_b_count_in_3['ui_b1_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_1'] * (
        df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3['ui_b2_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_2'] * (
        df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3['ui_b3_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_3'] * (
        df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3['ui_b4_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_4'] * (
        df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3 = df_part_3_ui_b_count_in_3[['user_id',
                                                       'item_id',
                                                       'ui_b1_count_in_3',
                                                       'ui_b2_count_in_3',
                                                       'ui_b3_count_in_3',
                                                       'ui_b4_count_in_3']]
# collapse to one row per (user, item)
df_part_3_ui_b_count_in_3 = df_part_3_ui_b_count_in_3.groupby(['user_id', 'item_id']).agg({'ui_b1_count_in_3': np.sum,
                                                                                           'ui_b2_count_in_3': np.sum,
                                                                                           'ui_b3_count_in_3': np.sum,
                                                                                           'ui_b4_count_in_3': np.sum})
df_part_3_ui_b_count_in_3.reset_index(inplace=True)
# total actions of any type on the pair within the 3-day window
df_part_3_ui_b_count_in_3['ui_b_count_in_3'] = df_part_3_ui_b_count_in_3['ui_b1_count_in_3'] + \
                                               df_part_3_ui_b_count_in_3['ui_b2_count_in_3'] + \
                                               df_part_3_ui_b_count_in_3['ui_b3_count_in_3'] + \
                                               df_part_3_ui_b_count_in_3['ui_b4_count_in_3']
# ui_b_count_in_1: per-(user, item) count of each behaviour type within the
# 1-day window (records on/after 2014-12-18).
# NOTE(review): assigning 'cumcount' onto a boolean-mask slice can trigger
# SettingWithCopyWarning on some pandas versions — confirm target version.
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'item_id', 'behavior_type']).cumcount()
# last row per (user, item, behaviour): cumcount + 1 is that group's size
df_part_3_ui_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
    ['user_id', 'item_id', 'behavior_type', 'cumcount']]
# one-hot the behaviour type so each count can be routed to its own column
df_part_3_ui_b_count_in_1 = pd.get_dummies(df_part_3_ui_b_count_in_1['behavior_type']).join(
    df_part_3_ui_b_count_in_1[['user_id', 'item_id', 'cumcount']])
df_part_3_ui_b_count_in_1.rename(columns={1: 'behavior_type_1',
                                          2: 'behavior_type_2',
                                          3: 'behavior_type_3',
                                          4: 'behavior_type_4'}, inplace=True)
# dummy * (cumcount + 1): count lands in the matching behaviour column
df_part_3_ui_b_count_in_1['ui_b1_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_1'] * (
        df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1['ui_b2_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_2'] * (
        df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1['ui_b3_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_3'] * (
        df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1['ui_b4_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_4'] * (
        df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1 = df_part_3_ui_b_count_in_1[['user_id',
                                                       'item_id',
                                                       'ui_b1_count_in_1',
                                                       'ui_b2_count_in_1',
                                                       'ui_b3_count_in_1',
                                                       'ui_b4_count_in_1']]
# collapse to one row per (user, item)
df_part_3_ui_b_count_in_1 = df_part_3_ui_b_count_in_1.groupby(['user_id', 'item_id']).agg({'ui_b1_count_in_1': np.sum,
                                                                                           'ui_b2_count_in_1': np.sum,
                                                                                           'ui_b3_count_in_1': np.sum,
                                                                                           'ui_b4_count_in_1': np.sum})
df_part_3_ui_b_count_in_1.reset_index(inplace=True)
# total actions of any type on the pair within the 1-day window
df_part_3_ui_b_count_in_1['ui_b_count_in_1'] = df_part_3_ui_b_count_in_1['ui_b1_count_in_1'] + \
                                               df_part_3_ui_b_count_in_1['ui_b2_count_in_1'] + \
                                               df_part_3_ui_b_count_in_1['ui_b3_count_in_1'] + \
                                               df_part_3_ui_b_count_in_1['ui_b4_count_in_1']
# Assemble the per-(user, item) behaviour counts across the three windows;
# pairs missing from a shorter window had no activity there, hence 0.
df_part_3_ui_b_count = pd.merge(df_part_3_ui_b_count_in_6, df_part_3_ui_b_count_in_3,
                                on=['user_id', 'item_id'], how='left').fillna(0)
df_part_3_ui_b_count = pd.merge(df_part_3_ui_b_count, df_part_3_ui_b_count_in_1,
                                on=['user_id', 'item_id'], how='left').fillna(0)
# restore integer dtype after fillna upcast the counts to float
ui_b_count_columns = ['ui_b%s_count_in_%d' % (behavior, window)
                      for window in (6, 3, 1)
                      for behavior in ('1', '2', '3', '4', '')]
df_part_3_ui_b_count[ui_b_count_columns] = df_part_3_ui_b_count[ui_b_count_columns].astype(int)
# ui_b_count_rank_in_u: rank a user's items by 6-day interaction volume
# (rank 1 = most interacted; ties share the minimum rank).
df_part_3_ui_b_count['ui_b_count_rank_in_u'] = df_part_3_ui_b_count.groupby(['user_id'])['ui_b_count_in_6'].rank(
    method='min', ascending=False).astype('int')
# ui_b_count_rank_in_uc: re-rank the per-user ranks within each
# (user, category) group.  The uic key file supplies item_category; `with`
# replaces try/finally and still closes the handle on error.
with open(path_df_part_3_uic, 'r') as path_df:
    df_part_3_uic = pd.read_csv(path_df, index_col=False)
df_part_3_ui_b_count = pd.merge(df_part_3_uic, df_part_3_ui_b_count, on=['user_id', 'item_id'], how='left')
df_part_3_ui_b_count['ui_b_count_rank_in_uc'] = df_part_3_ui_b_count.groupby(['user_id', 'item_category'])[
    'ui_b_count_rank_in_u'].rank(method='min', ascending=True).astype('int')
# ui_b_last_time -> ui_bX_last_hours: for each (user, item) and behaviour
# type, hours between the pair's most recent action of that type and the
# reference date 2014-12-19.
df_part_3.sort_values(by=['user_id', 'item_id', 'behavior_type', 'time'], inplace=True)
# one row per (user, item, behaviour) holding the latest timestamp
df_part_3_ui_b_last_time = df_part_3.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
    ['user_id', 'item_id', 'behavior_type', 'time']]
# pivot the single 'time' column into one column per behaviour type;
# rows of other behaviour types get NaT
df_part_3_ui_b_last_time['ui_b1_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 1][
    'time']
df_part_3_ui_b_last_time['ui_b2_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 2][
    'time']
df_part_3_ui_b_last_time['ui_b3_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 3][
    'time']
df_part_3_ui_b_last_time['ui_b4_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 4][
    'time']
# for each type: timedelta back from 2014-12-19 where a last time exists,
# then convert to whole hours (rows without that behaviour stay NaN)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b1_last_time'].notnull(), 'ui_b1_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b1_last_time'])
df_part_3_ui_b_last_time['ui_b1_last_hours'] = \
    df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b1_last_hours'].notnull()]['ui_b1_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b2_last_time'].notnull(), 'ui_b2_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b2_last_time'])
df_part_3_ui_b_last_time['ui_b2_last_hours'] = \
    df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b2_last_hours'].notnull()]['ui_b2_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b3_last_time'].notnull(), 'ui_b3_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b3_last_time'])
df_part_3_ui_b_last_time['ui_b3_last_hours'] = \
    df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b3_last_hours'].notnull()]['ui_b3_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b4_last_time'].notnull(), 'ui_b4_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b4_last_time'])
df_part_3_ui_b_last_time['ui_b4_last_hours'] = \
    df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b4_last_hours'].notnull()]['ui_b4_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time = df_part_3_ui_b_last_time[['user_id',
                                                     'item_id',
                                                     'ui_b1_last_hours',
                                                     'ui_b2_last_hours',
                                                     'ui_b3_last_hours',
                                                     'ui_b4_last_hours']]
# collapse the one-row-per-behaviour layout to one row per (user, item);
# sum skips NaN, so each column keeps the single populated value
df_part_3_ui_b_last_time = df_part_3_ui_b_last_time.groupby(['user_id', 'item_id']).agg({'ui_b1_last_hours': np.sum,
                                                                                         'ui_b2_last_hours': np.sum,
                                                                                         'ui_b3_last_hours': np.sum,
                                                                                         'ui_b4_last_hours': np.sum})
df_part_3_ui_b_last_time.reset_index(inplace=True)
# merge for generation of f_UI_part_3: per-(user, item) counts, ranks and
# recency features on one row each.
f_UI_part_3 = pd.merge(df_part_3_ui_b_count, df_part_3_ui_b_last_time,
                       how='left', on=['user_id', 'item_id'])
# write to csv file
f_UI_part_3.to_csv(path_df_part_3_UI, index=False)
############################################
'''Step 1.6 feature data set UC of df_part_3
    (1)
    uc_b1_count_in_6
    uc_b2_count_in_6
    uc_b3_count_in_6
    uc_b4_count_in_6
    uc_b_count_in_6
    uc_b1_count_in_3
    uc_b2_count_in_3
    uc_b3_count_in_3
    uc_b4_count_in_3
    uc_b_count_in_3
    uc_b1_count_in_1
    uc_b2_count_in_1
    uc_b3_count_in_1
    uc_b4_count_in_1
    uc_b_count_in_1
    (2)
    uc_b_count_rank_in_u (in_6)
    (3)
    uc_b1_last_hours (in_6)
    uc_b2_last_hours (in_6)
    uc_b3_last_hours (in_6)
    uc_b4_last_hours (in_6)
'''
# Reload the 6-day window of part 3 (column 0 parsed as datetime).
path_df = open(path_df_part_3, 'r')
try:
    df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
    df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
    path_df.close()
# uc_b_count_in_6
# cumcount gives a 0-based running index per (user, category, behaviour);
# keeping only the last row per group turns it into (count - 1).
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'item_category', 'behavior_type']).cumcount()
df_part_3_uc_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'item_category', 'behavior_type'], 'last')[
    ['user_id', 'item_category', 'behavior_type', 'cumcount']]
# One-hot encode the behaviour type (columns named 1..4 by get_dummies).
df_part_3_uc_b_count_in_6 = pd.get_dummies(df_part_3_uc_b_count_in_6['behavior_type']).join(
    df_part_3_uc_b_count_in_6[['user_id', 'item_category', 'cumcount']])
df_part_3_uc_b_count_in_6.rename(columns={1: 'behavior_type_1',
                                          2: 'behavior_type_2',
                                          3: 'behavior_type_3',
                                          4: 'behavior_type_4'}, inplace=True)
# count of behaviour b = dummy_b * (cumcount + 1): non-zero only on the
# row belonging to that behaviour type.
df_part_3_uc_b_count_in_6['uc_b1_count_in_6'] = df_part_3_uc_b_count_in_6['behavior_type_1'] * (
        df_part_3_uc_b_count_in_6['cumcount'] + 1)
df_part_3_uc_b_count_in_6['uc_b2_count_in_6'] = df_part_3_uc_b_count_in_6['behavior_type_2'] * (
        df_part_3_uc_b_count_in_6['cumcount'] + 1)
df_part_3_uc_b_count_in_6['uc_b3_count_in_6'] = df_part_3_uc_b_count_in_6['behavior_type_3'] * (
        df_part_3_uc_b_count_in_6['cumcount'] + 1)
df_part_3_uc_b_count_in_6['uc_b4_count_in_6'] = df_part_3_uc_b_count_in_6['behavior_type_4'] * (
        df_part_3_uc_b_count_in_6['cumcount'] + 1)
df_part_3_uc_b_count_in_6 = df_part_3_uc_b_count_in_6[['user_id',
                                                       'item_category',
                                                       'uc_b1_count_in_6',
                                                       'uc_b2_count_in_6',
                                                       'uc_b3_count_in_6',
                                                       'uc_b4_count_in_6']]
# Sum collapses the per-behaviour rows onto one row per (user, category).
df_part_3_uc_b_count_in_6 = df_part_3_uc_b_count_in_6.groupby(['user_id', 'item_category']).agg(
    {'uc_b1_count_in_6': np.sum,
     'uc_b2_count_in_6': np.sum,
     'uc_b3_count_in_6': np.sum,
     'uc_b4_count_in_6': np.sum})
df_part_3_uc_b_count_in_6.reset_index(inplace=True)
# Total interaction count across the four behaviour types.
df_part_3_uc_b_count_in_6['uc_b_count_in_6'] = df_part_3_uc_b_count_in_6['uc_b1_count_in_6'] + \
                                               df_part_3_uc_b_count_in_6['uc_b2_count_in_6'] + \
                                               df_part_3_uc_b_count_in_6['uc_b3_count_in_6'] + \
                                               df_part_3_uc_b_count_in_6['uc_b4_count_in_6']
# uc_b_count_in_3: user-category behaviour counts over the last 3 days.
# .copy() makes the window an independent frame so the 'cumcount'
# assignment below does not write through a view of df_part_3.
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].copy()
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'item_category', 'behavior_type']).cumcount()
# BUG FIX: the original deduplicated the full 6-day frame (df_part_3),
# so the "in_3" counts silently equalled the 6-day counts.  Deduplicate
# the 3-day window instead (cf. the correct in_1 block below).
df_part_3_uc_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'item_category', 'behavior_type'], 'last')[
    ['user_id', 'item_category', 'behavior_type', 'cumcount']]
df_part_3_uc_b_count_in_3 = pd.get_dummies(df_part_3_uc_b_count_in_3['behavior_type']).join(
    df_part_3_uc_b_count_in_3[['user_id', 'item_category', 'cumcount']])
# Guard: make sure all four dummy columns exist even if some behaviour
# type never occurs inside the 3-day window.
for b in (1, 2, 3, 4):
    if b not in df_part_3_uc_b_count_in_3.columns:
        df_part_3_uc_b_count_in_3[b] = 0
df_part_3_uc_b_count_in_3.rename(columns={1: 'behavior_type_1',
                                          2: 'behavior_type_2',
                                          3: 'behavior_type_3',
                                          4: 'behavior_type_4'}, inplace=True)
# cumcount is 0-based, so the per-behaviour count is dummy * (cumcount+1).
for b in (1, 2, 3, 4):
    df_part_3_uc_b_count_in_3['uc_b%d_count_in_3' % b] = df_part_3_uc_b_count_in_3['behavior_type_%d' % b] * (
        df_part_3_uc_b_count_in_3['cumcount'] + 1)
df_part_3_uc_b_count_in_3 = df_part_3_uc_b_count_in_3[['user_id',
                                                       'item_category',
                                                       'uc_b1_count_in_3',
                                                       'uc_b2_count_in_3',
                                                       'uc_b3_count_in_3',
                                                       'uc_b4_count_in_3']]
# Sum collapses the per-behaviour rows onto one row per (user, category).
df_part_3_uc_b_count_in_3 = df_part_3_uc_b_count_in_3.groupby(['user_id', 'item_category']).agg(
    {'uc_b1_count_in_3': np.sum,
     'uc_b2_count_in_3': np.sum,
     'uc_b3_count_in_3': np.sum,
     'uc_b4_count_in_3': np.sum})
df_part_3_uc_b_count_in_3.reset_index(inplace=True)
# Total interaction count across the four behaviour types.
df_part_3_uc_b_count_in_3['uc_b_count_in_3'] = (df_part_3_uc_b_count_in_3['uc_b1_count_in_3'] +
                                                df_part_3_uc_b_count_in_3['uc_b2_count_in_3'] +
                                                df_part_3_uc_b_count_in_3['uc_b3_count_in_3'] +
                                                df_part_3_uc_b_count_in_3['uc_b4_count_in_3'])
# uc_b_count_in_1: user-category behaviour counts over the final day.
# NOTE(review): assigning 'cumcount' on a slice of df_part_3 triggers a
# SettingWithCopy warning; behaviour is as intended on the pandas
# version used here -- confirm before upgrading pandas.
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'item_category', 'behavior_type']).cumcount()
df_part_3_uc_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'item_category', 'behavior_type'], 'last')[
    ['user_id', 'item_category', 'behavior_type', 'cumcount']]
# One-hot encode the behaviour type (columns named 1..4 by get_dummies).
df_part_3_uc_b_count_in_1 = pd.get_dummies(df_part_3_uc_b_count_in_1['behavior_type']).join(
    df_part_3_uc_b_count_in_1[['user_id', 'item_category', 'cumcount']])
df_part_3_uc_b_count_in_1.rename(columns={1: 'behavior_type_1',
                                          2: 'behavior_type_2',
                                          3: 'behavior_type_3',
                                          4: 'behavior_type_4'}, inplace=True)
# count of behaviour b = dummy_b * (cumcount + 1).
df_part_3_uc_b_count_in_1['uc_b1_count_in_1'] = df_part_3_uc_b_count_in_1['behavior_type_1'] * (
        df_part_3_uc_b_count_in_1['cumcount'] + 1)
df_part_3_uc_b_count_in_1['uc_b2_count_in_1'] = df_part_3_uc_b_count_in_1['behavior_type_2'] * (
        df_part_3_uc_b_count_in_1['cumcount'] + 1)
df_part_3_uc_b_count_in_1['uc_b3_count_in_1'] = df_part_3_uc_b_count_in_1['behavior_type_3'] * (
        df_part_3_uc_b_count_in_1['cumcount'] + 1)
df_part_3_uc_b_count_in_1['uc_b4_count_in_1'] = df_part_3_uc_b_count_in_1['behavior_type_4'] * (
        df_part_3_uc_b_count_in_1['cumcount'] + 1)
df_part_3_uc_b_count_in_1 = df_part_3_uc_b_count_in_1[['user_id',
                                                       'item_category',
                                                       'uc_b1_count_in_1',
                                                       'uc_b2_count_in_1',
                                                       'uc_b3_count_in_1',
                                                       'uc_b4_count_in_1']]
# Sum collapses the per-behaviour rows onto one row per (user, category).
df_part_3_uc_b_count_in_1 = df_part_3_uc_b_count_in_1.groupby(['user_id', 'item_category']).agg(
    {'uc_b1_count_in_1': np.sum,
     'uc_b2_count_in_1': np.sum,
     'uc_b3_count_in_1': np.sum,
     'uc_b4_count_in_1': np.sum})
df_part_3_uc_b_count_in_1.reset_index(inplace=True)
# Total interaction count across the four behaviour types.
df_part_3_uc_b_count_in_1['uc_b_count_in_1'] = df_part_3_uc_b_count_in_1['uc_b1_count_in_1'] + \
                                               df_part_3_uc_b_count_in_1['uc_b2_count_in_1'] + \
                                               df_part_3_uc_b_count_in_1['uc_b3_count_in_1'] + \
                                               df_part_3_uc_b_count_in_1['uc_b4_count_in_1']
# Combine the 6-day, 3-day and 1-day windows; pairs absent from a
# shorter window get 0 via fillna.
df_part_3_uc_b_count = pd.merge(df_part_3_uc_b_count_in_6, df_part_3_uc_b_count_in_3, on=['user_id', 'item_category'],
                                how='left').fillna(0)
df_part_3_uc_b_count = pd.merge(df_part_3_uc_b_count, df_part_3_uc_b_count_in_1, on=['user_id', 'item_category'],
                                how='left').fillna(0)
# Cast all count columns back to int (fillna promoted them to float).
df_part_3_uc_b_count[['uc_b1_count_in_6',
                      'uc_b2_count_in_6',
                      'uc_b3_count_in_6',
                      'uc_b4_count_in_6',
                      'uc_b_count_in_6',
                      'uc_b1_count_in_3',
                      'uc_b2_count_in_3',
                      'uc_b3_count_in_3',
                      'uc_b4_count_in_3',
                      'uc_b_count_in_3',
                      'uc_b1_count_in_1',
                      'uc_b2_count_in_1',
                      'uc_b3_count_in_1',
                      'uc_b4_count_in_1',
                      'uc_b_count_in_1']] = df_part_3_uc_b_count[['uc_b1_count_in_6',
                                                                  'uc_b2_count_in_6',
                                                                  'uc_b3_count_in_6',
                                                                  'uc_b4_count_in_6',
                                                                  'uc_b_count_in_6',
                                                                  'uc_b1_count_in_3',
                                                                  'uc_b2_count_in_3',
                                                                  'uc_b3_count_in_3',
                                                                  'uc_b4_count_in_3',
                                                                  'uc_b_count_in_3',
                                                                  'uc_b1_count_in_1',
                                                                  'uc_b2_count_in_1',
                                                                  'uc_b3_count_in_1',
                                                                  'uc_b4_count_in_1',
                                                                  'uc_b_count_in_1']].astype(int)
# uc_b_count_rank_in_u
# Rank each category within its user by 6-day count, 1 = most active;
# method='min' gives ties the same (smallest) rank.
df_part_3_uc_b_count['uc_b_count_rank_in_u'] = df_part_3_uc_b_count.groupby(['user_id'])['uc_b_count_in_6'].rank(
    method='min', ascending=False).astype('int')
# uc_b_last_time
# After sorting by time, the last row per (user, category, behaviour)
# holds that behaviour's most recent timestamp.
df_part_3.sort_values(by=['user_id', 'item_category', 'behavior_type', 'time'], inplace=True)
df_part_3_uc_b_last_time = df_part_3.drop_duplicates(['user_id', 'item_category', 'behavior_type'], 'last')[
    ['user_id', 'item_category', 'behavior_type', 'time']]
# Spread the single 'time' column into one column per behaviour type
# (NaT on rows of the other behaviours).
df_part_3_uc_b_last_time['uc_b1_last_time'] = df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['behavior_type'] == 1][
    'time']
df_part_3_uc_b_last_time['uc_b2_last_time'] = df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['behavior_type'] == 2][
    'time']
df_part_3_uc_b_last_time['uc_b3_last_time'] = df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['behavior_type'] == 3][
    'time']
df_part_3_uc_b_last_time['uc_b4_last_time'] = df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['behavior_type'] == 4][
    'time']
# Convert each last-occurrence timestamp into whole hours before the
# 2014-12-19 cut-off (days*24 + seconds//3600).
df_part_3_uc_b_last_time.loc[df_part_3_uc_b_last_time['uc_b1_last_time'].notnull(), 'uc_b1_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_uc_b_last_time['uc_b1_last_time'])
df_part_3_uc_b_last_time['uc_b1_last_hours'] = \
    df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['uc_b1_last_hours'].notnull()]['uc_b1_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_uc_b_last_time.loc[df_part_3_uc_b_last_time['uc_b2_last_time'].notnull(), 'uc_b2_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_uc_b_last_time['uc_b2_last_time'])
df_part_3_uc_b_last_time['uc_b2_last_hours'] = \
    df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['uc_b2_last_hours'].notnull()]['uc_b2_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_uc_b_last_time.loc[df_part_3_uc_b_last_time['uc_b3_last_time'].notnull(), 'uc_b3_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_uc_b_last_time['uc_b3_last_time'])
df_part_3_uc_b_last_time['uc_b3_last_hours'] = \
    df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['uc_b3_last_hours'].notnull()]['uc_b3_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_uc_b_last_time.loc[df_part_3_uc_b_last_time['uc_b4_last_time'].notnull(), 'uc_b4_last_hours'] = (
        pd.to_datetime('2014-12-19') - df_part_3_uc_b_last_time['uc_b4_last_time'])
df_part_3_uc_b_last_time['uc_b4_last_hours'] = \
    df_part_3_uc_b_last_time[df_part_3_uc_b_last_time['uc_b4_last_hours'].notnull()]['uc_b4_last_hours'].apply(
        lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_uc_b_last_time = df_part_3_uc_b_last_time[['user_id',
                                                     'item_category',
                                                     'uc_b1_last_hours',
                                                     'uc_b2_last_hours',
                                                     'uc_b3_last_hours',
                                                     'uc_b4_last_hours']]
# Collapse the per-behaviour rows onto one row per (user, category).
df_part_3_uc_b_last_time = df_part_3_uc_b_last_time.groupby(['user_id', 'item_category']).agg(
    {'uc_b1_last_hours': np.sum,
     'uc_b2_last_hours': np.sum,
     'uc_b3_last_hours': np.sum,
     'uc_b4_last_hours': np.sum})
df_part_3_uc_b_last_time.reset_index(inplace=True)
# merge for generation of f_UC_part_3
f_UC_part_3 = pd.merge(df_part_3_uc_b_count, df_part_3_uc_b_last_time, how='left', on=['user_id', 'item_category'])
# write to csv file
f_UC_part_3.to_csv(path_df_part_3_UC, index=False) | [
"pandas.read_csv",
"pandas.get_dummies",
"pandas.merge",
"numpy.datetime64",
"pandas.to_datetime"
] | [((11749, 11814), 'pandas.merge', 'pd.merge', (['df_part_3_u_b_time', 'df_part_3_u_b4_time'], {'on': "['user_id']"}), "(df_part_3_u_b_time, df_part_3_u_b4_time, on=['user_id'])\n", (11757, 11814), True, 'import pandas as pd\n'), ((26533, 26598), 'pandas.merge', 'pd.merge', (['df_part_3_i_b_time', 'df_part_3_i_b4_time'], {'on': "['item_id']"}), "(df_part_3_i_b_time, df_part_3_i_b4_time, on=['item_id'])\n", (26541, 26598), True, 'import pandas as pd\n'), ((26983, 27068), 'pandas.merge', 'pd.merge', (['df_part_3_i_b_count', 'df_part_3_i_b_b4_time'], {'on': "['item_id']", 'how': '"""left"""'}), "(df_part_3_i_b_count, df_part_3_i_b_b4_time, on=['item_id'], how='left'\n )\n", (26991, 27068), True, 'import pandas as pd\n'), ((41740, 41811), 'pandas.merge', 'pd.merge', (['df_part_3_c_b_time', 'df_part_3_c_b4_time'], {'on': "['item_category']"}), "(df_part_3_c_b_time, df_part_3_c_b4_time, on=['item_category'])\n", (41748, 41811), True, 'import pandas as pd\n'), ((42250, 42338), 'pandas.merge', 'pd.merge', (['df_part_3_c_u_count', 'df_part_3_c_b_count'], {'on': "['item_category']", 'how': '"""left"""'}), "(df_part_3_c_u_count, df_part_3_c_b_count, on=['item_category'],\n how='left')\n", (42258, 42338), True, 'import pandas as pd\n'), ((42349, 42426), 'pandas.merge', 'pd.merge', (['f_C_part_3', 'df_part_3_c_b_b4_time'], {'on': "['item_category']", 'how': '"""left"""'}), "(f_C_part_3, df_part_3_c_b_b4_time, on=['item_category'], how='left')\n", (42357, 42426), True, 'import pandas as pd\n'), ((56082, 56170), 'pandas.merge', 'pd.merge', (['df_part_3_uic', 'df_part_3_ui_b_count'], {'on': "['user_id', 'item_id']", 'how': '"""left"""'}), "(df_part_3_uic, df_part_3_ui_b_count, on=['user_id', 'item_id'],\n how='left')\n", (56090, 56170), True, 'import pandas as pd\n'), ((59799, 59899), 'pandas.merge', 'pd.merge', (['df_part_3_ui_b_count', 'df_part_3_ui_b_last_time'], {'how': '"""left"""', 'on': "['user_id', 'item_id']"}), "(df_part_3_ui_b_count, df_part_3_ui_b_last_time, 
how='left', on=[\n 'user_id', 'item_id'])\n", (59807, 59899), True, 'import pandas as pd\n'), ((74112, 74218), 'pandas.merge', 'pd.merge', (['df_part_3_uc_b_count', 'df_part_3_uc_b_last_time'], {'how': '"""left"""', 'on': "['user_id', 'item_category']"}), "(df_part_3_uc_b_count, df_part_3_uc_b_last_time, how='left', on=[\n 'user_id', 'item_category'])\n", (74120, 74218), True, 'import pandas as pd\n'), ((1940, 1994), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)', 'parse_dates': '[0]'}), '(path_df, index_col=False, parse_dates=[0])\n', (1951, 1994), True, 'import pandas as pd\n'), ((12198, 12283), 'pandas.merge', 'pd.merge', (['df_part_3_u_b_count', 'df_part_3_u_b_b4_time'], {'on': "['user_id']", 'how': '"""left"""'}), "(df_part_3_u_b_count, df_part_3_u_b_b4_time, on=['user_id'], how='left'\n )\n", (12206, 12283), True, 'import pandas as pd\n'), ((14286, 14340), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)', 'parse_dates': '[0]'}), '(path_df, index_col=False, parse_dates=[0])\n', (14297, 14340), True, 'import pandas as pd\n'), ((27124, 27193), 'pandas.merge', 'pd.merge', (['f_I_part_3', 'df_part_3_i_u_count'], {'on': "['item_id']", 'how': '"""left"""'}), "(f_I_part_3, df_part_3_i_u_count, on=['item_id'], how='left')\n", (27132, 27193), True, 'import pandas as pd\n'), ((29412, 29466), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)', 'parse_dates': '[0]'}), '(path_df, index_col=False, parse_dates=[0])\n', (29423, 29466), True, 'import pandas as pd\n'), ((42827, 42864), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)'}), '(path_df, index_col=False)\n', (42838, 42864), True, 'import pandas as pd\n'), ((43126, 43163), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)'}), '(path_df, index_col=False)\n', (43137, 43163), True, 'import pandas as pd\n'), ((45017, 45071), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)', 'parse_dates': 
'[0]'}), '(path_df, index_col=False, parse_dates=[0])\n', (45028, 45071), True, 'import pandas as pd\n'), ((55989, 56026), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)'}), '(path_df, index_col=False)\n', (56000, 56026), True, 'import pandas as pd\n'), ((57283, 57311), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (57297, 57311), True, 'import pandas as pd\n'), ((57690, 57718), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (57704, 57718), True, 'import pandas as pd\n'), ((58097, 58125), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (58111, 58125), True, 'import pandas as pd\n'), ((58504, 58532), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (58518, 58532), True, 'import pandas as pd\n'), ((60642, 60696), 'pandas.read_csv', 'pd.read_csv', (['path_df'], {'index_col': '(False)', 'parse_dates': '[0]'}), '(path_df, index_col=False, parse_dates=[0])\n', (60653, 60696), True, 'import pandas as pd\n'), ((71830, 71858), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (71844, 71858), True, 'import pandas as pd\n'), ((72237, 72265), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (72251, 72265), True, 'import pandas as pd\n'), ((72644, 72672), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (72658, 72672), True, 'import pandas as pd\n'), ((73051, 73079), 'pandas.to_datetime', 'pd.to_datetime', (['"""2014-12-19"""'], {}), "('2014-12-19')\n", (73065, 73079), True, 'import pandas as pd\n'), ((2388, 2445), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_u_b_count_in_6['behavior_type']"], {}), "(df_part_3_u_b_count_in_6['behavior_type'])\n", (2402, 2445), True, 'import pandas as pd\n'), ((4413, 4440), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-16"""'], {}), 
"('2014-12-16')\n", (4426, 4440), True, 'import numpy as np\n'), ((4704, 4761), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_u_b_count_in_3['behavior_type']"], {}), "(df_part_3_u_b_count_in_3['behavior_type'])\n", (4718, 4761), True, 'import pandas as pd\n'), ((6729, 6756), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (6742, 6756), True, 'import numpy as np\n'), ((7025, 7082), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_u_b_count_in_1['behavior_type']"], {}), "(df_part_3_u_b_count_in_1['behavior_type'])\n", (7039, 7082), True, 'import pandas as pd\n'), ((9066, 9158), 'pandas.merge', 'pd.merge', (['df_part_3_u_b_count_in_6', 'df_part_3_u_b_count_in_3'], {'on': "['user_id']", 'how': '"""left"""'}), "(df_part_3_u_b_count_in_6, df_part_3_u_b_count_in_3, on=['user_id'],\n how='left')\n", (9074, 9158), True, 'import pandas as pd\n'), ((9220, 9308), 'pandas.merge', 'pd.merge', (['df_part_3_u_b_count', 'df_part_3_u_b_count_in_1'], {'on': "['user_id']", 'how': '"""left"""'}), "(df_part_3_u_b_count, df_part_3_u_b_count_in_1, on=['user_id'], how\n ='left')\n", (9228, 9308), True, 'import pandas as pd\n'), ((15476, 15568), 'pandas.merge', 'pd.merge', (['df_part_3_i_u_count_in_6', 'df_part_3_i_u_count_in_3'], {'on': "['item_id']", 'how': '"""left"""'}), "(df_part_3_i_u_count_in_6, df_part_3_i_u_count_in_3, on=['item_id'],\n how='left')\n", (15484, 15568), True, 'import pandas as pd\n'), ((15662, 15750), 'pandas.merge', 'pd.merge', (['df_part_3_i_u_count', 'df_part_3_i_u_count_in_1'], {'on': "['item_id']", 'how': '"""left"""'}), "(df_part_3_i_u_count, df_part_3_i_u_count_in_1, on=['item_id'], how\n ='left')\n", (15670, 15750), True, 'import pandas as pd\n'), ((16431, 16488), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_i_b_count_in_6['behavior_type']"], {}), "(df_part_3_i_b_count_in_6['behavior_type'])\n", (16445, 16488), True, 'import pandas as pd\n'), ((18690, 18717), 'numpy.datetime64', 'np.datetime64', 
(['"""2014-12-16"""'], {}), "('2014-12-16')\n", (18703, 18717), True, 'import numpy as np\n'), ((18981, 19038), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_i_b_count_in_3['behavior_type']"], {}), "(df_part_3_i_b_count_in_3['behavior_type'])\n", (18995, 19038), True, 'import pandas as pd\n'), ((21240, 21267), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (21253, 21267), True, 'import numpy as np\n'), ((21536, 21593), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_i_b_count_in_1['behavior_type']"], {}), "(df_part_3_i_b_count_in_1['behavior_type'])\n", (21550, 21593), True, 'import pandas as pd\n'), ((23788, 23880), 'pandas.merge', 'pd.merge', (['df_part_3_i_b_count_in_6', 'df_part_3_i_b_count_in_3'], {'on': "['item_id']", 'how': '"""left"""'}), "(df_part_3_i_b_count_in_6, df_part_3_i_b_count_in_3, on=['item_id'],\n how='left')\n", (23796, 23880), True, 'import pandas as pd\n'), ((23974, 24062), 'pandas.merge', 'pd.merge', (['df_part_3_i_b_count', 'df_part_3_i_b_count_in_1'], {'on': "['item_id']", 'how': '"""left"""'}), "(df_part_3_i_b_count, df_part_3_i_b_count_in_1, on=['item_id'], how\n ='left')\n", (23982, 24062), True, 'import pandas as pd\n'), ((30667, 30766), 'pandas.merge', 'pd.merge', (['df_part_3_c_u_count_in_6', 'df_part_3_c_u_count_in_3'], {'on': "['item_category']", 'how': '"""left"""'}), "(df_part_3_c_u_count_in_6, df_part_3_c_u_count_in_3, on=[\n 'item_category'], how='left')\n", (30675, 30766), True, 'import pandas as pd\n'), ((30827, 30921), 'pandas.merge', 'pd.merge', (['df_part_3_c_u_count', 'df_part_3_c_u_count_in_1'], {'on': "['item_category']", 'how': '"""left"""'}), "(df_part_3_c_u_count, df_part_3_c_u_count_in_1, on=['item_category'\n ], how='left')\n", (30835, 30921), True, 'import pandas as pd\n'), ((31562, 31619), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_c_b_count_in_6['behavior_type']"], {}), "(df_part_3_c_b_count_in_6['behavior_type'])\n", (31576, 31619), True, 'import 
pandas as pd\n'), ((33857, 33884), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-16"""'], {}), "('2014-12-16')\n", (33870, 33884), True, 'import numpy as np\n'), ((34171, 34228), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_c_b_count_in_3['behavior_type']"], {}), "(df_part_3_c_b_count_in_3['behavior_type'])\n", (34185, 34228), True, 'import pandas as pd\n'), ((36466, 36493), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (36479, 36493), True, 'import numpy as np\n'), ((36780, 36837), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_c_b_count_in_1['behavior_type']"], {}), "(df_part_3_c_b_count_in_1['behavior_type'])\n", (36794, 36837), True, 'import pandas as pd\n'), ((39031, 39130), 'pandas.merge', 'pd.merge', (['df_part_3_c_b_count_in_6', 'df_part_3_c_b_count_in_3'], {'on': "['item_category']", 'how': '"""left"""'}), "(df_part_3_c_b_count_in_6, df_part_3_c_b_count_in_3, on=[\n 'item_category'], how='left')\n", (39039, 39130), True, 'import pandas as pd\n'), ((39191, 39285), 'pandas.merge', 'pd.merge', (['df_part_3_c_b_count', 'df_part_3_c_b_count_in_1'], {'on': "['item_category']", 'how': '"""left"""'}), "(df_part_3_c_b_count, df_part_3_c_b_count_in_1, on=['item_category'\n ], how='left')\n", (39199, 39285), True, 'import pandas as pd\n'), ((43221, 43294), 'pandas.merge', 'pd.merge', (['df_part_3_uic', 'df_part_3_i_ub_count'], {'on': "['item_id']", 'how': '"""left"""'}), "(df_part_3_uic, df_part_3_i_ub_count, on=['item_id'], how='left')\n", (43229, 43294), True, 'import pandas as pd\n'), ((45501, 45559), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_ui_b_count_in_6['behavior_type']"], {}), "(df_part_3_ui_b_count_in_6['behavior_type'])\n", (45515, 45559), True, 'import pandas as pd\n'), ((47956, 47983), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-16"""'], {}), "('2014-12-16')\n", (47969, 47983), True, 'import numpy as np\n'), ((48282, 48340), 'pandas.get_dummies', 'pd.get_dummies', 
(["df_part_3_ui_b_count_in_3['behavior_type']"], {}), "(df_part_3_ui_b_count_in_3['behavior_type'])\n", (48296, 48340), True, 'import pandas as pd\n'), ((50737, 50764), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (50750, 50764), True, 'import numpy as np\n'), ((51068, 51126), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_ui_b_count_in_1['behavior_type']"], {}), "(df_part_3_ui_b_count_in_1['behavior_type'])\n", (51082, 51126), True, 'import pandas as pd\n'), ((53479, 53585), 'pandas.merge', 'pd.merge', (['df_part_3_ui_b_count_in_6', 'df_part_3_ui_b_count_in_3'], {'on': "['user_id', 'item_id']", 'how': '"""left"""'}), "(df_part_3_ui_b_count_in_6, df_part_3_ui_b_count_in_3, on=[\n 'user_id', 'item_id'], how='left')\n", (53487, 53585), True, 'import pandas as pd\n'), ((53648, 53748), 'pandas.merge', 'pd.merge', (['df_part_3_ui_b_count', 'df_part_3_ui_b_count_in_1'], {'on': "['user_id', 'item_id']", 'how': '"""left"""'}), "(df_part_3_ui_b_count, df_part_3_ui_b_count_in_1, on=['user_id',\n 'item_id'], how='left')\n", (53656, 53748), True, 'import pandas as pd\n'), ((61144, 61202), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_uc_b_count_in_6['behavior_type']"], {}), "(df_part_3_uc_b_count_in_6['behavior_type'])\n", (61158, 61202), True, 'import pandas as pd\n'), ((63365, 63392), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-16"""'], {}), "('2014-12-16')\n", (63378, 63392), True, 'import numpy as np\n'), ((63709, 63767), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_uc_b_count_in_3['behavior_type']"], {}), "(df_part_3_uc_b_count_in_3['behavior_type'])\n", (63723, 63767), True, 'import pandas as pd\n'), ((65930, 65957), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (65943, 65957), True, 'import numpy as np\n'), ((66279, 66337), 'pandas.get_dummies', 'pd.get_dummies', (["df_part_3_uc_b_count_in_1['behavior_type']"], {}), "(df_part_3_uc_b_count_in_1['behavior_type'])\n", 
(66293, 66337), True, 'import pandas as pd\n'), ((68456, 68568), 'pandas.merge', 'pd.merge', (['df_part_3_uc_b_count_in_6', 'df_part_3_uc_b_count_in_3'], {'on': "['user_id', 'item_category']", 'how': '"""left"""'}), "(df_part_3_uc_b_count_in_6, df_part_3_uc_b_count_in_3, on=[\n 'user_id', 'item_category'], how='left')\n", (68464, 68568), True, 'import pandas as pd\n'), ((68631, 68737), 'pandas.merge', 'pd.merge', (['df_part_3_uc_b_count', 'df_part_3_uc_b_count_in_1'], {'on': "['user_id', 'item_category']", 'how': '"""left"""'}), "(df_part_3_uc_b_count, df_part_3_uc_b_count_in_1, on=['user_id',\n 'item_category'], how='left')\n", (68639, 68737), True, 'import pandas as pd\n'), ((14815, 14842), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-16"""'], {}), "('2014-12-16')\n", (14828, 14842), True, 'import numpy as np\n'), ((15149, 15176), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (15162, 15176), True, 'import numpy as np\n'), ((29971, 29998), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-16"""'], {}), "('2014-12-16')\n", (29984, 29998), True, 'import numpy as np\n'), ((30341, 30368), 'numpy.datetime64', 'np.datetime64', (['"""2014-12-18"""'], {}), "('2014-12-18')\n", (30354, 30368), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import h5py
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import sklearn.preprocessing as data_preprocessing
import data_utils
#-----------------data process for human3.6m dataset--------------------
class DataLoader(object):
"""
The Data Loader for human action recognition.
:param seq_length_in: length of input sequence
:param seq_length_out: length of output sequence
"""
def __init__(self,
seq_length_in,
seq_length_out,
data_dir):
self.actions=["walking", "eating", "smoking", "discussion", "directions",
"greeting", "phoning", "posing", "purchases", "sitting",
"sittingdown", "takingphoto", "waiting", "walkingdog",
"walkingtogether"]
self.label_cvt = data_preprocessing.LabelEncoder()
self.label_cvt.fit(self.actions)
self.seq_length_in=seq_length_in
self.seq_length_out=seq_length_out
self.read_all_data(self.actions, data_dir, False)
def define_actions(self,action):
"""
Define the list of actions we are using.
Args
action: String with the passed action. Could be "all"
Returns
actions: List of strings of actions
Raises
ValueError if the action is not included in H3.6M
"""
if action in self.actions:
return [action]
if action == "all":
return self.actions
if action == "all_srnn":
return ["walking", "eating", "smoking", "discussion"]
raise (ValueError, "Unrecognized action: %d" % action)
def read_all_data( self, actions, data_dir, one_hot=False):
"""
Loads data for training/testing and normalizes it.
Args
actions: list of strings (actions) to load
seq_length_in: number of frames to use in the burn-in sequence
seq_length_out: number of frames to use in the output sequence
data_dir: directory to load the data from
one_hot: whether to use one-hot encoding per action
Returns
train_set: dictionary with normalized training data
test_set: dictionary with test data
data_mean: d-long vector with the mean of the training data
data_std: d-long vector with the standard dev of the training data
dim_to_ignore: dimensions that are not used becaused stdev is too small
dim_to_use: dimensions that we are actually using in the model
"""
train_subject_ids = [1,6,7,8,9,11]
test_subject_ids = [5]
train_set, complete_train = data_utils.load_data( data_dir, train_subject_ids, actions, one_hot )
test_set, complete_test = data_utils.load_data( data_dir, test_subject_ids, actions, one_hot )
# Compute normalization stats
data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(complete_train)
# Normalize -- subtract mean, divide by stdev
train_set = data_utils.normalize_data( train_set, data_mean, data_std, dim_to_use, actions, one_hot )
test_set = data_utils.normalize_data( test_set, data_mean, data_std, dim_to_use, actions, one_hot )
print("done reading data.")
self.train_set = train_set
self.test_set = test_set
self.data_mean = data_mean
self.data_std = data_std
self.dim_to_ignore = dim_to_ignore
self.dim_to_use = dim_to_use
self.train_keys = list(self.train_set.keys())
    def get_train_batch(self, batch_size):
        """
        Sample a random training batch.

        Args
          batch_size: number of sequences to draw.
        Returns
          encoder_inputs:  array of shape (batch_size, seq_length_in - 1, dims, 1)
          decoder_outputs: array of shape (batch_size, seq_length_in + seq_length_out, dims, 1)
          yhat: (batch_size, n_actions) one-hot matrix of each sampled
                sequence's action label.
        """
        # Draw sequence keys uniformly at random, with replacement.
        chosen_keys = np.random.choice( len(self.train_keys), batch_size )
        # How many frames in total do we need?
        total_frames = self.seq_length_in + self.seq_length_out
        encoder_inputs = []
        decoder_outputs = []
        yhat = np.zeros([batch_size, len(self.actions)])
        for i in xrange( batch_size ):
            the_key = self.train_keys[ chosen_keys[i] ]
            n, pts = self.train_set[ the_key ].shape
            # Random start frame: skip the first 16 frames and leave room
            # for the full window to fit inside the sequence.
            idx = np.random.randint( 16, n-total_frames )
            # Select the data around the sampled points
            data_sel = self.train_set[ the_key ][idx:idx+total_frames ,:]
            # NOTE(review): assumes the_key[1] holds the action name, matching
            # the key tuples produced by data_utils.load_data -- confirm.
            currentAction = self.label_cvt.transform([the_key[1]])
            yhat[i, currentAction[0]]=1
            # Add the data
            encoder_inputs += [np.expand_dims(data_sel[0:self.seq_length_in-1, :], 0)]
            decoder_outputs += [np.expand_dims(data_sel, 0)]
        # Stack along the batch axis and append a trailing channel axis.
        return np.expand_dims(np.concatenate(encoder_inputs, axis=0), 3), np.expand_dims(np.concatenate(decoder_outputs, axis=0), 3), yhat
    def find_indices_srnn( self, action ):
        """
        Find the same action indices as in SRNN.
        Returns 8 start indices (alternating between subactions 1 and 2)
        into subject 5's test sequences for the given action.
        See https://github.com/asheshjain399/RNNexp/blob/master/structural_rnn/CRFProblems/H3.6m/processdata.py#L325
        """
        # Used a fixed dummy seed, following
        # https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/forecastTrajectories.py#L29
        SEED = 1234567890
        # Dedicated RandomState so the seeds are reproducible and do not
        # disturb (or depend on) the global numpy RNG stream.
        rng = np.random.RandomState( SEED )
        subject = 5
        subaction1 = 1
        subaction2 = 2
        # Lengths of the two subaction sequences for this action.
        T1 = self.test_set[ (subject, action, subaction1, 'even') ].shape[0]
        T2 = self.test_set[ (subject, action, subaction2, 'even') ].shape[0]
        # Each seed needs `prefix` frames before it and `suffix` after it.
        prefix, suffix = 50, 100
        idx = []
        idx.append( rng.randint( 16,T1-prefix-suffix ))
        idx.append( rng.randint( 16,T2-prefix-suffix ))
        idx.append( rng.randint( 16,T1-prefix-suffix ))
        idx.append( rng.randint( 16,T2-prefix-suffix ))
        idx.append( rng.randint( 16,T1-prefix-suffix ))
        idx.append( rng.randint( 16,T2-prefix-suffix ))
        idx.append( rng.randint( 16,T1-prefix-suffix ))
        idx.append( rng.randint( 16,T2-prefix-suffix ))
        return idx
    def get_test_batch(self, action):
        """
        Build the fixed 8-seed evaluation batch for one action, using the
        same seed indices as the SRNN paper (see find_indices_srnn).

        Args
          action: action name; must be one of the H3.6M actions.
        Returns
          encoder_inputs, decoder_outputs, yhat -- same layout as
          get_train_batch, with batch size fixed to 8 and subject 5.
        Raises
          ValueError: if the action name is not recognised.
        """
        actions = ["directions", "discussion", "eating", "greeting", "phoning",
                   "posing", "purchases", "sitting", "sittingdown", "smoking",
                   "takingphoto", "waiting", "walking", "walkingdog", "walkingtogether"]
        if not action in actions:
            raise ValueError("Unrecognized action {0}".format(action))
        frames = {}
        frames[ action ] = self.find_indices_srnn( action )
        batch_size = 8 # we always evaluate 8 seeds
        subject = 5 # we always evaluate on subject 5
        source_seq_len = self.seq_length_in
        target_seq_len = self.seq_length_out
        total_frames = source_seq_len + target_seq_len
        # Seeds alternate between subactions 1 and 2: (action, subaction, idx).
        seeds = [( action, (i%2)+1, frames[action][i] ) for i in range(batch_size)]
        encoder_inputs = []
        decoder_outputs = []
        yhat = np.zeros([batch_size, len(self.actions)])
        for i in xrange( batch_size ):
            _, subsequence, idx = seeds[i]
            # Offset by the 50-frame prefix reserved in find_indices_srnn.
            idx = idx + 50
            data_sel = self.test_set[ (subject, action, subsequence, 'even') ]
            data_sel = data_sel[(idx-source_seq_len):(idx+target_seq_len) ,:]
            encoder_inputs += [np.expand_dims(data_sel[0:source_seq_len-1, :], axis=0)]
            decoder_outputs += [np.expand_dims(data_sel, axis=0)]
            currentAction = self.label_cvt.transform([action])
            yhat[i,currentAction[0]] = 1
        # Stack along the batch axis and append a trailing channel axis.
        return np.expand_dims(np.concatenate(encoder_inputs, axis=0), 3), np.expand_dims(np.concatenate(decoder_outputs, axis=0),3), yhat
def get_srnn_gts(self, one_hot, to_euler=True):
"""
Get the ground truths for srnn's sequences, and convert to Euler angles.
(the error is always computed in Euler angles).
Args
actions: a list of actions to get ground truths for.
model: training model we are using (we only use the "get_batch" method).
test_set: dictionary with normalized training data.
data_mean: d-long vector with the mean of the training data.
data_std: d-long vector with the standard deviation of the training data.
dim_to_ignore: dimensions that we are not using to train/predict.
one_hot: whether the data comes with one-hot encoding indicating action.
to_euler: whether to convert the angles to Euler format or keep thm in exponential map
Returns
srnn_gts_euler: a dictionary where the keys are actions, and the values
are the ground_truth, denormalized expected outputs of srnns's seeds.
"""
srnn_gts_euler = {}
for action in self.actions:
srnn_gt_euler = []
_,srnn_expmap, _ = self.get_test_batch(action)
srnn_expmap = np.squeeze(srnn_expmap)
# expmap -> rotmat -> euler
for i in np.arange(srnn_expmap.shape[0]):
denormed = data_utils.unNormalizeData(srnn_expmap[i, :, :], self.data_mean, self.data_std, self.dim_to_ignore, self.actions,
one_hot)
if to_euler:
for j in np.arange(denormed.shape[0]):
for k in np.arange(0, 97, 3):
denormed[j, k:k + 3] = data_utils.rotmat2euler(
data_utils.expmap2rotmat(denormed[j, k:k + 3]))
srnn_gt_euler.append(denormed);
# Put back in the dictionary
srnn_gts_euler[action] = srnn_gt_euler
return srnn_gts_euler
  def compute_test_error(self,action, pred_pose,srnn_gts_expmap,srnn_gts_euler,one_hot,samples_fname):
    """
    Compute per-frame Euler-angle prediction error for one action, print an
    error table, and append ground-truth and predicted sequences to an HDF5
    sample file.

    Args
      action: action name being evaluated.
      pred_pose: normalized predicted poses; squeezed, then reverted to the
        original pose space (8 seed sequences are evaluated).
      srnn_gts_expmap: dict action -> ground-truth sequences in exponential map.
      srnn_gts_euler: dict action -> ground-truth sequences in Euler angles.
      one_hot: whether the data carries one-hot action encoding.
      samples_fname: path of the HDF5 file opened in append mode.
    """
    pred_pose = np.squeeze(pred_pose)
    # Undo normalization so the error is measured in the original pose space.
    predict_expmap = data_utils.revert_output_format(pred_pose, self.data_mean, self.data_std,
                                                    self.dim_to_ignore,
                                                    self.actions, one_hot)
    mean_errors = np.zeros((len(predict_expmap), predict_expmap[0].shape[0]))
    for i in np.arange(8):
      eulerchannels_pred = np.copy(predict_expmap[i])
      # Convert predictions expmap -> rotmat -> euler, 3 channels at a time.
      for j in np.arange(eulerchannels_pred.shape[0]):
        for k in np.arange(0, 97, 3):
          idx = [k, k+1, k+2]
          eulerchannels_pred[j, idx] = data_utils.rotmat2euler(
            data_utils.expmap2rotmat(predict_expmap[i][j, idx]))
      # Zero the first 6 channels (global translation/rotation) on both sides
      # so they do not contribute to the error.
      eulerchannels_pred[:, 0:6] = 0
      srnn_gts_euler[action][i][:, 0:6] = 0 ## Fixed by ZHEN, we also need
      # Pick only the dimensions with sufficient standard deviation. Others are ignored.
      # the below code is wrong!!!
      # After a look at the original code, I found that the code below must be fixed!!! (ZHEN)
      idx_to_use = np.where(np.std(srnn_gts_euler[action][i], 0) > 1e-4)[0]
      #print(idx_to_use)
      # Skip the first 50 ground-truth frames: presumably the seed prefix
      # (matches the `idx + 50` offset used when the batch was built) -- the
      # error is computed over the predicted horizon only. TODO confirm.
      euc_error = np.power(srnn_gts_euler[action][i][50:, idx_to_use] - eulerchannels_pred[:, idx_to_use], 2)
      euc_error = np.sum(euc_error, 1)
      euc_error = np.sqrt(euc_error)
      mean_errors[i, :] = euc_error
    # Average over the 8 seed sequences -> one error value per frame.
    mean_mean_errors = np.mean(mean_errors, 0)
    print()
    print(action)
    print()
    # Print a table of errors at the standard evaluation horizons.
    print("{0: <16} |".format("milliseconds"), end="")
    for ms in [80, 160, 320, 400, 560, 1000]:
      print(" {0:5d} |".format(ms), end="")
    print()
    print("{0: <16} |".format(action), end="")
    # Frame indices corresponding to the millisecond horizons above.
    for ms in [1,3,7,9,13,24]:
      if self.seq_length_out >= ms+1:
        print(" {0:.3f} |".format( mean_mean_errors[ms] ), end="")
      else:
        print("   n/a |", end="")
    print()
    # Append ground truth, predictions and the mean error curve to the
    # sample file for later inspection.
    with h5py.File(samples_fname, 'a') as hf:
      for i in np.arange(8):
        node_name = 'expmap/gt/{1}_{0}'.format(i, action)
        hf.create_dataset(node_name, data=srnn_gts_expmap[action][i])
        node_name = 'expmap/preds/{1}_{0}'.format(i, action)
        hf.create_dataset(node_name, data=predict_expmap[i])
      node_name = 'mean_{0}_error'.format(action)
      hf.create_dataset(node_name, data=mean_mean_errors)
| [
"numpy.sum",
"data_utils.unNormalizeData",
"numpy.mean",
"numpy.arange",
"numpy.random.randint",
"data_utils.normalization_stats",
"numpy.copy",
"numpy.std",
"numpy.power",
"sklearn.preprocessing.LabelEncoder",
"numpy.random.RandomState",
"data_utils.expmap2rotmat",
"h5py.File",
"data_util... | [((1057, 1090), 'sklearn.preprocessing.LabelEncoder', 'data_preprocessing.LabelEncoder', ([], {}), '()\n', (1088, 1090), True, 'import sklearn.preprocessing as data_preprocessing\n'), ((2916, 2983), 'data_utils.load_data', 'data_utils.load_data', (['data_dir', 'train_subject_ids', 'actions', 'one_hot'], {}), '(data_dir, train_subject_ids, actions, one_hot)\n', (2936, 2983), False, 'import data_utils\n'), ((3022, 3088), 'data_utils.load_data', 'data_utils.load_data', (['data_dir', 'test_subject_ids', 'actions', 'one_hot'], {}), '(data_dir, test_subject_ids, actions, one_hot)\n', (3042, 3088), False, 'import data_utils\n'), ((3189, 3235), 'data_utils.normalization_stats', 'data_utils.normalization_stats', (['complete_train'], {}), '(complete_train)\n', (3219, 3235), False, 'import data_utils\n'), ((3311, 3402), 'data_utils.normalize_data', 'data_utils.normalize_data', (['train_set', 'data_mean', 'data_std', 'dim_to_use', 'actions', 'one_hot'], {}), '(train_set, data_mean, data_std, dim_to_use,\n actions, one_hot)\n', (3336, 3402), False, 'import data_utils\n'), ((3421, 3511), 'data_utils.normalize_data', 'data_utils.normalize_data', (['test_set', 'data_mean', 'data_std', 'dim_to_use', 'actions', 'one_hot'], {}), '(test_set, data_mean, data_std, dim_to_use,\n actions, one_hot)\n', (3446, 3511), False, 'import data_utils\n'), ((4272, 4290), 'six.moves.xrange', 'xrange', (['batch_size'], {}), '(batch_size)\n', (4278, 4290), False, 'from six.moves import xrange\n'), ((5445, 5472), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (5466, 5472), True, 'import numpy as np\n'), ((7216, 7234), 'six.moves.xrange', 'xrange', (['batch_size'], {}), '(batch_size)\n', (7222, 7234), False, 'from six.moves import xrange\n'), ((10070, 10091), 'numpy.squeeze', 'np.squeeze', (['pred_pose'], {}), '(pred_pose)\n', (10080, 10091), True, 'import numpy as np\n'), ((10117, 10237), 'data_utils.revert_output_format', 
'data_utils.revert_output_format', (['pred_pose', 'self.data_mean', 'self.data_std', 'self.dim_to_ignore', 'self.actions', 'one_hot'], {}), '(pred_pose, self.data_mean, self.data_std,\n self.dim_to_ignore, self.actions, one_hot)\n', (10148, 10237), False, 'import data_utils\n'), ((10465, 10477), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (10474, 10477), True, 'import numpy as np\n'), ((11600, 11623), 'numpy.mean', 'np.mean', (['mean_errors', '(0)'], {}), '(mean_errors, 0)\n', (11607, 11623), True, 'import numpy as np\n'), ((4421, 4460), 'numpy.random.randint', 'np.random.randint', (['(16)', '(n - total_frames)'], {}), '(16, n - total_frames)\n', (4438, 4460), True, 'import numpy as np\n'), ((9092, 9115), 'numpy.squeeze', 'np.squeeze', (['srnn_expmap'], {}), '(srnn_expmap)\n', (9102, 9115), True, 'import numpy as np\n'), ((9177, 9208), 'numpy.arange', 'np.arange', (['srnn_expmap.shape[0]'], {}), '(srnn_expmap.shape[0])\n', (9186, 9208), True, 'import numpy as np\n'), ((10525, 10551), 'numpy.copy', 'np.copy', (['predict_expmap[i]'], {}), '(predict_expmap[i])\n', (10532, 10551), True, 'import numpy as np\n'), ((10573, 10611), 'numpy.arange', 'np.arange', (['eulerchannels_pred.shape[0]'], {}), '(eulerchannels_pred.shape[0])\n', (10582, 10611), True, 'import numpy as np\n'), ((11350, 11445), 'numpy.power', 'np.power', (['(srnn_gts_euler[action][i][50:, idx_to_use] - eulerchannels_pred[:, idx_to_use]\n )', '(2)'], {}), '(srnn_gts_euler[action][i][50:, idx_to_use] - eulerchannels_pred[:,\n idx_to_use], 2)\n', (11358, 11445), True, 'import numpy as np\n'), ((11466, 11486), 'numpy.sum', 'np.sum', (['euc_error', '(1)'], {}), '(euc_error, 1)\n', (11472, 11486), True, 'import numpy as np\n'), ((11511, 11529), 'numpy.sqrt', 'np.sqrt', (['euc_error'], {}), '(euc_error)\n', (11518, 11529), True, 'import numpy as np\n'), ((12144, 12173), 'h5py.File', 'h5py.File', (['samples_fname', '"""a"""'], {}), "(samples_fname, 'a')\n", (12153, 12173), False, 'import h5py\n'), 
((12202, 12214), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (12211, 12214), True, 'import numpy as np\n'), ((4757, 4813), 'numpy.expand_dims', 'np.expand_dims', (['data_sel[0:self.seq_length_in - 1, :]', '(0)'], {}), '(data_sel[0:self.seq_length_in - 1, :], 0)\n', (4771, 4813), True, 'import numpy as np\n'), ((4845, 4872), 'numpy.expand_dims', 'np.expand_dims', (['data_sel', '(0)'], {}), '(data_sel, 0)\n', (4859, 4872), True, 'import numpy as np\n'), ((4905, 4943), 'numpy.concatenate', 'np.concatenate', (['encoder_inputs'], {'axis': '(0)'}), '(encoder_inputs, axis=0)\n', (4919, 4943), True, 'import numpy as np\n'), ((4964, 5003), 'numpy.concatenate', 'np.concatenate', (['decoder_outputs'], {'axis': '(0)'}), '(decoder_outputs, axis=0)\n', (4978, 5003), True, 'import numpy as np\n'), ((7514, 7571), 'numpy.expand_dims', 'np.expand_dims', (['data_sel[0:source_seq_len - 1, :]'], {'axis': '(0)'}), '(data_sel[0:source_seq_len - 1, :], axis=0)\n', (7528, 7571), True, 'import numpy as np\n'), ((7603, 7635), 'numpy.expand_dims', 'np.expand_dims', (['data_sel'], {'axis': '(0)'}), '(data_sel, axis=0)\n', (7617, 7635), True, 'import numpy as np\n'), ((7773, 7811), 'numpy.concatenate', 'np.concatenate', (['encoder_inputs'], {'axis': '(0)'}), '(encoder_inputs, axis=0)\n', (7787, 7811), True, 'import numpy as np\n'), ((7832, 7871), 'numpy.concatenate', 'np.concatenate', (['decoder_outputs'], {'axis': '(0)'}), '(decoder_outputs, axis=0)\n', (7846, 7871), True, 'import numpy as np\n'), ((9237, 9364), 'data_utils.unNormalizeData', 'data_utils.unNormalizeData', (['srnn_expmap[i, :, :]', 'self.data_mean', 'self.data_std', 'self.dim_to_ignore', 'self.actions', 'one_hot'], {}), '(srnn_expmap[i, :, :], self.data_mean, self.\n data_std, self.dim_to_ignore, self.actions, one_hot)\n', (9263, 9364), False, 'import data_utils\n'), ((10638, 10657), 'numpy.arange', 'np.arange', (['(0)', '(97)', '(3)'], {}), '(0, 97, 3)\n', (10647, 10657), True, 'import numpy as np\n'), ((9473, 9501), 
'numpy.arange', 'np.arange', (['denormed.shape[0]'], {}), '(denormed.shape[0])\n', (9482, 9501), True, 'import numpy as np\n'), ((9536, 9555), 'numpy.arange', 'np.arange', (['(0)', '(97)', '(3)'], {}), '(0, 97, 3)\n', (9545, 9555), True, 'import numpy as np\n'), ((10797, 10848), 'data_utils.expmap2rotmat', 'data_utils.expmap2rotmat', (['predict_expmap[i][j, idx]'], {}), '(predict_expmap[i][j, idx])\n', (10821, 10848), False, 'import data_utils\n'), ((11246, 11282), 'numpy.std', 'np.std', (['srnn_gts_euler[action][i]', '(0)'], {}), '(srnn_gts_euler[action][i], 0)\n', (11252, 11282), True, 'import numpy as np\n'), ((9665, 9711), 'data_utils.expmap2rotmat', 'data_utils.expmap2rotmat', (['denormed[j, k:k + 3]'], {}), '(denormed[j, k:k + 3])\n', (9689, 9711), False, 'import data_utils\n')] |
import librosa
import numpy
def extract_max(pitches, magnitudes, shape):
    """Collapse piptrack output to one value per frame.

    For each of the ``shape[1]`` frames (columns) keep the maximum pitch
    and the maximum magnitude.

    Returns:
        Pair of 1-D numpy arrays ``(pitches, magnitudes)``.
    """
    n_frames = shape[1]
    best_pitch = [numpy.max(pitches[:, frame]) for frame in range(n_frames)]
    best_mag = [numpy.max(magnitudes[:, frame]) for frame in range(n_frames)]
    return numpy.asarray(best_pitch), numpy.asarray(best_mag)
def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1-D signal by convolving it with a window function.

    Args:
        x: 1-D numpy array to smooth.
        window_len: window size; if < 3 the input is returned unchanged.
        window: 'flat' for a moving average, otherwise the name of a numpy
            window function ('hanning', 'hamming', 'bartlett', 'blackman', ...).

    Returns:
        Smoothed array of the same length as ``x``.

    Raises:
        AttributeError: if ``window`` does not name a numpy attribute.
    """
    if window_len < 3:
        return x
    # Mirror-pad both ends so the convolution does not shrink the edges.
    s = numpy.r_[2 * x[0] - x[window_len - 1::-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of eval()'ing a
        # user-supplied string: identical behavior for valid names, without
        # the arbitrary-code-execution risk of eval.
        w = getattr(numpy, window)(window_len)
    # Normalize the window so the smoothed signal keeps the input's scale.
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # Trim the padding back off.
    return y[window_len:-window_len + 1]
def analyse(y, sr, fmin, fmax):
    """Run librosa pitch tracking and collapse it to per-frame maxima.

    Returns:
        ``(pitches, magnitudes)`` as 1-D arrays, one value per frame.
    """
    raw_pitches, raw_magnitudes = librosa.core.piptrack(
        y=y, sr=sr, S=None, fmin=fmin, fmax=fmax, threshold=0.75)
    frame_shape = numpy.shape(raw_pitches)
    return extract_max(raw_pitches, raw_magnitudes, frame_shape)
def get_pitch_magnitude(audio_data_path, sample_rate):
    """Load an audio file and track its pitch in the 80-250 Hz band.

    Returns:
        ``(samples, pitches, magnitudes)`` where ``samples`` is the mono
        waveform and the other two are per-frame arrays.
    """
    total_duration = librosa.get_duration(filename=str(audio_data_path))
    samples, sr = librosa.load(
        audio_data_path, sr=sample_rate, duration=total_duration, mono=True)
    pitches, magnitudes = analyse(samples, sr, fmin=80, fmax=250)
    return samples, pitches, magnitudes
| [
"numpy.asarray",
"numpy.ones",
"numpy.shape",
"librosa.core.piptrack",
"numpy.max",
"librosa.load"
] | [((812, 891), 'librosa.core.piptrack', 'librosa.core.piptrack', ([], {'y': 'y', 'sr': 'sr', 'S': 'None', 'fmin': 'fmin', 'fmax': 'fmax', 'threshold': '(0.75)'}), '(y=y, sr=sr, S=None, fmin=fmin, fmax=fmax, threshold=0.75)\n', (833, 891), False, 'import librosa\n'), ((952, 972), 'numpy.shape', 'numpy.shape', (['pitches'], {}), '(pitches)\n', (963, 972), False, 'import numpy\n'), ((1207, 1282), 'librosa.load', 'librosa.load', (['audio_data_path'], {'sr': 'sample_rate', 'duration': 'duration', 'mono': '(True)'}), '(audio_data_path, sr=sample_rate, duration=duration, mono=True)\n', (1219, 1282), False, 'import librosa\n'), ((276, 302), 'numpy.asarray', 'numpy.asarray', (['new_pitches'], {}), '(new_pitches)\n', (289, 302), False, 'import numpy\n'), ((304, 333), 'numpy.asarray', 'numpy.asarray', (['new_magnitudes'], {}), '(new_magnitudes)\n', (317, 333), False, 'import numpy\n'), ((567, 594), 'numpy.ones', 'numpy.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (577, 594), False, 'import numpy\n'), ((180, 204), 'numpy.max', 'numpy.max', (['pitches[:, i]'], {}), '(pitches[:, i])\n', (189, 204), False, 'import numpy\n'), ((236, 263), 'numpy.max', 'numpy.max', (['magnitudes[:, i]'], {}), '(magnitudes[:, i])\n', (245, 263), False, 'import numpy\n')] |
import os
import random
import sys
from collections import OrderedDict, defaultdict
from datetime import datetime
from os import path
from time import sleep, time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy import ndimage
from torch import nn, optim
from torch.nn.parallel import data_parallel
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from tqdm import tqdm
from conet.utils.logger import log
from tensorboardX import SummaryWriter
matplotlib.use("agg")
def maybe_mkdir_p(dir):
    """Create directory *dir* (including parents) if it does not exist yet."""
    if not os.path.isdir(dir):
        os.makedirs(dir, exist_ok=True)
class BaseTrainer:
    """Generic training-loop skeleton: epoch loop, tensorboard logging,
    checkpointing and progress plotting.

    Subclasses must set ``network``, ``optimizer``, ``lr_scheduler`` and
    ``tr_gen``/``val_gen`` in :meth:`initialize`, implement
    :meth:`run_iteration`, and append a scalar to
    ``self.val_metrics['val_metric']`` once per epoch (e.g. in
    :meth:`finish_online_evaluation`) -- that metric drives "best model"
    checkpointing (higher is better).
    """

    def __init__(self, output_folder, deterministic=False, max_num_epochs=1000):
        if deterministic:
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        else:
            torch.backends.cudnn.deterministic = False
            torch.backends.cudnn.benchmark = True

        ################# SET THESE IN self.initialize() ###################################
        self.network = None
        self.optimizer = None
        self.lr_scheduler = None
        self.tr_gen = self.val_gen = None
        self.was_initialized = False

        ################# SET THESE IN INIT ################################################
        self.output_folder = output_folder
        self.epoch = 0
        self.max_num_epochs = max_num_epochs
        self.log_file = None
        self.loss = None
        self.train_loss_MA = None
        self.best_val_eval_criterion_MA = None
        self.best_MA_tr_loss_for_patience = None
        self.best_epoch_based_on_MA_tr_loss = None

        # Per-epoch histories: name -> [value for each epoch so far].
        self.tr_log_items = defaultdict(list)
        self.val_log_items = defaultdict(list)
        self.val_metrics = defaultdict(list)

        self.deterministic = deterministic
        self.board_writer = SummaryWriter(self.output_folder)

    def run_training(self):
        """Run the full training loop up to ``max_num_epochs``.

        Saves ``model_best.model`` whenever ``val_metrics['val_metric']``
        improves and ``model_final_checkpoint.model`` at the end.
        """
        torch.cuda.empty_cache()
        bst_val_metric = None
        while self.epoch < self.max_num_epochs:
            self.print_to_log_file("\nepoch: ", self.epoch)

            # ---- train one epoch ----
            train_losses_epoch = defaultdict(list)
            self.network.train()
            tbar = tqdm(self.tr_gen, ascii=True)
            for b in tbar:
                log_bags = self.run_iteration(b, True)
                l = log_bags['loss']
                tbar.set_description(f'train loss: {l:.5f}')
                for loss_name, tr_loss in log_bags.items():
                    train_losses_epoch[loss_name].append(tr_loss)
            for loss_name, loss_epoch in train_losses_epoch.items():
                l_e = np.mean(loss_epoch)
                self.board_writer.add_scalar(
                    f'tr/{loss_name}', l_e, self.epoch)
                self.tr_log_items[loss_name].append(l_e)
            log_message = ' '.join(
                [f'{loss_name}: {self.tr_log_items[loss_name][-1]:.4f}' for loss_name in self.tr_log_items.keys()])
            self.print_to_log_file(f"train {log_message}")

            # ---- validation with train=False ----
            with torch.no_grad():
                self.network.eval()
                val_losses_epoch = defaultdict(list)
                tbar = tqdm(self.val_gen, ascii=True)
                for b in tbar:
                    log_bags = self.run_iteration(b, False, True)
                    l = log_bags['loss']
                    tbar.set_description(f'val loss: {l:.5f}')
                    for loss_name, val_loss in log_bags.items():
                        val_losses_epoch[loss_name].append(val_loss)
                for loss_name, loss_epoch in val_losses_epoch.items():
                    l_e = np.mean(loss_epoch)
                    self.board_writer.add_scalar(
                        f'val/{loss_name}', l_e, self.epoch)
                    self.val_log_items[loss_name].append(l_e)
                self.on_epoch_end()

                # Save the "best" checkpoint when the validation metric
                # improves.  (The previous code read a nonexistent
                # `self.all_val_eval_metrics` attribute and overwrote the
                # running best with the current value before comparing, so
                # model_best.model was never written.)
                current_metric = self.val_metrics['val_metric'][-1]
                if bst_val_metric is None or bst_val_metric < current_metric:
                    bst_val_metric = current_metric
                    self.save_checkpoint(
                        path.join(self.output_folder, "model_best.model"))
                self.lr_scheduler.step(np.mean(val_losses_epoch['loss']))
            self.print_to_log_file(
                "val loss (train=False): %.4f" % self.val_log_items['loss'][-1])
            self.epoch += 1

        self.save_checkpoint(path.join(self.output_folder,
                                       "model_final_checkpoint.model"))
        # "latest" would be identical to "final" now, so delete it.
        if path.isfile(path.join(self.output_folder, "model_latest.model")):
            os.remove(path.join(self.output_folder, "model_latest.model"))
        if path.isfile(path.join(self.output_folder, "model_latest.model.pkl")):
            os.remove(path.join(self.output_folder, "model_latest.model.pkl"))

    def save_checkpoint(self, fname, save_optimizer=True):
        """Serialize network, optimizer, scheduler state and the loss/metric
        histories to ``fname``.  Network weights are moved to CPU first."""
        start_time = time()
        state_dict = self.network.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()
        # ReduceLROnPlateau state is deliberately not saved -- mirrors the
        # isinstance check in load_checkpoint_ram.
        lr_sched_state_dct = None
        if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
            lr_sched_state_dct = self.lr_scheduler.state_dict()
        optimizer_state_dict = self.optimizer.state_dict() if save_optimizer else None
        self.print_to_log_file("saving checkpoint...")
        torch.save({
            'epoch': self.epoch + 1,
            'state_dict': state_dict,
            'optimizer_state_dict': optimizer_state_dict,
            'lr_scheduler_state_dict': lr_sched_state_dct,
            'plot_stuff': (self.tr_log_items, self.val_log_items, self.val_metrics)},
            fname)
        self.print_to_log_file(
            "done, saving took %.2f seconds" % (time() - start_time))

    def load_best_checkpoint(self, train=True):
        """Load ``model_best.model`` from the output folder."""
        self.load_checkpoint(path.join(self.output_folder,
                                       "model_best.model"), train=train)

    def load_latest_checkpoint(self, train=True):
        """Load the most recent checkpoint: final, then latest, then the
        highest-numbered epoch checkpoint, then best."""
        if path.isfile(path.join(self.output_folder, "model_final_checkpoint.model")):
            return self.load_checkpoint(path.join(self.output_folder, "model_final_checkpoint.model"), train=train)
        if path.isfile(path.join(self.output_folder, "model_latest.model")):
            return self.load_checkpoint(path.join(self.output_folder, "model_latest.model"), train=train)
        all_checkpoints = [i for i in os.listdir(
            self.output_folder) if i.endswith(".model") and i.find("_ep_") != -1]
        if len(all_checkpoints) == 0:
            return self.load_best_checkpoint(train=train)
        corresponding_epochs = [int(i.split("_")[-1].split(".")[0])
                                for i in all_checkpoints]
        checkpoint = all_checkpoints[np.argmax(corresponding_epochs)]
        self.load_checkpoint(
            path.join(self.output_folder, checkpoint), train=train)

    def load_checkpoint(self, fname, train=True):
        """Load a checkpoint file onto the current CUDA device."""
        self.print_to_log_file("loading checkpoint", fname, "train=", train)
        if not self.was_initialized:
            self.initialize(train)
        saved_model = torch.load(fname, map_location=torch.device(
            'cuda', torch.cuda.current_device()))
        self.load_checkpoint_ram(saved_model, train)

    def load_checkpoint_ram(self, saved_model, train=True):
        """Restore state from an already-deserialized checkpoint dict.

        :param saved_model: dict as written by :meth:`save_checkpoint`.
        :param train: also restore optimizer/scheduler state if True.
        """
        if not self.was_initialized:
            self.initialize(train)
        new_state_dict = OrderedDict()
        curr_state_dict_keys = list(self.network.state_dict().keys())
        # If the state dict comes from nn.DataParallel but we use a
        # non-parallel model here, the keys carry a "module." prefix --
        # strip it so they match.
        for k, value in saved_model['state_dict'].items():
            key = k
            if key not in curr_state_dict_keys:
                key = key[7:]
            new_state_dict[key] = value
        self.network.load_state_dict(new_state_dict)
        self.epoch = saved_model['epoch']
        print(f'Load from epoch: {self.epoch}')
        if train:
            optimizer_state_dict = saved_model['optimizer_state_dict']
            if optimizer_state_dict is not None:
                self.optimizer.load_state_dict(optimizer_state_dict)
            if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                self.lr_scheduler.load_state_dict(
                    saved_model['lr_scheduler_state_dict'])
        self.tr_log_items, self.val_log_items, self.val_metrics = saved_model[
            'plot_stuff']

    def run_iteration(self, data_dict, do_backprop=True, run_online_evaluation=False):
        """Process one batch; must return a dict containing at least 'loss'."""
        raise NotImplementedError

    def validate(self, train=True):
        """Hook for subclass-specific validation; no-op by default."""
        pass

    def initialize(self):
        """Hook for subclass setup (network, optimizer, ...); no-op by default."""
        pass

    def log_val_metric(self):
        """Write the most recent value of every validation metric to tensorboard."""
        for metric_name, metric_history in self.val_metrics.items():
            # Histories are lists; add_scalar needs a single scalar, so log
            # the latest entry (the old code passed the whole list).
            self.board_writer.add_scalar(
                f'metric/{metric_name}', metric_history[-1], self.epoch)

    def on_epoch_end(self):
        """Per-epoch hook run after validation: finish metrics, refresh plot."""
        self.finish_online_evaluation()
        self.plot_progress()

    def plot_progress(self):
        """Plot loss histories (left axis) and metrics (right axis) to
        ``progress.png`` in the output folder."""
        try:
            font = {'weight': 'normal',
                    'size': 18}
            matplotlib.rc('font', **font)
            fig = plt.figure(figsize=(30, 24))
            ax = fig.add_subplot(111)
            ax2 = ax.twinx()
            colors = "bgrcmykw"
            color_index = 0
            x_values = list(range(self.epoch + 1))
            for loss_name in self.tr_log_items.keys():
                ax.plot(x_values, self.tr_log_items[loss_name], color=colors[color_index], ls='-',
                        label=f"{loss_name}_tr")
                color_index += 1
            for loss_name in self.val_log_items.keys():
                ax.plot(x_values, self.val_log_items[loss_name], color=colors[color_index], ls='-',
                        label=f"{loss_name}_val")
                color_index += 1
            for metric_name in self.val_metrics.keys():
                ax2.plot(x_values, self.val_metrics[metric_name], color=colors[color_index], ls='--',
                         label=f"{metric_name}")
                color_index += 1
            ax.set_xlabel("epoch")
            ax.set_ylabel("loss")
            ax2.set_ylabel("evaluation metric")
            ax.legend()
            ax2.legend(loc=9)
            fig.savefig(path.join(self.output_folder, "progress.png"))
            plt.close()
        except IOError:
            self.print_to_log_file("failed to plot: ", sys.exc_info())

    def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
        """Append *args* to the run's log file, creating it on first use.

        Retries up to 5 times on IOError (e.g. network filesystems)."""
        timestamp = time()
        dt_object = datetime.fromtimestamp(timestamp)
        if add_timestamp:
            args = ("%s:" % dt_object, *args)
        if self.log_file is None:
            maybe_mkdir_p(self.output_folder)
            # Use a separate name for the filename timestamp: the old code
            # rebound `timestamp` to a datetime, which broke
            # datetime.fromtimestamp(timestamp) in the except path below.
            now = datetime.now()
            self.log_file = path.join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" %
                                      (now.year, now.month, now.day, now.hour, now.minute,
                                       now.second))
            with open(self.log_file, 'w') as f:
                f.write("Starting... \n")
        successful = False
        max_attempts = 5
        ctr = 0
        while not successful and ctr < max_attempts:
            try:
                with open(self.log_file, 'a+') as f:
                    for a in args:
                        f.write(str(a))
                        f.write(" ")
                    f.write("\n")
                successful = True
            except IOError:
                print("%s: failed to log: " %
                      datetime.fromtimestamp(timestamp), sys.exc_info())
                sleep(0.5)
                ctr += 1
        if also_print_to_console:
            print(*args)

    def run_online_evaluation(self, *args, **kwargs):
        """
        Can be implemented, does not have to
        :param output_torch:
        :param target_npy:
        :return:
        """
        pass

    def finish_online_evaluation(self, *args, **kwargs):
        """Hook to aggregate per-batch evaluation results; no-op by default."""
        pass
| [
"matplotlib.rc",
"numpy.argmax",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.mean",
"sys.exc_info",
"torch.cuda.current_device",
"torch.no_grad",
"os.path.join",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"tqdm.tqdm",
"time.sleep",
"matplotlib.use",
"datetime.d... | [((519, 540), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (533, 540), False, 'import matplotlib\n'), ((571, 602), 'os.makedirs', 'os.makedirs', (['dir'], {'exist_ok': '(True)'}), '(dir, exist_ok=True)\n', (582, 602), False, 'import os\n'), ((1819, 1836), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1830, 1836), False, 'from collections import OrderedDict, defaultdict\n'), ((1866, 1883), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1877, 1883), False, 'from collections import OrderedDict, defaultdict\n'), ((1911, 1928), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1922, 1928), False, 'from collections import OrderedDict, defaultdict\n'), ((2064, 2097), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['self.output_folder'], {}), '(self.output_folder)\n', (2077, 2097), False, 'from tensorboardX import SummaryWriter\n'), ((2135, 2159), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2157, 2159), False, 'import torch\n'), ((5538, 5544), 'time.time', 'time', ([], {}), '()\n', (5542, 5544), False, 'from time import sleep, time\n'), ((6216, 6465), 'torch.save', 'torch.save', (["{'epoch': self.epoch + 1, 'state_dict': state_dict, 'optimizer_state_dict':\n optimizer_state_dict, 'lr_scheduler_state_dict': lr_sched_state_dct,\n 'plot_stuff': (self.tr_log_items, self.val_log_items, self.val_metrics)}", 'fname'], {}), "({'epoch': self.epoch + 1, 'state_dict': state_dict,\n 'optimizer_state_dict': optimizer_state_dict, 'lr_scheduler_state_dict':\n lr_sched_state_dct, 'plot_stuff': (self.tr_log_items, self.\n val_log_items, self.val_metrics)}, fname)\n", (6226, 6465), False, 'import torch\n'), ((8441, 8454), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8452, 8454), False, 'from collections import OrderedDict, defaultdict\n'), ((11741, 11747), 'time.time', 'time', ([], {}), '()\n', (11745, 11747), False, 'from time import 
sleep, time\n'), ((11768, 11801), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (11790, 11801), False, 'from datetime import datetime\n'), ((2361, 2378), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2372, 2378), False, 'from collections import OrderedDict, defaultdict\n'), ((2462, 2491), 'tqdm.tqdm', 'tqdm', (['self.tr_gen'], {'ascii': '(True)'}), '(self.tr_gen, ascii=True)\n', (2466, 2491), False, 'from tqdm import tqdm\n'), ((4973, 5034), 'os.path.join', 'path.join', (['self.output_folder', '"""model_final_checkpoint.model"""'], {}), "(self.output_folder, 'model_final_checkpoint.model')\n", (4982, 5034), False, 'from os import path\n'), ((5168, 5219), 'os.path.join', 'path.join', (['self.output_folder', '"""model_latest.model"""'], {}), "(self.output_folder, 'model_latest.model')\n", (5177, 5219), False, 'from os import path\n'), ((5320, 5375), 'os.path.join', 'path.join', (['self.output_folder', '"""model_latest.model.pkl"""'], {}), "(self.output_folder, 'model_latest.model.pkl')\n", (5329, 5375), False, 'from os import path\n'), ((6706, 6755), 'os.path.join', 'path.join', (['self.output_folder', '"""model_best.model"""'], {}), "(self.output_folder, 'model_best.model')\n", (6715, 6755), False, 'from os import path\n'), ((6883, 6944), 'os.path.join', 'path.join', (['self.output_folder', '"""model_final_checkpoint.model"""'], {}), "(self.output_folder, 'model_final_checkpoint.model')\n", (6892, 6944), False, 'from os import path\n'), ((7086, 7137), 'os.path.join', 'path.join', (['self.output_folder', '"""model_latest.model"""'], {}), "(self.output_folder, 'model_latest.model')\n", (7095, 7137), False, 'from os import path\n'), ((7637, 7668), 'numpy.argmax', 'np.argmax', (['corresponding_epochs'], {}), '(corresponding_epochs)\n', (7646, 7668), True, 'import numpy as np\n'), ((7712, 7753), 'os.path.join', 'path.join', (['self.output_folder', 'checkpoint'], {}), '(self.output_folder, 
checkpoint)\n', (7721, 7753), False, 'from os import path\n'), ((10292, 10321), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (10305, 10321), False, 'import matplotlib\n'), ((10341, 10369), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 24)'}), '(figsize=(30, 24))\n', (10351, 10369), True, 'import matplotlib.pyplot as plt\n'), ((11524, 11535), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11533, 11535), True, 'import matplotlib.pyplot as plt\n'), ((11980, 11994), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11992, 11994), False, 'from datetime import datetime\n'), ((12023, 12218), 'os.path.join', 'path.join', (['self.output_folder', "('training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt' % (timestamp.year,\n timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,\n timestamp.second))"], {}), "(self.output_folder, \n 'training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt' % (timestamp.year,\n timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,\n timestamp.second))\n", (12032, 12218), False, 'from os import path\n'), ((2890, 2909), 'numpy.mean', 'np.mean', (['loss_epoch'], {}), '(loss_epoch)\n', (2897, 2909), True, 'import numpy as np\n'), ((3299, 3314), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3312, 3314), False, 'import torch\n'), ((3433, 3450), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3444, 3450), False, 'from collections import OrderedDict, defaultdict\n'), ((3474, 3504), 'tqdm.tqdm', 'tqdm', (['self.val_gen'], {'ascii': '(True)'}), '(self.val_gen, ascii=True)\n', (3478, 3504), False, 'from tqdm import tqdm\n'), ((3930, 3949), 'numpy.mean', 'np.mean', (['loss_epoch'], {}), '(loss_epoch)\n', (3937, 3949), True, 'import numpy as np\n'), ((4597, 4630), 'numpy.mean', 'np.mean', (["val_losses_epoch['loss']"], {}), "(val_losses_epoch['loss'])\n", (4604, 4630), True, 'import numpy as np\n'), ((5244, 5295), 'os.path.join', 'path.join', 
(['self.output_folder', '"""model_latest.model"""'], {}), "(self.output_folder, 'model_latest.model')\n", (5253, 5295), False, 'from os import path\n'), ((5400, 5455), 'os.path.join', 'path.join', (['self.output_folder', '"""model_latest.model.pkl"""'], {}), "(self.output_folder, 'model_latest.model.pkl')\n", (5409, 5455), False, 'from os import path\n'), ((6987, 7048), 'os.path.join', 'path.join', (['self.output_folder', '"""model_final_checkpoint.model"""'], {}), "(self.output_folder, 'model_final_checkpoint.model')\n", (6996, 7048), False, 'from os import path\n'), ((7180, 7231), 'os.path.join', 'path.join', (['self.output_folder', '"""model_latest.model"""'], {}), "(self.output_folder, 'model_latest.model')\n", (7189, 7231), False, 'from os import path\n'), ((7284, 7314), 'os.listdir', 'os.listdir', (['self.output_folder'], {}), '(self.output_folder)\n', (7294, 7314), False, 'import os\n'), ((11465, 11510), 'os.path.join', 'path.join', (['self.output_folder', '"""progress.png"""'], {}), "(self.output_folder, 'progress.png')\n", (11474, 11510), False, 'from os import path\n'), ((4510, 4559), 'os.path.join', 'path.join', (['self.output_folder', '"""model_best.model"""'], {}), "(self.output_folder, 'model_best.model')\n", (4519, 4559), False, 'from os import path\n'), ((6606, 6612), 'time.time', 'time', ([], {}), '()\n', (6610, 6612), False, 'from time import sleep, time\n'), ((8055, 8082), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (8080, 8082), False, 'import torch\n'), ((11615, 11629), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (11627, 11629), False, 'import sys\n'), ((12907, 12917), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (12912, 12917), False, 'from time import sleep, time\n'), ((12875, 12889), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12887, 12889), False, 'import sys\n'), ((12840, 12873), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (12862, 
12873), False, 'from datetime import datetime\n')] |
import tensorflow as tf
# Load the pre-trained Keras car-damage classifier from the working directory.
model = tf.keras.models.load_model('model_car_damage.h5')
import streamlit as st
# Page title (markdown).
st.write("""
# upload car image
"""
)
st.write("This is a simple image classification web app to predict type of car damage")
# Uploaded image file handle; None until the user selects a file.
file = st.file_uploader("Please upload an image file", type=["jpg", "png","jpeg"])
import cv2
from PIL import Image, ImageOps
import numpy as np
def import_and_predict(image_data, model):
    """Preprocess an uploaded PIL image and run the damage classifier.

    :param image_data: PIL.Image uploaded by the user
    :param model: loaded Keras model expecting a (1, 224, 224, 3) batch
    :return: model prediction array (class probabilities)
    """
    size = (150, 150)
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS has been the
    # equivalent resampling filter since Pillow 2.7 and works on all versions.
    image = ImageOps.fit(image_data, size, Image.LANCZOS)
    image = np.asarray(image)
    img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # NOTE(review): the image is first fitted to 150x150 and then resized to
    # 224x224 — presumably to match the network input; confirm intent.
    img_resize = cv2.resize(img, dsize=(224, 224), interpolation=cv2.INTER_CUBIC) / 255.0
    # Add the batch dimension and normalise to [0, 1].
    img_reshape = img_resize[np.newaxis, ...]
    prediction = model.predict(img_reshape)
    return prediction
if file is None:
    st.text("Please upload an image file")
else:
    image = Image.open(file)
    st.image(image, use_column_width=True)
    prediction = import_and_predict(image, model)
    # Class labels indexed by model output class; previously np.argmax was
    # re-evaluated in every if/elif branch — compute it once instead.
    damage_labels = [
        "bumper dent!",
        "bumper scratch!",
        "door dent!",
        "door_scratch!",
        "glass_shatter!",
        "head_lamp!",
        "multiple_damage!",
        "tail_lamp!",
    ]
    predicted_class = int(np.argmax(prediction))
    # Any index >= 7 falls back to "tail_lamp!", matching the original else.
    st.write(damage_labels[min(predicted_class, len(damage_labels) - 1)])
    # Legend for the raw probability vector ("dumper" typo fixed to "bumper").
    st.text("Probability (0: bumper dent, 1: bumper scratch, 2: door dent, 3: door scratch, 4: glass shatter, 5: head lamp, 6: multiple damage, 7: tail lamp")
st.write(prediction) | [
"tensorflow.keras.models.load_model",
"streamlit.image",
"PIL.ImageOps.fit",
"cv2.cvtColor",
"numpy.argmax",
"numpy.asarray",
"streamlit.file_uploader",
"streamlit.write",
"PIL.Image.open",
"streamlit.text",
"cv2.resize"
] | [((32, 81), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model_car_damage.h5"""'], {}), "('model_car_damage.h5')\n", (58, 81), True, 'import tensorflow as tf\n'), ((105, 159), 'streamlit.write', 'st.write', (['"""\n # upload car image\n """'], {}), '("""\n # upload car image\n """)\n', (113, 159), True, 'import streamlit as st\n'), ((170, 267), 'streamlit.write', 'st.write', (['"""This is a simple image classification web app to predict type of car damage"""'], {}), "(\n 'This is a simple image classification web app to predict type of car damage'\n )\n", (178, 267), True, 'import streamlit as st\n'), ((265, 341), 'streamlit.file_uploader', 'st.file_uploader', (['"""Please upload an image file"""'], {'type': "['jpg', 'png', 'jpeg']"}), "('Please upload an image file', type=['jpg', 'png', 'jpeg'])\n", (281, 341), True, 'import streamlit as st\n'), ((497, 544), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image_data', 'size', 'Image.ANTIALIAS'], {}), '(image_data, size, Image.ANTIALIAS)\n', (509, 544), False, 'from PIL import Image, ImageOps\n'), ((561, 578), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (571, 578), True, 'import numpy as np\n'), ((593, 631), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (605, 631), False, 'import cv2\n'), ((895, 933), 'streamlit.text', 'st.text', (['"""Please upload an image file"""'], {}), "('Please upload an image file')\n", (902, 933), True, 'import streamlit as st\n'), ((952, 968), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (962, 968), False, 'from PIL import Image, ImageOps\n'), ((973, 1011), 'streamlit.image', 'st.image', (['image'], {'use_column_width': '(True)'}), '(image, use_column_width=True)\n', (981, 1011), True, 'import streamlit as st\n'), ((1616, 1780), 'streamlit.text', 'st.text', (['"""Probability (0: dumper dent, 1: bumper scratch, 2: door dent, 3: door scratch, 4: glass shatter, 5: head lamp, 6: 
multiple damage, 7: tail lamp"""'], {}), "(\n 'Probability (0: dumper dent, 1: bumper scratch, 2: door dent, 3: door scratch, 4: glass shatter, 5: head lamp, 6: multiple damage, 7: tail lamp'\n )\n", (1623, 1780), True, 'import streamlit as st\n'), ((1775, 1795), 'streamlit.write', 'st.write', (['prediction'], {}), '(prediction)\n', (1783, 1795), True, 'import streamlit as st\n'), ((654, 718), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(224, 224)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)\n', (664, 718), False, 'import cv2\n'), ((1074, 1095), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1083, 1095), True, 'import numpy as np\n'), ((1110, 1134), 'streamlit.write', 'st.write', (['"""bumper dent!"""'], {}), "('bumper dent!')\n", (1118, 1134), True, 'import streamlit as st\n'), ((1144, 1165), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1153, 1165), True, 'import numpy as np\n'), ((1180, 1207), 'streamlit.write', 'st.write', (['"""bumper scratch!"""'], {}), "('bumper scratch!')\n", (1188, 1207), True, 'import streamlit as st\n'), ((1217, 1238), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1226, 1238), True, 'import numpy as np\n'), ((1253, 1275), 'streamlit.write', 'st.write', (['"""door dent!"""'], {}), "('door dent!')\n", (1261, 1275), True, 'import streamlit as st\n'), ((1285, 1306), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1294, 1306), True, 'import numpy as np\n'), ((1321, 1346), 'streamlit.write', 'st.write', (['"""door_scratch!"""'], {}), "('door_scratch!')\n", (1329, 1346), True, 'import streamlit as st\n'), ((1356, 1377), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1365, 1377), True, 'import numpy as np\n'), ((1392, 1418), 'streamlit.write', 'st.write', (['"""glass_shatter!"""'], {}), "('glass_shatter!')\n", (1400, 1418), True, 'import streamlit as st\n'), ((1428, 1449), 
'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1437, 1449), True, 'import numpy as np\n'), ((1464, 1486), 'streamlit.write', 'st.write', (['"""head_lamp!"""'], {}), "('head_lamp!')\n", (1472, 1486), True, 'import streamlit as st\n'), ((1496, 1517), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1505, 1517), True, 'import numpy as np\n'), ((1532, 1560), 'streamlit.write', 'st.write', (['"""multiple_damage!"""'], {}), "('multiple_damage!')\n", (1540, 1560), True, 'import streamlit as st\n'), ((1584, 1606), 'streamlit.write', 'st.write', (['"""tail_lamp!"""'], {}), "('tail_lamp!')\n", (1592, 1606), True, 'import streamlit as st\n')] |
# File: bayesian_gp.py
# File Created: Thursday, 7th November 2019 9:55:27 am
# Author: <NAME> (<EMAIL>)
"""
Simple Bayesian Gaussian process
Example usage:
>>> model = BayesianGP(x, y)
>>> model.raw_scales_prior = Normal(mean_scales, std_scales) # Optional
>>> model.fit()
>>> mf, vf = model.predict_f(x_test)
>>> my, cy = model.predict_y(x_test, diag=False)
"""
from functools import partial
import numpy as np
from pyro import sample
from pyro.distributions import Normal, MultivariateNormal, Delta
from pyro.distributions.transforms import ExpTransform
from pyro.infer import Predictive
from pyro.infer.mcmc import MCMC, NUTS
import torch
torch.set_num_threads(1)
TensorType = torch.DoubleTensor
torch_dtype = torch.double
zeros = partial(torch.zeros, dtype=torch_dtype)
ones = partial(torch.ones, dtype=torch_dtype)
eye = partial(torch.eye, dtype=torch_dtype)
_trtrs = lambda b, a: torch.triangular_solve(b, a, upper=False)[0]
def _squared_distance(x1, x2):
"""
Compute squared distance matrix.
:param x1: [N1 x D]
:type x1: torch.Tensor
:param x2: [N2 x D]
:type x2: torch.Tensor
:return: [N1 x N2] squared distance matrix
"""
r2 = (
torch.sum(x1 ** 2, dim=1, keepdim=True)
- 2.0 * x1 @ x2.t()
+ torch.sum(x2 ** 2, dim=1, keepdim=True).t()
)
r2 = r2 - (torch.clamp(r2, max=0.0)).detach()
return r2
def _rbf(x1, x2, scales, variance):
    """RBF (squared-exponential) kernel matrix between ``x1`` and ``x2``."""
    scaled1 = x1 / scales
    scaled2 = x2 / scales
    return variance * torch.exp(-_squared_distance(scaled1, scaled2))
def _rbf_diag(x1, variance):
    """Diagonal of the RBF kernel: a constant ``variance`` per input row."""
    n_rows = x1.shape[0]
    return variance + zeros(n_rows)
def _jitchol(x):
"""
Cholesky with jitter backup
"""
try:
return torch.cholesky(x)
except RuntimeError:
factor = x.diag().mean()
for i in range(10):
jitter = factor * 2.0 ** (-9 + i)
try:
return torch.cholesky(x + jitter * eye(x.shape[0]))
except RuntimeError:
pass
else:
raise RuntimeError("Cholesky failed")
def _input_as_tensor(func):
def wrapped(obj, x_test, diag, with_jitter):
from_numpy = isinstance(x_test, np.ndarray)
if from_numpy:
x_test = TensorType(x_test)
mean, cov = func(obj, x_test, diag, with_jitter)
if from_numpy:
mean, cov = mean.detach().cpu().numpy(), cov.detach().cpu().numpy()
return mean, cov
return wrapped
class BayesianGP(object):
    """Gaussian process regressor with fully-Bayesian hyperparameter
    inference: the RBF kernel's scales/variance, the likelihood jitter and
    a constant mean ("bias") are sampled with NUTS (see ``fit``), and
    predictions are averaged over those posterior samples.
    """
    def __init__(self, x: np.ndarray, y: np.ndarray):
        """
        :param x: [N x D] training inputs
        :param y: [N] training targets
        """
        x, y = TensorType(x), TensorType(y)
        assert x.ndimension() == 2
        assert y.ndimension() == 1
        assert x.shape[0] == y.numel()
        self.x = x
        self.y = y
        # Number of (post-warmup) MCMC samples kept by fit().
        self.n_samples = 32
        # Maps the unconstrained "raw" samples to positive hyperparameters.
        self._xform = ExpTransform()
        # Length scales for the kernel
        self.raw_scales_prior = Normal(zeros(self.dx), ones(self.dx))
        # Kernel variance
        self.raw_variance_prior = Normal(zeros(1), ones(1))
        # Jitter, aka Gaussian likelihood's variance
        self.raw_jitter_prior = Normal(-3.0 + zeros(1), ones(1))
        # For the constant ("bias") mean function
        self.bias_prior = Normal(zeros(1), ones(1))
        # Set by fit(); None until then.
        self._mcmc = None
    @property
    def dx(self):
        """
        Input dimension (D)
        """
        return self.x.shape[1]
    @property
    def n(self):
        """
        Number of training data (N)
        """
        return self.y.numel()
    def fit(self):
        """Run NUTS/MCMC over the hyperparameter priors defined above."""
        mcmc_kernel = NUTS(self._prior_model)
        self._mcmc = MCMC(mcmc_kernel, num_samples=self.n_samples, warmup_steps=128)
        self._mcmc.run()
    def predict_f(self, x_test, diag=True):
        """Predictive distribution of the noise-free latent function."""
        return self._predict(x_test, diag, False)
    def predict_y(self, x_test, diag=True):
        """Predictive distribution of noisy observations (adds jitter)."""
        return self._predict(x_test, diag, True)
    def append_data(self, x_new, y_new):
        """
        Add new input-output pair(s) to the model
        :param x_new: inputs
        :type x_new: np.ndarray
        :param y_new: outputs
        :type y_new: np.ndarray
        """
        self.x = torch.cat((self.x, TensorType(np.atleast_2d(x_new))))
        self.y = torch.cat((self.y, TensorType(y_new.flatten())))
    def _prior_model(self):
        """Pyro model: sample hyperparameters, then score the training
        targets under the induced GP marginal likelihood.
        """
        scales, variance, jitter, bias = self._get_samples()
        if self.n > 0:
            kyy = _rbf(self.x, self.x, scales, variance) + jitter * eye(self.n)
            try:
                ckyy = _jitchol(kyy)
                sample(
                    "output",
                    MultivariateNormal(bias + zeros(self.n), scale_tril=ckyy),
                    obs=self.y,
                )
            except RuntimeError:  # Cholesky fails?
                # "No chance": degenerate observation site so this sample
                # contributes effectively nothing.
                sample("output", Delta(zeros(1)), obs=ones(1))
    def _posterior_model(self, x_test, diag, with_jitter):
        """
        Compute mean & (co)variance of the GP posterior at ``x_test``,
        conditioned on the training data (standard GP regression algebra),
        and expose them as deterministic sample sites "mean" and "cov".
        """
        assert self.n > 0, "Need at least one training datum for posterior"
        scales, variance, jitter, bias = self._get_samples()
        kyy = _rbf(self.x, self.x, scales, variance) + jitter * eye(self.n)
        ckyy = _jitchol(kyy)
        kys = _rbf(self.x, x_test, scales, variance)
        # alpha = L^{-1} K(x, x*);  beta = L^{-1} (y - bias)
        alpha = _trtrs(kys, ckyy)
        beta = _trtrs(self.y[:, None] - bias, ckyy)
        mean = (alpha.t() @ beta).flatten() + bias
        if diag:
            kss = _rbf_diag(x_test, variance)
            cov = kss - torch.sum(alpha ** 2, dim=0)
            if with_jitter:
                cov = cov + jitter
            # Guard against numerically-negative variances
            cov = cov - (torch.clamp(cov, max=0.0)).detach()
        else:
            kss = _rbf(x_test, x_test, scales, variance)
            cov = kss - alpha.t() @ alpha
            if with_jitter:
                cov = cov + jitter * eye(*cov.shape)
            # NOTE(review): unlike the diagonal branch, numerically-negative
            # variances are not clipped here.
        sample("mean", Delta(mean))
        sample("cov", Delta(cov))
    def _posterior_model_no_data(self, x_test, diag, with_jitter):
        """
        Prior predictive, used when the conditioning set is empty.
        """
        scales, variance, jitter, bias = self._get_samples()
        if diag:
            cov = _rbf_diag(x_test, variance)
            if with_jitter:
                cov = cov + jitter
        else:
            cov = _rbf(x_test, x_test, scales, variance)
            if with_jitter:
                cov = cov + jitter * eye(x_test.shape[0])
        # NOTE(review): torch.zeros uses the default dtype here (float32);
        # the addition promotes to double because bias is double — confirm.
        mean = torch.zeros(x_test.shape[0]) + bias
        sample("mean", Delta(mean))
        sample("cov", Delta(cov))
    def _get_samples(self):
        """Sample raw hyperparameters from their priors; positive quantities
        pass through exp.  Returns (scales, variance, jitter, bias).
        """
        scales = self._xform(sample("raw_scales", self.raw_scales_prior))
        variance = self._xform(sample("raw_variance", self.raw_variance_prior))
        jitter = self._xform(sample("raw_jitter", self.raw_jitter_prior))
        bias = sample("bias", self.bias_prior)
        return scales, variance, jitter, bias
    @_input_as_tensor
    def _predict(self, x_test: TensorType, diag, with_jitter):
        """
        Return predictive mean [N* x 1] and either predictive variance [N* x 1]
        or covariance [N* x N*], marginalised over the MCMC hyperparameter
        samples.
        :return: (TensorType, TensorType) mean & (co)variance
        """
        model = self._posterior_model if self.n > 0 else self._posterior_model_no_data
        samples = Predictive(model, self._mcmc.get_samples()).get_samples(
            x_test, diag, with_jitter
        )
        means, covs = samples["mean"], samples["cov"]
        mean = means.mean(dim=0)
        # Law of total (co)variance:
        if diag:
            cov = means.var(dim=0) + covs.mean(dim=0)
        else:
            # Unbiased sample covariance of the per-sample means, plus the
            # average of the per-sample covariances.
            d_mean = (means - mean)[:, :, None]
            cov_of_means = (d_mean @ torch.transpose(d_mean, 1, 2)).sum(dim=0) / (
                means.shape[0] - 1
            )
            mean_of_covs = covs.mean(dim=0)
            cov = cov_of_means + mean_of_covs
        # Make sure the shapes are right:
        if len(mean.shape) == 1:
            mean = mean[:, None]
        if len(cov.shape) == 1:
            cov = cov[:, None]
        return mean, cov
| [
"torch.triangular_solve",
"functools.partial",
"pyro.distributions.transforms.ExpTransform",
"pyro.distributions.Delta",
"pyro.sample",
"pyro.infer.mcmc.MCMC",
"torch.cholesky",
"torch.exp",
"pyro.infer.mcmc.NUTS",
"torch.set_num_threads",
"torch.clamp",
"torch.transpose",
"torch.zeros",
"... | [((649, 673), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (670, 673), False, 'import torch\n'), ((743, 782), 'functools.partial', 'partial', (['torch.zeros'], {'dtype': 'torch_dtype'}), '(torch.zeros, dtype=torch_dtype)\n', (750, 782), False, 'from functools import partial\n'), ((790, 828), 'functools.partial', 'partial', (['torch.ones'], {'dtype': 'torch_dtype'}), '(torch.ones, dtype=torch_dtype)\n', (797, 828), False, 'from functools import partial\n'), ((835, 872), 'functools.partial', 'partial', (['torch.eye'], {'dtype': 'torch_dtype'}), '(torch.eye, dtype=torch_dtype)\n', (842, 872), False, 'from functools import partial\n'), ((896, 937), 'torch.triangular_solve', 'torch.triangular_solve', (['b', 'a'], {'upper': '(False)'}), '(b, a, upper=False)\n', (918, 937), False, 'import torch\n'), ((1525, 1539), 'torch.exp', 'torch.exp', (['(-r2)'], {}), '(-r2)\n', (1534, 1539), False, 'import torch\n'), ((1703, 1720), 'torch.cholesky', 'torch.cholesky', (['x'], {}), '(x)\n', (1717, 1720), False, 'import torch\n'), ((2852, 2866), 'pyro.distributions.transforms.ExpTransform', 'ExpTransform', ([], {}), '()\n', (2864, 2866), False, 'from pyro.distributions.transforms import ExpTransform\n'), ((3573, 3596), 'pyro.infer.mcmc.NUTS', 'NUTS', (['self._prior_model'], {}), '(self._prior_model)\n', (3577, 3596), False, 'from pyro.infer.mcmc import MCMC, NUTS\n'), ((3618, 3681), 'pyro.infer.mcmc.MCMC', 'MCMC', (['mcmc_kernel'], {'num_samples': 'self.n_samples', 'warmup_steps': '(128)'}), '(mcmc_kernel, num_samples=self.n_samples, warmup_steps=128)\n', (3622, 3681), False, 'from pyro.infer.mcmc import MCMC, NUTS\n'), ((6902, 6933), 'pyro.sample', 'sample', (['"""bias"""', 'self.bias_prior'], {}), "('bias', self.bias_prior)\n", (6908, 6933), False, 'from pyro import sample\n'), ((1198, 1237), 'torch.sum', 'torch.sum', (['(x1 ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(x1 ** 2, dim=1, keepdim=True)\n', (1207, 1237), False, 'import torch\n'), 
((5981, 5992), 'pyro.distributions.Delta', 'Delta', (['mean'], {}), '(mean)\n', (5986, 5992), False, 'from pyro.distributions import Normal, MultivariateNormal, Delta\n'), ((6016, 6026), 'pyro.distributions.Delta', 'Delta', (['cov'], {}), '(cov)\n', (6021, 6026), False, 'from pyro.distributions import Normal, MultivariateNormal, Delta\n'), ((6523, 6551), 'torch.zeros', 'torch.zeros', (['x_test.shape[0]'], {}), '(x_test.shape[0])\n', (6534, 6551), False, 'import torch\n'), ((6583, 6594), 'pyro.distributions.Delta', 'Delta', (['mean'], {}), '(mean)\n', (6588, 6594), False, 'from pyro.distributions import Normal, MultivariateNormal, Delta\n'), ((6618, 6628), 'pyro.distributions.Delta', 'Delta', (['cov'], {}), '(cov)\n', (6623, 6628), False, 'from pyro.distributions import Normal, MultivariateNormal, Delta\n'), ((6688, 6731), 'pyro.sample', 'sample', (['"""raw_scales"""', 'self.raw_scales_prior'], {}), "('raw_scales', self.raw_scales_prior)\n", (6694, 6731), False, 'from pyro import sample\n'), ((6764, 6811), 'pyro.sample', 'sample', (['"""raw_variance"""', 'self.raw_variance_prior'], {}), "('raw_variance', self.raw_variance_prior)\n", (6770, 6811), False, 'from pyro import sample\n'), ((6842, 6885), 'pyro.sample', 'sample', (['"""raw_jitter"""', 'self.raw_jitter_prior'], {}), "('raw_jitter', self.raw_jitter_prior)\n", (6848, 6885), False, 'from pyro import sample\n'), ((1276, 1315), 'torch.sum', 'torch.sum', (['(x2 ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(x2 ** 2, dim=1, keepdim=True)\n', (1285, 1315), False, 'import torch\n'), ((1341, 1365), 'torch.clamp', 'torch.clamp', (['r2'], {'max': '(0.0)'}), '(r2, max=0.0)\n', (1352, 1365), False, 'import torch\n'), ((5501, 5529), 'torch.sum', 'torch.sum', (['(alpha ** 2)'], {'dim': '(0)'}), '(alpha ** 2, dim=0)\n', (5510, 5529), False, 'import torch\n'), ((4184, 4204), 'numpy.atleast_2d', 'np.atleast_2d', (['x_new'], {}), '(x_new)\n', (4197, 4204), True, 'import numpy as np\n'), ((5678, 5703), 'torch.clamp', 
'torch.clamp', (['cov'], {'max': '(0.0)'}), '(cov, max=0.0)\n', (5689, 5703), False, 'import torch\n'), ((7773, 7802), 'torch.transpose', 'torch.transpose', (['d_mean', '(1)', '(2)'], {}), '(d_mean, 1, 2)\n', (7788, 7802), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import logging as log
import os
import sys
import tarfile
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import pyproj
import rasterio
import urllib3
# Module logger (the stdlib logging module itself is imported as ``log``).
logging = log.getLogger("cm-hdd_cdd")
logging.setLevel(log.DEBUG)
# Decimal places used when rounding coordinates.
DECIMALS = 3
CURRENT_FILE_DIR = Path(__file__).parent
# Fallback repository with test rasters, used when no env variable is set.
TESTDATA_DIR = CURRENT_FILE_DIR / "testdata" / "hddcddrepo"
def get_data_dir() -> Path:
    """Directory for downloaded data, taken from ``CM_HDD_CDD_DIR``."""
    data_dir = os.environ["CM_HDD_CDD_DIR"]
    return Path(data_dir)
def get_data_repository() -> Path:
    """Data repository root, taken from ``CM_HDD_CDD_REPOSITORY``."""
    repo_dir = os.environ["CM_HDD_CDD_REPOSITORY"]
    return Path(repo_dir)
def get_years() -> List[int]:
    """Sorted, de-duplicated years of all GeoTIFFs in the repository.

    File names are assumed to start with ``<year>_``.
    """
    repo = get_data_repository()
    years = {int(tif.name.split("_")[0]) for tif in repo.glob("**/*.tif")}
    return sorted(years)
def get_scenarios() -> List[str]:
    """Sorted names of the scenarios (top-level repository sub-directories)."""
    repo = get_data_repository()
    scenario_names = [scenario.name for scenario in repo.iterdir()]
    return sorted(scenario_names)
def get_base_temperature(ddtype: str) -> List[float]:
    """Sorted base temperatures available for one degree-day type.

    The return annotation previously said ``List[str]`` although the values
    are floats — fixed.

    :param ddtype: degree-day type, e.g. "hdd" or "cdd"
    :return: sorted list of base temperatures (directory names as floats)
    """
    repo = get_data_repository()
    folder = repo / "historical" / ddtype
    return sorted(float(tb.name) for tb in folder.iterdir())
def _base_temperature_schema(ddtype: str) -> Dict[str, Any]:
    """JSON-schema fragment for the base-temperature choice of one
    degree-day type ("hdd" or "cdd"); default is the middle value.
    """
    temps = get_base_temperature(ddtype)
    return dict(
        type="number",
        title=f"Base temperature for {ddtype.upper()}",
        description="",
        default=temps[int(len(temps) / 2)],
        minimum=min(temps),
        maximum=max(temps),
        enum=temps,
    )


def get_hddcdd_schema(save: bool = False, schema_path: Path = None) -> Dict[str, Any]:
    """Build the JSON schema describing the CM's input parameters.

    The previously duplicated HDD/CDD temperature-schema construction is
    factored into ``_base_temperature_schema``; ``if save is True`` was
    replaced by the idiomatic ``if save``.

    :param save: when True, also write the schema to ``schema_path``
    :param schema_path: output file; defaults to ``<cm root>/hdd_cdd/schema.json``
    :return: the schema dictionary
    """
    scenarios = get_scenarios()
    scens = dict(
        type="string",
        title="Representative Concentration Pathway Scenarios",
        description=(
            "A Representative Concentration"
            " Pathway(https://en.wikipedia.org/wiki/Representative_Concentration_Pathway)"
            " (RCP) is a greenhouse gas concentration (not emissions) trajectory"
            " adopted by the IPCC"
        ),
        default=scenarios[0],
        enum=scenarios,
    )
    props = {
        "scenario RCP": scens,
        "base temperature for HDD": _base_temperature_schema("hdd"),
        "base temperature for CDD": _base_temperature_schema("cdd"),
    }
    schema = dict(type="object", properties=props)
    if save:
        if schema_path is None:
            cmpath = CURRENT_FILE_DIR.parent.parent.resolve()
            schema_path = cmpath / "hdd_cdd" / "schema.json"
        with open(schema_path.as_posix(), mode="w") as schfile:
            json.dump(schema, schfile, indent=2, sort_keys=True)
    return schema
def download_data():
    """Download the HDDs and CDDs dataset into the local data directory.

    Uses the environment variables ``CM_HDD_CDD_DIR`` (download target, via
    ``get_data_dir``) and ``CM_HDD_CDD_REPOSITORY`` (repository path, via
    ``get_data_repository``).  If ``<repo>/historical`` already exists the
    download is skipped.

    NOTE(review): this function ends with ``sys.exit(0)``, so it terminates
    the whole process — it is a setup entry point, not a reusable helper.
    """
    rdir = get_data_dir()
    repo = get_data_repository()
    print(f"data repository => {repo}")
    os.makedirs(rdir.as_posix(), exist_ok=True)
    hist = repo / "historical"
    if hist.exists():
        print(
            f"The directory {hist} already exists, the dataset is not going to be"
            " downloaded."
        )
    else:
        print("Downloading the HDDs and CDDs dataset")
        url = "https://gitlab.inf.unibz.it/URS/enermaps/hdd-cdd/-/archive/main/hdd-cdd-main.tar.gz"
        zpath = rdir / "hdd-cdd-main.tar.gz"
        http = urllib3.PoolManager()
        # Stream the archive to disk (mode "b+w" == "w+b": binary read/write).
        with http.request("GET", url, preload_content=False) as req, open(
            zpath, "b+w"
        ) as zdata:
            zdata.write(req.read())
        print(f"Extracting {zpath}")
        # NOTE(review): extractall on a downloaded archive trusts its member
        # paths; the source is a fixed first-party URL.
        with tarfile.open(zpath) as zfile:
            zfile.extractall(rdir)
        os.remove(zpath)
    print("done!")
    sys.exit(0)
def compute_centroid(geo) -> Tuple[float, float]:
    """Centroid (lon, lat) of a geometry, rounded to ``DECIMALS`` places.

    The centroid is computed in the projected EPSG:3035 CRS and converted
    back to EPSG:4326.

    Example
    >>> gj = {'features': [
    ...          {'geometry':
    ...               {'coordinates': [[[11.061588, 45.567844],
    ...                                 [11.055015, 45.563899],
    ...                                 [11.050421, 45.565349],
    ...                                 [11.040453, 45.560869],
    ...                                 [11.032734, 45.561389]]],
    ...                'type': 'Polygon'},
    ...           'id': 80284,
    ...           'type': 'Feature'}],
    ...       'type': 'FeatureCollection'}
    >>> import geopandas as gpd
    >>> geo = gpd.GeoDataFrame.from_features(
    ...     gj["features"], crs="EPSG:4326"
    ... ).geometry
    >>> compute_centroid(geo)
    (11.048, 45.564)
    """
    try:
        projected = geo.to_crs("EPSG:3035")
        coords = np.array(projected.centroid.to_crs("EPSG:4326")[0])
    except KeyError:
        logging.error(geo)
        raise ValueError(
            "FAILED! The provided geometry is not a correct/supported geojson format."
        )
    return tuple(np.around(coords, decimals=DECIMALS))
def get_datarepodir() -> Path:
    """Repository path from ``CM_HDD_CDD_REPOSITORY``, or the test data dir."""
    fallback = TESTDATA_DIR.as_posix()
    return Path(os.environ.get("CM_HDD_CDD_REPOSITORY", fallback))
@lru_cache(maxsize=256)
def get_datadir(
    datarepository: Path,
    sim_type: str = "historical",
    dd_type: str = "hdd",
    Tb: float = 18.0,
    aggr_window: str = "monthly",
    method: str = "average",
) -> Path:
    """Build the directory path that holds the degree-day rasters.

    The previous doctest showed a trailing slash and double quotes, which
    ``Path.as_posix`` never produces — fixed below.

    :param datarepository: repository root (``str`` or ``Path``)
    :param sim_type: e.g. "historical" or an RCP scenario name
    :param dd_type: degree-day type, "hdd" or "cdd"
    :param Tb: base temperature, rendered with one decimal place
    :param aggr_window: aggregation window, e.g. "monthly"
    :param method: aggregation method, e.g. "average"

    >>> get_datadir("cm/cm_hdd_cdd/testdata").as_posix()
    'cm/cm_hdd_cdd/testdata/historical/hdd/18.0/monthly/average'
    """
    return (
        Path(datarepository) / sim_type / dd_type / f"{Tb:.1f}" / aggr_window / method
    )
def reproj(
    src_x: float, src_y: float, src_crs: str = "EPSG:4326", dst_crs: str = "EPSG:3035"
) -> Tuple[float, float]:
    """Reproject a single point from one reference system to another.

    >>> # see: https://epsg.io/transform#s_srs=4326&t_srs=3035&x=11.0132789&y=45.5228261
    >>> # (lat, lon)
    >>> [f"{c:.2f}" for c in reproj(45.5228261, 11.0132789)]
    ['4400277.98', '2490583.97']
    """
    transformer = pyproj.Transformer.from_crs(src_crs, dst_crs)
    dst_y, dst_x = transformer.transform(src_x, src_y)
    # Note the swap: the transformed pair is returned as (x, y).
    return (dst_x, dst_y)
@lru_cache()
def extract_by_dir(
    gdir: Path,
    lon: float,
    lat: float,
    refyear: int = None,
    refmonth: int = None,
    __datasets: Dict[str, rasterio.DatasetReader] = {},
):
    """Extract the pixel values at (lon, lat) from all GeoTIFFs in ``gdir``
    that match the (optional) year/month filter, as a pandas Series indexed
    by "YYYY-MM".

    NOTE(review): ``__datasets`` is a *deliberate* mutable default argument
    used as a cross-call cache of open rasterio datasets; combined with
    ``lru_cache`` this keeps files open for the process lifetime.
    NOTE(review): if ``gdir`` exists but no file matches ``pattern``, the
    Series ``name`` f-string below references ``yp``/``xp`` before
    assignment — potential NameError; confirm callers never hit this.
    """
    res = []
    idx = []
    # check inputs
    if refyear is not None and (refyear < 1950 or refyear > 2100):
        raise ValueError(
            f"Reference year must be 1950 <= refyear <=2100, instead is: {refyear}"
        )
    refyear = "*" if refyear is None else f"{refyear}"
    if refmonth is not None and (refmonth < 1 or refmonth > 12):
        raise ValueError(
            f"Reference month must be 1 <= refmonth <=12, instead is: {refmonth}"
        )
    refmonth = "*" if refmonth is None else f"{refmonth:02d}"
    # define the file pattern to match, e.g. "2020_07.tif" or "*_*.tif"
    pattern = f"{refyear}_{refmonth}.tif"
    # Convert lat, lon to EPSG:3035 coords
    # cx, cy = reproj(lat, lon)
    cx, cy = reproj(src_x=lat, src_y=lon, src_crs="EPSG:4326", dst_crs="EPSG:3035")
    if not gdir.exists():
        # Missing directory: return an empty series with sentinel indices.
        res, idx = [], []
        yp, xp = -1, -1
    else:
        for gfi in gdir.iterdir():
            if gfi.match(pattern):
                # "YYYY_MM" file stem becomes the "YYYY-MM" series index.
                idx.append(gfi.name[:-4].replace("_", "-"))
                try:
                    gx = __datasets[gfi.as_posix()]
                except KeyError:
                    # First access: open the raster and cache the handle.
                    gx = rasterio.open(gfi)
                    __datasets[gfi.as_posix()] = gx
                yp, xp = gx.index(cx, cy)
                if yp < 0 or xp < 0:
                    raise ValueError(
                        f"Negative row|col index, row_index={yp}, col_index={xp}"
                        f"@{lon:.{DECIMALS}f}({cx:.{DECIMALS}f}),{lat:.{DECIMALS}f}({cy:.{DECIMALS}f})"
                    )
                val = gx.read()[0][yp, xp]
                logging.info(
                    f"{gfi}@{lon:.{DECIMALS}f}({cx:.{DECIMALS}f}),{lat:.{DECIMALS}f}({cy:.{DECIMALS}f}):"
                    f" {yp}, {xp} => {val}"
                )
                res.append(val)
    sr = pd.Series(np.array(res), index=idx, name=f"yp={yp},xp={xp}")
    sr.sort_index(inplace=True)
    return sr
| [
"json.dump",
"os.remove",
"rasterio.open",
"logging.getLogger",
"numpy.around",
"pathlib.Path",
"numpy.array",
"urllib3.PoolManager",
"pyproj.Transformer.from_crs",
"tarfile.open",
"functools.lru_cache",
"sys.exit"
] | [((312, 339), 'logging.getLogger', 'log.getLogger', (['"""cm-hdd_cdd"""'], {}), "('cm-hdd_cdd')\n", (325, 339), True, 'import logging as log\n'), ((5667, 5689), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(256)'}), '(maxsize=256)\n', (5676, 5689), False, 'from functools import lru_cache\n'), ((6630, 6641), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (6639, 6641), False, 'from functools import lru_cache\n'), ((401, 415), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (405, 415), False, 'from pathlib import Path\n'), ((524, 558), 'pathlib.Path', 'Path', (["os.environ['CM_HDD_CDD_DIR']"], {}), "(os.environ['CM_HDD_CDD_DIR'])\n", (528, 558), False, 'from pathlib import Path\n'), ((607, 648), 'pathlib.Path', 'Path', (["os.environ['CM_HDD_CDD_REPOSITORY']"], {}), "(os.environ['CM_HDD_CDD_REPOSITORY'])\n", (611, 648), False, 'from pathlib import Path\n'), ((3919, 3930), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3927, 3930), False, 'import sys\n'), ((6518, 6563), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['src_crs', 'dst_crs'], {}), '(src_crs, dst_crs)\n', (6545, 6563), False, 'import pyproj\n'), ((3578, 3599), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (3597, 3599), False, 'import urllib3\n'), ((3879, 3895), 'os.remove', 'os.remove', (['zpath'], {}), '(zpath)\n', (3888, 3895), False, 'import os\n'), ((5511, 5547), 'numpy.around', 'np.around', (['coords'], {'decimals': 'DECIMALS'}), '(coords, decimals=DECIMALS)\n', (5520, 5547), True, 'import numpy as np\n'), ((8629, 8642), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (8637, 8642), True, 'import numpy as np\n'), ((2727, 2779), 'json.dump', 'json.dump', (['schema', 'schfile'], {'indent': '(2)', 'sort_keys': '(True)'}), '(schema, schfile, indent=2, sort_keys=True)\n', (2736, 2779), False, 'import json\n'), ((3806, 3825), 'tarfile.open', 'tarfile.open', (['zpath'], {}), '(zpath)\n', (3818, 3825), False, 'import tarfile\n'), ((6026, 
6046), 'pathlib.Path', 'Path', (['datarepository'], {}), '(datarepository)\n', (6030, 6046), False, 'from pathlib import Path\n'), ((7941, 7959), 'rasterio.open', 'rasterio.open', (['gfi'], {}), '(gfi)\n', (7954, 7959), False, 'import rasterio\n')] |
import os
import matplotlib.pyplot as plt
import random
import h5py
import numpy as np
import warnings
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import SpectralClustering
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn.cluster import estimate_bandwidth
from sklearn.mixture import GaussianMixture
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score
from sklearn.ensemble import IsolationForest
warnings.filterwarnings('ignore', '.*Graph is not fully connected*')
# Load the DAVIS event-camera recording: polarity, timestamp, x, y streams.
print('reading Hands_sequence...')
file_name = "Object Motion Data (mat files)/Hands_sequence.mat"
f = h5py.File(file_name, "r")
davis = f['davis']
dvs = davis['dvs']
pol = dvs['p'][0]
ts = dvs['t'][0]
x = dvs['x'][0]
y = dvs['y'][0]
# Precomputed frame timestamps and per-frame event indices.
aps_ts = np.load("hands_img_ts.npy")
dvs_ts = np.load("hands_all_ts.npy")
#for i in dvs_ts:
#    print(i)
#exit()
n = len(dvs_ts)
last = 0
ALL = len(pol)
# Neighbour count for the k-NN graph used by spectral clustering.
NEIGHBORS = 30
# Process a hand-picked subset of frame indices.
for i in [66, 70, 87, 26, 101]:
    # Zero-pad the frame index to ten digits to build the file name.
    xx = '0000000000'
    yy = str(i)
    file_name = xx[:len(xx) - len(yy)] + yy
    print('img : ', i)
    selected_events = []
    # Event range for frame i (kept for reference; events are actually
    # loaded from the precomputed .npy file below).
    last = dvs_ts[i-1] + 1 if i>0 else 0
    idx = dvs_ts[i]
    #for i in range(0, ALL)[last:idx]:
    #    selected_events.append([y[i], x[i], ts[i] * 0.0001, pol[i] * 0])
    #    if len(selected_events)>=116000:
    #        break
    selected_events = np.load("results/190/selected_events/" + file_name + ".npy")
    #selected_events = np.asarray(selected_events)
    print('removing noise...')
    #cleaned_events = IsolationForest(random_state=0, n_jobs=-1, contamination=0.05).fit(selected_events)
    #unwanted_events = cleaned_events.predict(selected_events)
    #selected_events = selected_events[np.where(unwanted_events == 1, True, False)]
    print('graph construction...')
    # k-nearest-neighbour adjacency matrix over the event point cloud.
    adMat = kneighbors_graph(selected_events, n_neighbors=NEIGHBORS)
    max_score = -20
    opt_clusters = 2
    scores = []
    all_clusters = []
    print('predicting number of clusters...')
    # Model selection: keep the cluster count with the best silhouette score.
    for CLUSTERS in range(2, 6):
        clustering = SpectralClustering(n_clusters=CLUSTERS, random_state=0,
                                        affinity='precomputed_nearest_neighbors',
                                        n_neighbors=NEIGHBORS, assign_labels='kmeans',
                                        n_jobs=-1).fit_predict(adMat)
        all_clusters.append(clustering)
        curr_score = silhouette_score(selected_events, clustering)
        scores.append(curr_score)
        if curr_score > max_score:
            max_score = curr_score
            opt_clusters = CLUSTERS
    print('clustering...')
    #clustering = SpectralClustering(n_clusters=opt_clusters, random_state=0,
    #                                affinity='precomputed_nearest_neighbors',
    #                                n_neighbors=NEIGHBORS, assign_labels='kmeans',
    #                                n_jobs=-1).fit_predict(adMat)
    #clustering_kmeans = KMeans(n_clusters=opt_clusters, random_state=0).fit_predict(selected_events)
    #BW = estimate_bandwidth(selected_events)
    #clustering_meanshift = MeanShift(bandwidth=BW).fit_predict(selected_events)
    #clustering_dbscan = DBSCAN(eps=10, min_samples=NEIGHBORS).fit_predict(selected_events)
    #clustering_aggc = AgglomerativeClustering(n_clusters=opt_clusters, linkage='ward', connectivity=adMat).fit_predict(selected_events)
    #clustering_gmm = GaussianMixture(n_components=opt_clusters, random_state=0).fit_predict(selected_events)
    print('saving results...')
    #np.save(os.path.join('results/190/predict_k',
    #                     file_name + '.npy'),
    #        np.asarray(scores))
    #np.save(os.path.join('results/190/selected_events',
    #                     file_name + '.npy'),
    #        selected_events)
    #np.save(os.path.join('results/190/clusters/spectral',
    #                     file_name + '.npy'),
    #        clustering)
    # Persist the per-k spectral clusterings for this frame.
    np.save(os.path.join('results/190/for/clusters',
                         file_name+'.npy'),
            all_clusters)
    #np.save(os.path.join('results/190/clusters/kmeans',
    #                     file_name + '.npy'),
    #        clustering_kmeans)
    #np.save(os.path.join('results/190/clusters/meanshift',
    #                     file_name + '.npy'),
    #        clustering_meanshift)
    #np.save(os.path.join('results/190/clusters/dbscan',
    #                     file_name + '.npy'),
    #        clustering_dbscan)
    #np.save(os.path.join('results/190/clusters/aggc',
    #                     file_name + '.npy'),
    #        clustering_aggc)
    #np.save(os.path.join('results/190/clusters/gmm',
    #                     file_name + '.npy'),
    #        clustering_gmm)
print('done') | [
"numpy.load",
"h5py.File",
"warnings.filterwarnings",
"sklearn.cluster.SpectralClustering",
"sklearn.metrics.silhouette_score",
"sklearn.neighbors.kneighbors_graph",
"os.path.join"
] | [((556, 624), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '""".*Graph is not fully connected*"""'], {}), "('ignore', '.*Graph is not fully connected*')\n", (579, 624), False, 'import warnings\n'), ((731, 756), 'h5py.File', 'h5py.File', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (740, 756), False, 'import h5py\n'), ((878, 905), 'numpy.load', 'np.load', (['"""hands_img_ts.npy"""'], {}), "('hands_img_ts.npy')\n", (885, 905), True, 'import numpy as np\n'), ((916, 943), 'numpy.load', 'np.load', (['"""hands_all_ts.npy"""'], {}), "('hands_all_ts.npy')\n", (923, 943), True, 'import numpy as np\n'), ((1485, 1545), 'numpy.load', 'np.load', (["('results/190/selected_events/' + file_name + '.npy')"], {}), "('results/190/selected_events/' + file_name + '.npy')\n", (1492, 1545), True, 'import numpy as np\n'), ((1939, 1995), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['selected_events'], {'n_neighbors': 'NEIGHBORS'}), '(selected_events, n_neighbors=NEIGHBORS)\n', (1955, 1995), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((2545, 2590), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['selected_events', 'clustering'], {}), '(selected_events, clustering)\n', (2561, 2590), False, 'from sklearn.metrics import silhouette_score\n'), ((4115, 4175), 'os.path.join', 'os.path.join', (['"""results/190/for/clusters"""', "(file_name + '.npy')"], {}), "('results/190/for/clusters', file_name + '.npy')\n", (4127, 4175), False, 'import os\n'), ((2184, 2349), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'CLUSTERS', 'random_state': '(0)', 'affinity': '"""precomputed_nearest_neighbors"""', 'n_neighbors': 'NEIGHBORS', 'assign_labels': '"""kmeans"""', 'n_jobs': '(-1)'}), "(n_clusters=CLUSTERS, random_state=0, affinity=\n 'precomputed_nearest_neighbors', n_neighbors=NEIGHBORS, assign_labels=\n 'kmeans', n_jobs=-1)\n", (2202, 2349), False, 'from sklearn.cluster import 
SpectralClustering\n')] |
import os
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.ndimage import label
from IPython.display import display
# Mapping from event name to the integer class id used throughout this module.
_LABEL_MAP = {'normal': 0, 'aggressive_long_accel': 1,
              'aggressive_turn': 2, 'aggressive_bump': 3}
# Column names expected in the raw CSV files: IMU signals plus the binary label.
_COL_NAMES = ['timestamp', 'accel_x', 'accel_y',
              'accel_z', 'gyro_roll', 'gyro_pitch',
              'gyro_yaw', 'label']
def load_data(path, col_names, events_names):
    """
    Loads the data from a set of CSV files.

    Aggressive events are trimmed to the span between the first and the
    last labeled sample (``df[min_i: max_i]``, upper bound exclusive, as
    in the original implementation); 'normal' recordings are kept whole.

    Parameters
    ----------
    path: string
        Path to the directory with the CSV files.
    col_names : list
        List with the column names of the columns that should be loaded.
    events_names: list
        The names of the events that should be loaded.

    Returns
    -------
    Dict
        Dictionary with the dataframe for each event. Events with no
        matching file remain ``None``.
    """
    # Substring of the file name -> event key, one entry per aggressive
    # event. 'normal' is handled separately because it is not trimmed.
    aggressive_events = [
        ('aggressive_longitudinal_acceleration', 'aggressive_long_accel'),
        ('aggressive_turn', 'aggressive_turn'),
        ('aggressive_bump', 'aggressive_bump'),
    ]
    files = sorted(glob(os.path.join(path, '*.csv')))
    data = {signal_name: None for signal_name in events_names}

    def _accumulate(key, df):
        # Append to the running dataframe for this event. pd.concat
        # replaces DataFrame.append, which was removed in pandas 2.0.
        if data[key] is None:
            data[key] = df
        else:
            data[key] = pd.concat([data[key], df])

    for f in files:
        df = pd.read_csv(f)[col_names]
        if 'normal' in f and 'normal' in events_names:
            _accumulate('normal', df)
        for name_part, key in aggressive_events:
            if name_part in f and key in events_names:
                # Keep only the stretch between the first and the last
                # non-zero label; the upper bound is exclusive (this
                # preserves the original slicing behavior).
                idx = np.nonzero(df['label'].values)[0]
                min_i, max_i = int(idx.min()), int(idx.max())
                _accumulate(key, df[min_i: max_i])
    print('Data loaded with the following shapes:')
    for key, values in data.items():
        if values is None:
            # Previously this crashed with AttributeError on None.shape;
            # report the missing event instead.
            print('\t{}: no files found.'.format(key))
        else:
            print('\t{}: {}.'.format(key, values.shape))
    return data
def split_train_val_test(data, percs=(0.7, 0.15, 0.15),
                         n_points_discard=20):
    """
    Splits the data into training, validation, and test sets.

    Parameters
    ----------
    data: Dict
        Dictionary with the data of each event.
    percs : tuple
        Fraction of the data assigned to each of the three sets.
    n_points_discard: int
        Number of points dropped between consecutive sets so that no
        window can straddle two sets (avoids data leakage).

    Returns
    -------
    Dict
        Dictionary with training dataframes for each event.
    Dict
        Dictionary with validation dataframes for each event.
    Dict
        Dictionary with test dataframes for each event.
    """
    assert np.sum(percs) == 1
    assert len(percs) == 3
    train, val, test = {}, {}, {}
    for key, frame in data.items():
        n_samples = frame.shape[0]
        # Walk through three consecutive chunks, discarding
        # n_points_discard rows at the end of each one.
        chunks = []
        low, high = 0, 0
        for perc in percs:
            high = high + n_samples * perc
            chunks.append(frame[int(low): int(high - n_points_discard)])
            low = high + n_points_discard
        train[key], val[key], test[key] = chunks
        print('\nNumber of points in {}:'.format(key))
        print('Train: {}.'.format(train[key].shape[0]))
        print('Validation: {}.'.format(val[key].shape[0]))
        print('Test: {}.'.format(test[key].shape[0]))
        if key != 'normal':
            print('Number of aggressive events:')
            for tag, subset in (('Train', train), ('Validation', val),
                                ('Test', test)):
                # Count connected runs of non-zero labels (one per event).
                _, n_labels = label(subset[key]['label'].values)
                print('\t{}: {}.'.format(tag, n_labels))
    return train, val, test
def join_data(dataframes_dict, signal_columns, label_column):
    """
    Joins the data of all events into a single dataframe and array of labels.

    Parameters
    ----------
    dataframes_dict: Dict
        Dictionary with the dataframes of the data each event in a given set.
    signal_columns : list
        List of strings of the columns names to keep.
    label_column: string
        The name of the column with the labels.

    Returns
    -------
    pandas dataframe
        Dataframe with the signals of all types of events appended.
    numpy array
        The array with the labels of each row of the data. The label number
        is defined by _LABEL_MAP
    """
    frames = []
    labels = []
    for key, values in dataframes_dict.items():
        frames.append(values[signal_columns])
        # Per-row binary labels scaled by the event's class id; rows
        # labeled 0 stay in class 0 ('normal').
        labels.append(np.zeros(values.shape[0])
                      + _LABEL_MAP[key] * values[label_column])
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    join_X = pd.concat(frames)
    # np.concatenate guarantees an ndarray (the old code could return a
    # pandas Series when only one event was present).
    join_y = np.concatenate([np.asarray(lab) for lab in labels])
    return join_X, join_y
def events_size(manual_labels, bins=60):
    """
    Plots the histogram of event durations (number of points per event).

    Parameters
    ----------
    manual_labels: numpy array
        Binary label array, e.g. the labels of a single event type only.
    bins : int
        Number of bins of the histogram.
    """
    labeled_array, n_labels = label(manual_labels)
    sizes = np.zeros(n_labels)
    # scipy's label() numbers connected components 1..n_labels.
    for component in range(n_labels):
        sizes[component] = np.count_nonzero(labeled_array == component + 1)
    plt.hist(sizes, bins=bins)
    plt.ylabel('Number of events.')
    plt.xlabel('Number of points of event.')
    plt.show()
class DummyPreProcessing:
    """Identity pre-processing step compatible with sklearn pipelines.

    It demonstrates the minimal transformer interface — ``fit``,
    ``transform`` and ``fit_transform`` — and returns the data unchanged.
    """

    def fit(self, X, y=None):
        """No-op: the identity transform has no parameters to estimate.

        Parameters
        ----------
        X: numpy array
            Feature matrix: rows are samples, columns are features.
        y : numpy
            Optional label array; not needed for this transform.
        """
        pass

    def transform(self, X, y=None):
        """Return ``X`` unchanged.

        Only meaningful after ``fit`` for a real transformer; here it is
        always valid.

        Parameters
        ----------
        X: numpy array
            Feature matrix: rows are samples, columns are features.
        y : numpy
            Optional label array; not needed for this transform.

        Returns
        -------
        numpy array
            The input ``X``, unchanged.
        """
        return X

    def fit_transform(self, X, y=None):
        """Fit (a no-op here) and then transform in a single call.

        Parameters
        ----------
        X: numpy array
            Feature matrix: rows are samples, columns are features.
        y : numpy
            Optional label array; not needed for this transform.

        Returns
        -------
        numpy array
            The input ``X``, unchanged.
        """
        self.fit(X=X, y=y)
        return self.transform(X=X, y=y)
def general_report(results):
    """
    Prints the metrics table for every dataset/classifier combination.

    Parameters
    ----------
    results: Dictionary
        Dict of dicts: dataset name -> classifier name -> metric values.
    """
    for set_name, per_classifier in results.items():
        print('\n{}'.format(set_name))
        # Transpose so classifiers become rows and metrics become columns.
        metrics_table = pd.DataFrame.from_dict(per_classifier).T
        display(metrics_table)
| [
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.hist",
"numpy.count_nonzero",
"pandas.read_csv",
"pandas.DataFrame.from_dict",
"numpy.zeros",
"IPython.display.display",
"numpy.nonzero",
"scipy.ndimage.label",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"
] | [((6649, 6669), 'scipy.ndimage.label', 'label', (['manual_labels'], {}), '(manual_labels)\n', (6654, 6669), False, 'from scipy.ndimage import label\n'), ((6682, 6700), 'numpy.zeros', 'np.zeros', (['n_labels'], {}), '(n_labels)\n', (6690, 6700), True, 'import numpy as np\n'), ((6816, 6842), 'matplotlib.pyplot.hist', 'plt.hist', (['sizes'], {'bins': 'bins'}), '(sizes, bins=bins)\n', (6824, 6842), True, 'import matplotlib.pyplot as plt\n'), ((6847, 6878), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of events."""'], {}), "('Number of events.')\n", (6857, 6878), True, 'import matplotlib.pyplot as plt\n'), ((6883, 6923), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of points of event."""'], {}), "('Number of points of event.')\n", (6893, 6923), True, 'import matplotlib.pyplot as plt\n'), ((6928, 6938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6936, 6938), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3626), 'numpy.sum', 'np.sum', (['percs'], {}), '(percs)\n', (3619, 3626), True, 'import numpy as np\n'), ((6766, 6802), 'numpy.count_nonzero', 'np.count_nonzero', (['(labeled_array == i)'], {}), '(labeled_array == i)\n', (6782, 6802), True, 'import numpy as np\n'), ((9419, 9430), 'IPython.display.display', 'display', (['df'], {}), '(df)\n', (9426, 9430), False, 'from IPython.display import display\n'), ((921, 948), 'os.path.join', 'os.path.join', (['path', '"""*.csv"""'], {}), "(path, '*.csv')\n", (933, 948), False, 'import os\n'), ((1073, 1087), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (1084, 1087), True, 'import pandas as pd\n'), ((4620, 4653), 'scipy.ndimage.label', 'label', (["train[key]['label'].values"], {}), "(train[key]['label'].values)\n", (4625, 4653), False, 'from scipy.ndimage import label\n'), ((4731, 4762), 'scipy.ndimage.label', 'label', (["val[key]['label'].values"], {}), "(val[key]['label'].values)\n", (4736, 4762), False, 'from scipy.ndimage import label\n'), ((4845, 4877), 'scipy.ndimage.label', 
'label', (["test[key]['label'].values"], {}), "(test[key]['label'].values)\n", (4850, 4877), False, 'from scipy.ndimage import label\n'), ((9368, 9408), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results[set_key]'], {}), '(results[set_key])\n', (9390, 9408), True, 'import pandas as pd\n'), ((1471, 1501), 'numpy.nonzero', 'np.nonzero', (["df['label'].values"], {}), "(df['label'].values)\n", (1481, 1501), True, 'import numpy as np\n'), ((1937, 1967), 'numpy.nonzero', 'np.nonzero', (["df['label'].values"], {}), "(df['label'].values)\n", (1947, 1967), True, 'import numpy as np\n'), ((2379, 2409), 'numpy.nonzero', 'np.nonzero', (["df['label'].values"], {}), "(df['label'].values)\n", (2389, 2409), True, 'import numpy as np\n'), ((5802, 5827), 'numpy.zeros', 'np.zeros', (['values.shape[0]'], {}), '(values.shape[0])\n', (5810, 5827), True, 'import numpy as np\n'), ((6036, 6061), 'numpy.zeros', 'np.zeros', (['values.shape[0]'], {}), '(values.shape[0])\n', (6044, 6061), True, 'import numpy as np\n')] |
"""
This is a longer example that applies time domain beamforming towards a source
of interest in the presence of a strong interfering source.
"""
from __future__ import division, print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
import pyroomacoustics as pra
from pyroomacoustics.transform import stft
# Spectrogram figure properties
figsize = (15, 7)  # figure size
fft_size = 512  # fft size for analysis
fft_hop = 8  # hop between analysis frame
fft_zp = 512  # zero padding
analysis_window = pra.hann(fft_size)  # Hann analysis window for the STFT
t_cut = 0.83  # length in [s] to remove at end of signal (no sound)
# Some simulation parameters
Fs = 8000  # sampling frequency [Hz]
absorption = 0.1  # wall absorption used by the image-source model
max_order_sim = 2  # maximum image-source order in the room simulation
sigma2_n = 5e-7  # variance of the additive sensor noise
# Microphone array design parameters
mic1 = np.array([2, 1.5])  # position
M = 8  # number of microphones
d = 0.08  # distance between microphones
phi = 0.0  # angle from horizontal
max_order_design = 1  # maximum image generation used in design
shape = "Linear"  # array shape
Lg_t = 0.100  # Filter size in seconds
Lg = np.ceil(Lg_t * Fs)  # Filter size in samples
delay = 0.050  # Beamformer delay in seconds
# Define the FFT length
N = 1024

# Create a microphone array.
# BUG FIX: the original compared `shape is "Circular"`, which tests object
# identity rather than string equality — CPython emits a SyntaxWarning for
# it and the result is implementation-dependent. `==` is the correct test.
if shape == "Circular":
    R = pra.circular_2D_array(mic1, M, phi, d * M / (2 * np.pi))
else:
    R = pra.linear_2D_array(mic1, M, phi, d)
# path to samples
path = os.path.dirname(__file__)
# The first signal (of interest) is singing
rate1, signal1 = wavfile.read(path + "/input_samples/singing_" + str(Fs) + ".wav")
signal1 = np.array(signal1, dtype=float)
# Normalize to full scale and high-pass filter (removes DC offset / rumble)
signal1 = pra.normalize(signal1)
signal1 = pra.highpass(signal1, Fs)
delay1 = 0.0  # target source starts playing immediately
# The second signal (interferer) is some german speech
rate2, signal2 = wavfile.read(path + "/input_samples/german_speech_" + str(Fs) + ".wav")
signal2 = np.array(signal2, dtype=float)
signal2 = pra.normalize(signal2)
signal2 = pra.highpass(signal2, Fs)
delay2 = 1.0  # interferer starts one second later
# Create the room: 2-D shoebox, image-source simulation with sensor noise
room_dim = [4, 6]
room1 = pra.ShoeBox(
    room_dim,
    absorption=absorption,
    fs=Fs,
    max_order=max_order_sim,
    sigma2_awgn=sigma2_n,
)
# Add sources to room
good_source = np.array([1, 4.5])  # good source
normal_interferer = np.array([2.8, 4.3])  # interferer
room1.add_source(good_source, signal=signal1, delay=delay1)
room1.add_source(normal_interferer, signal=signal2, delay=delay2)
"""
MVDR direct path only simulation
"""
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N=N, Lg=Lg)
room1.add_microphone_array(mics)
room1.compute_rir()
room1.simulate()
mics.rake_mvdr_filters(
room1.sources[0][0:1],
room1.sources[1][0:1],
sigma2_n * np.eye(mics.Lg * mics.M),
delay=delay,
)
# process the signal
output = mics.process()
# save to output file
input_mic = pra.normalize(pra.highpass(mics.signals[mics.M // 2], Fs))
wavfile.write(path + "/output_samples/input.wav", Fs, input_mic)
out_DirectMVDR = pra.normalize(pra.highpass(output, Fs))
wavfile.write(path + "/output_samples/output_DirectMVDR.wav", Fs, out_DirectMVDR)
"""
Rake MVDR simulation
"""
# Add the microphone array and compute RIR
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.mic_array = mics
room1.compute_rir()
room1.simulate()
# Design the beamforming filters using some of the images sources
good_sources = room1.sources[0][: max_order_design + 1]
bad_sources = room1.sources[1][: max_order_design + 1]
mics.rake_mvdr_filters(
good_sources, bad_sources, sigma2_n * np.eye(mics.Lg * mics.M), delay=delay
)
# process the signal
output = mics.process()
# save to output file
out_RakeMVDR = pra.normalize(pra.highpass(output, Fs))
wavfile.write(path + "/output_samples/output_RakeMVDR.wav", Fs, out_RakeMVDR)
"""
Perceptual direct path only simulation
"""
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.mic_array = mics
room1.compute_rir()
room1.simulate()
mics.rake_perceptual_filters(
room1.sources[0][0:1],
room1.sources[1][0:1],
sigma2_n * np.eye(mics.Lg * mics.M),
delay=delay,
)
# process the signal
output = mics.process()
# save to output file
out_DirectPerceptual = pra.normalize(pra.highpass(output, Fs))
wavfile.write(
path + "/output_samples/output_DirectPerceptual.wav", Fs, out_DirectPerceptual
)
"""
Rake Perceptual simulation
"""
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.mic_array = mics
room1.compute_rir()
room1.simulate()
mics.rake_perceptual_filters(
good_sources, bad_sources, sigma2_n * np.eye(mics.Lg * mics.M), delay=delay
)
# process the signal
output = mics.process()
# save to output file
out_RakePerceptual = pra.normalize(pra.highpass(output, Fs))
wavfile.write(
path + "/output_samples/output_RakePerceptual.wav", Fs, out_RakePerceptual
)
"""
Plot all the spectrogram
"""
dSNR = pra.dB(room1.direct_snr(mics.center[:, 0], source=0), power=True)
print("The direct SNR for good source is " + str(dSNR))
# remove a bit of signal at the end
n_lim = int(np.ceil(len(input_mic) - t_cut * Fs))
input_clean = signal1[:n_lim]
input_mic = input_mic[:n_lim]
out_DirectMVDR = out_DirectMVDR[:n_lim]
out_RakeMVDR = out_RakeMVDR[:n_lim]
out_DirectPerceptual = out_DirectPerceptual[:n_lim]
out_RakePerceptual = out_RakePerceptual[:n_lim]
# compute time-frequency planes
F0 = stft.analysis(input_clean, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp)
F1 = stft.analysis(input_mic, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp)
F2 = stft.analysis(
out_DirectMVDR, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp
)
F3 = stft.analysis(out_RakeMVDR, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp)
F4 = stft.analysis(
out_DirectPerceptual, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp
)
F5 = stft.analysis(
out_RakePerceptual, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp
)
# (not so) fancy way to set the scale to avoid having the spectrum
# dominated by a few outliers
p_min = 7
p_max = 100
all_vals = np.concatenate(
(
pra.dB(F1 + pra.eps),
pra.dB(F2 + pra.eps),
pra.dB(F3 + pra.eps),
pra.dB(F0 + pra.eps),
pra.dB(F4 + pra.eps),
pra.dB(F5 + pra.eps),
)
).flatten()
vmin, vmax = np.percentile(all_vals, [p_min, p_max])
cmap = "afmhot"
interpolation = "none"
fig, ax = plt.subplots(figsize=figsize, nrows=2, ncols=3)
def plot_spectrogram(F, title):
    """Draw one STFT magnitude (dB scale) into the current subplot.

    Relies on the module-level ``ax``, ``vmin``/``vmax``, ``cmap``,
    ``interpolation`` and ``Fs`` configured above.
    """
    image_kwargs = {
        "extent": [0, 1, 0, Fs / 2],
        "vmin": vmin,
        "vmax": vmax,
        "origin": "lower",
        "cmap": plt.get_cmap(cmap),
        "interpolation": interpolation,
    }
    plt.imshow(pra.dB(F.T), **image_kwargs)
    # Keep the title, strip every other decoration.
    ax.set_title(title)
    ax.set_xlabel("")
    ax.set_ylabel("")
    ax.set_aspect("auto")
    ax.axis("off")
# plot_spectrogram draws into the module-level `ax`, so each subplot must
# be selected (re-binding `ax`) immediately before the call.
ax = plt.subplot(2, 3, 1)
plot_spectrogram(F0, "Desired Signal")
ax = plt.subplot(2, 3, 4)
plot_spectrogram(F1, "Microphone Input")
ax = plt.subplot(2, 3, 2)
plot_spectrogram(F2, "Direct MVDR")
ax = plt.subplot(2, 3, 5)
plot_spectrogram(F3, "Rake MVDR")
ax = plt.subplot(2, 3, 3)
plot_spectrogram(F4, "Direct Perceptual")
ax = plt.subplot(2, 3, 6)
plot_spectrogram(F5, "Rake Perceptual")
# persist the 2x3 comparison figure, then show it interactively
fig.savefig(path + "/figures/spectrograms.png", dpi=150)
plt.show()
| [
"pyroomacoustics.linear_2D_array",
"pyroomacoustics.transform.stft.analysis",
"pyroomacoustics.circular_2D_array",
"pyroomacoustics.Beamformer",
"os.path.dirname",
"scipy.io.wavfile.write",
"pyroomacoustics.hann",
"matplotlib.pyplot.subplots",
"pyroomacoustics.highpass",
"pyroomacoustics.ShoeBox",... | [((556, 574), 'pyroomacoustics.hann', 'pra.hann', (['fft_size'], {}), '(fft_size)\n', (564, 574), True, 'import pyroomacoustics as pra\n'), ((779, 797), 'numpy.array', 'np.array', (['[2, 1.5]'], {}), '([2, 1.5])\n', (787, 797), True, 'import numpy as np\n'), ((1057, 1075), 'numpy.ceil', 'np.ceil', (['(Lg_t * Fs)'], {}), '(Lg_t * Fs)\n', (1064, 1075), True, 'import numpy as np\n'), ((1376, 1401), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1391, 1401), False, 'import os\n'), ((1540, 1570), 'numpy.array', 'np.array', (['signal1'], {'dtype': 'float'}), '(signal1, dtype=float)\n', (1548, 1570), True, 'import numpy as np\n'), ((1581, 1603), 'pyroomacoustics.normalize', 'pra.normalize', (['signal1'], {}), '(signal1)\n', (1594, 1603), True, 'import pyroomacoustics as pra\n'), ((1614, 1639), 'pyroomacoustics.highpass', 'pra.highpass', (['signal1', 'Fs'], {}), '(signal1, Fs)\n', (1626, 1639), True, 'import pyroomacoustics as pra\n'), ((1808, 1838), 'numpy.array', 'np.array', (['signal2'], {'dtype': 'float'}), '(signal2, dtype=float)\n', (1816, 1838), True, 'import numpy as np\n'), ((1849, 1871), 'pyroomacoustics.normalize', 'pra.normalize', (['signal2'], {}), '(signal2)\n', (1862, 1871), True, 'import pyroomacoustics as pra\n'), ((1882, 1907), 'pyroomacoustics.highpass', 'pra.highpass', (['signal2', 'Fs'], {}), '(signal2, Fs)\n', (1894, 1907), True, 'import pyroomacoustics as pra\n'), ((1966, 2068), 'pyroomacoustics.ShoeBox', 'pra.ShoeBox', (['room_dim'], {'absorption': 'absorption', 'fs': 'Fs', 'max_order': 'max_order_sim', 'sigma2_awgn': 'sigma2_n'}), '(room_dim, absorption=absorption, fs=Fs, max_order=max_order_sim,\n sigma2_awgn=sigma2_n)\n', (1977, 2068), True, 'import pyroomacoustics as pra\n'), ((2125, 2143), 'numpy.array', 'np.array', (['[1, 4.5]'], {}), '([1, 4.5])\n', (2133, 2143), True, 'import numpy as np\n'), ((2179, 2199), 'numpy.array', 'np.array', (['[2.8, 4.3]'], {}), '([2.8, 4.3])\n', (2187, 
2199), True, 'import numpy as np\n'), ((2420, 2453), 'pyroomacoustics.Beamformer', 'pra.Beamformer', (['R', 'Fs'], {'N': 'N', 'Lg': 'Lg'}), '(R, Fs, N=N, Lg=Lg)\n', (2434, 2453), True, 'import pyroomacoustics as pra\n'), ((2802, 2866), 'scipy.io.wavfile.write', 'wavfile.write', (["(path + '/output_samples/input.wav')", 'Fs', 'input_mic'], {}), "(path + '/output_samples/input.wav', Fs, input_mic)\n", (2815, 2866), False, 'from scipy.io import wavfile\n'), ((2925, 3010), 'scipy.io.wavfile.write', 'wavfile.write', (["(path + '/output_samples/output_DirectMVDR.wav')", 'Fs', 'out_DirectMVDR'], {}), "(path + '/output_samples/output_DirectMVDR.wav', Fs,\n out_DirectMVDR)\n", (2938, 3010), False, 'from scipy.io import wavfile\n'), ((3089, 3120), 'pyroomacoustics.Beamformer', 'pra.Beamformer', (['R', 'Fs', 'N'], {'Lg': 'Lg'}), '(R, Fs, N, Lg=Lg)\n', (3103, 3120), True, 'import pyroomacoustics as pra\n'), ((3589, 3666), 'scipy.io.wavfile.write', 'wavfile.write', (["(path + '/output_samples/output_RakeMVDR.wav')", 'Fs', 'out_RakeMVDR'], {}), "(path + '/output_samples/output_RakeMVDR.wav', Fs, out_RakeMVDR)\n", (3602, 3666), False, 'from scipy.io import wavfile\n'), ((3753, 3784), 'pyroomacoustics.Beamformer', 'pra.Beamformer', (['R', 'Fs', 'N'], {'Lg': 'Lg'}), '(R, Fs, N, Lg=Lg)\n', (3767, 3784), True, 'import pyroomacoustics as pra\n'), ((4121, 4218), 'scipy.io.wavfile.write', 'wavfile.write', (["(path + '/output_samples/output_DirectPerceptual.wav')", 'Fs', 'out_DirectPerceptual'], {}), "(path + '/output_samples/output_DirectPerceptual.wav', Fs,\n out_DirectPerceptual)\n", (4134, 4218), False, 'from scipy.io import wavfile\n'), ((4295, 4326), 'pyroomacoustics.Beamformer', 'pra.Beamformer', (['R', 'Fs', 'N'], {'Lg': 'Lg'}), '(R, Fs, N, Lg=Lg)\n', (4309, 4326), True, 'import pyroomacoustics as pra\n'), ((4629, 4722), 'scipy.io.wavfile.write', 'wavfile.write', (["(path + '/output_samples/output_RakePerceptual.wav')", 'Fs', 'out_RakePerceptual'], {}), "(path + 
'/output_samples/output_RakePerceptual.wav', Fs,\n out_RakePerceptual)\n", (4642, 4722), False, 'from scipy.io import wavfile\n'), ((5251, 5338), 'pyroomacoustics.transform.stft.analysis', 'stft.analysis', (['input_clean', 'fft_size', 'fft_hop'], {'win': 'analysis_window', 'zp_back': 'fft_zp'}), '(input_clean, fft_size, fft_hop, win=analysis_window, zp_back=\n fft_zp)\n', (5264, 5338), False, 'from pyroomacoustics.transform import stft\n'), ((5339, 5424), 'pyroomacoustics.transform.stft.analysis', 'stft.analysis', (['input_mic', 'fft_size', 'fft_hop'], {'win': 'analysis_window', 'zp_back': 'fft_zp'}), '(input_mic, fft_size, fft_hop, win=analysis_window, zp_back=fft_zp\n )\n', (5352, 5424), False, 'from pyroomacoustics.transform import stft\n'), ((5425, 5514), 'pyroomacoustics.transform.stft.analysis', 'stft.analysis', (['out_DirectMVDR', 'fft_size', 'fft_hop'], {'win': 'analysis_window', 'zp_back': 'fft_zp'}), '(out_DirectMVDR, fft_size, fft_hop, win=analysis_window,\n zp_back=fft_zp)\n', (5438, 5514), False, 'from pyroomacoustics.transform import stft\n'), ((5522, 5610), 'pyroomacoustics.transform.stft.analysis', 'stft.analysis', (['out_RakeMVDR', 'fft_size', 'fft_hop'], {'win': 'analysis_window', 'zp_back': 'fft_zp'}), '(out_RakeMVDR, fft_size, fft_hop, win=analysis_window, zp_back\n =fft_zp)\n', (5535, 5610), False, 'from pyroomacoustics.transform import stft\n'), ((5611, 5706), 'pyroomacoustics.transform.stft.analysis', 'stft.analysis', (['out_DirectPerceptual', 'fft_size', 'fft_hop'], {'win': 'analysis_window', 'zp_back': 'fft_zp'}), '(out_DirectPerceptual, fft_size, fft_hop, win=analysis_window,\n zp_back=fft_zp)\n', (5624, 5706), False, 'from pyroomacoustics.transform import stft\n'), ((5714, 5807), 'pyroomacoustics.transform.stft.analysis', 'stft.analysis', (['out_RakePerceptual', 'fft_size', 'fft_hop'], {'win': 'analysis_window', 'zp_back': 'fft_zp'}), '(out_RakePerceptual, fft_size, fft_hop, win=analysis_window,\n zp_back=fft_zp)\n', (5727, 5807), False, 
'from pyroomacoustics.transform import stft\n'), ((6174, 6213), 'numpy.percentile', 'np.percentile', (['all_vals', '[p_min, p_max]'], {}), '(all_vals, [p_min, p_max])\n', (6187, 6213), True, 'import numpy as np\n'), ((6265, 6312), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'nrows': '(2)', 'ncols': '(3)'}), '(figsize=figsize, nrows=2, ncols=3)\n', (6277, 6312), True, 'import matplotlib.pyplot as plt\n'), ((6676, 6696), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (6687, 6696), True, 'import matplotlib.pyplot as plt\n'), ((6742, 6762), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (6753, 6762), True, 'import matplotlib.pyplot as plt\n'), ((6810, 6830), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (6821, 6830), True, 'import matplotlib.pyplot as plt\n'), ((6873, 6893), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (6884, 6893), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6954), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (6945, 6954), True, 'import matplotlib.pyplot as plt\n'), ((7003, 7023), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (7014, 7023), True, 'import matplotlib.pyplot as plt\n'), ((7123, 7133), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7131, 7133), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1298), 'pyroomacoustics.circular_2D_array', 'pra.circular_2D_array', (['mic1', 'M', 'phi', '(d * M / (2 * np.pi))'], {}), '(mic1, M, phi, d * M / (2 * np.pi))\n', (1263, 1298), True, 'import pyroomacoustics as pra\n'), ((1313, 1349), 'pyroomacoustics.linear_2D_array', 'pra.linear_2D_array', (['mic1', 'M', 'phi', 'd'], {}), '(mic1, M, phi, d)\n', (1332, 1349), True, 'import pyroomacoustics as pra\n'), ((2757, 2800), 'pyroomacoustics.highpass', 
'pra.highpass', (['mics.signals[mics.M // 2]', 'Fs'], {}), '(mics.signals[mics.M // 2], Fs)\n', (2769, 2800), True, 'import pyroomacoustics as pra\n'), ((2899, 2923), 'pyroomacoustics.highpass', 'pra.highpass', (['output', 'Fs'], {}), '(output, Fs)\n', (2911, 2923), True, 'import pyroomacoustics as pra\n'), ((3563, 3587), 'pyroomacoustics.highpass', 'pra.highpass', (['output', 'Fs'], {}), '(output, Fs)\n', (3575, 3587), True, 'import pyroomacoustics as pra\n'), ((4095, 4119), 'pyroomacoustics.highpass', 'pra.highpass', (['output', 'Fs'], {}), '(output, Fs)\n', (4107, 4119), True, 'import pyroomacoustics as pra\n'), ((4603, 4627), 'pyroomacoustics.highpass', 'pra.highpass', (['output', 'Fs'], {}), '(output, Fs)\n', (4615, 4627), True, 'import pyroomacoustics as pra\n'), ((2617, 2641), 'numpy.eye', 'np.eye', (['(mics.Lg * mics.M)'], {}), '(mics.Lg * mics.M)\n', (2623, 2641), True, 'import numpy as np\n'), ((3425, 3449), 'numpy.eye', 'np.eye', (['(mics.Lg * mics.M)'], {}), '(mics.Lg * mics.M)\n', (3431, 3449), True, 'import numpy as np\n'), ((3944, 3968), 'numpy.eye', 'np.eye', (['(mics.Lg * mics.M)'], {}), '(mics.Lg * mics.M)\n', (3950, 3968), True, 'import numpy as np\n'), ((4459, 4483), 'numpy.eye', 'np.eye', (['(mics.Lg * mics.M)'], {}), '(mics.Lg * mics.M)\n', (4465, 4483), True, 'import numpy as np\n'), ((6371, 6382), 'pyroomacoustics.dB', 'pra.dB', (['F.T'], {}), '(F.T)\n', (6377, 6382), True, 'import pyroomacoustics as pra\n'), ((6493, 6511), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (6505, 6511), True, 'import matplotlib.pyplot as plt\n'), ((5971, 5991), 'pyroomacoustics.dB', 'pra.dB', (['(F1 + pra.eps)'], {}), '(F1 + pra.eps)\n', (5977, 5991), True, 'import pyroomacoustics as pra\n'), ((6001, 6021), 'pyroomacoustics.dB', 'pra.dB', (['(F2 + pra.eps)'], {}), '(F2 + pra.eps)\n', (6007, 6021), True, 'import pyroomacoustics as pra\n'), ((6031, 6051), 'pyroomacoustics.dB', 'pra.dB', (['(F3 + pra.eps)'], {}), '(F3 + pra.eps)\n', 
(6037, 6051), True, 'import pyroomacoustics as pra\n'), ((6061, 6081), 'pyroomacoustics.dB', 'pra.dB', (['(F0 + pra.eps)'], {}), '(F0 + pra.eps)\n', (6067, 6081), True, 'import pyroomacoustics as pra\n'), ((6091, 6111), 'pyroomacoustics.dB', 'pra.dB', (['(F4 + pra.eps)'], {}), '(F4 + pra.eps)\n', (6097, 6111), True, 'import pyroomacoustics as pra\n'), ((6121, 6141), 'pyroomacoustics.dB', 'pra.dB', (['(F5 + pra.eps)'], {}), '(F5 + pra.eps)\n', (6127, 6141), True, 'import pyroomacoustics as pra\n')] |
import cv2
import numpy as np
import math as mt
import matplotlib.pyplot as plt
def RGB2HSI(rgb_img):
    '''
    Convert a color image to HSI (hue, saturation, intensity).

    The three HSI channels are rescaled to [0, 255] and stored in
    channels 0 (H), 1 (S) and 2 (I) of the returned float32 image.

    NOTE(review): despite the parameter name, the cv2.split unpacking
    below assumes OpenCV's BGR channel order (the cv2.imread default) --
    confirm against callers.
    '''
    rgb_img = np.array(rgb_img, dtype="float32")
    n, m = rgb_img.shape[0], rgb_img.shape[1]
    hsi_img = rgb_img.copy()
    B, G, R = cv2.split(rgb_img)
    # Work with channel values normalized to [0, 1].
    [B, G, R] = [i / 255.0 for i in ([B, G, R])]
    H = np.zeros((n, m))
    S = np.zeros((n, m), dtype="float32")
    I = (R + G + B) / 3.0
    for i in range(n):
        # Denominator of the standard hue formula for this row.
        x = np.sqrt((R[i] - G[i]) ** 2 + (R[i] - B[i]) * (G[i] - B[i]))
        # Where x == 0 this divides by zero (NaN + RuntimeWarning); those
        # entries are overwritten by the h[x == 0] = 0 line below.
        theta = np.arccos(0.5 * (R[i] - B[i] + R[i] - G[i]) / x)
        h = np.zeros(m)
        # Hue is theta when B <= G and 2*pi - theta otherwise.
        h[B[i] <= G[i]] = theta[B[i] <= G[i]]
        h[G[i] < B[i]] = 2 * mt.pi-theta[G[i] < B[i]]
        h[x == 0] = 0
        # Normalize hue from [0, 2*pi) to [0, 1].
        H[i] = h / (2 * mt.pi)
    for i in range(n):
        # Per-pixel minimum channel, needed for the saturation formula
        # S = 1 - 3 * min(R, G, B) / (R + G + B).
        Min = []
        for j in range(m):
            arr = [B[i][j], G[i][j], R[i][j]]
            Min.append(np.min(arr))
        Min = np.array(Min)
        S[i] = 1 - Min * 3 / (R[i]+B[i]+G[i])
        # Black pixels divide by zero above; force their saturation to 0.
        S[i][R[i] + B[i] + G[i] == 0] = 0
    # Rescale each channel back to [0, 255].
    hsi_img[:, :, 0] = H*255
    hsi_img[:, :, 1] = S*255
    hsi_img[:, :, 2] = I*255
    return hsi_img
def HSI2RGB(hsi_img):
    '''
    Convert an HSI image (as produced by RGB2HSI) back to a color image.

    Channels 0, 1, 2 of the input hold H, S, I scaled to [0, 255]; the
    output stores B, G, R in channels 0, 1, 2, rescaled to [0, 255].

    NOTE(review): R, G, B below alias H, S, I, so the loop mutates the
    very arrays it reads. This is safe only because the three hue-sector
    masks are disjoint per pixel -- handle with care when modifying.
    '''
    n, m = hsi_img.shape[0], hsi_img.shape[1]
    rgb_img = hsi_img.copy()
    H, S, I = cv2.split(hsi_img)
    # Work with channel values normalized to [0, 1].
    [H, S, I] = [i / 255.0 for i in ([H,S,I])]
    R, G, B = H, S, I
    for i in range(n):
        # Hue angle in radians; standard HSI->RGB conversion, one formula
        # per 120-degree sector.
        h = H[i] * 2 * mt.pi
        # Sector 1: 0 <= h < 2*pi/3 (red-green)
        a1 = h >= 0
        a2 = h < 2 * mt.pi / 3
        a = a1 & a2
        tmp = np.cos(mt.pi / 3 - h)
        b = I[i] * (1 - S[i])
        r = I[i] * (1 + S[i] * np.cos(h) / tmp)
        g = 3 * I[i] - r - b
        B[i][a] = b[a]
        R[i][a] = r[a]
        G[i][a] = g[a]
        # Sector 2: 2*pi/3 <= h < 4*pi/3 (green-blue)
        a1 = h >= 2 * mt.pi / 3
        a2 = h < 4 * mt.pi / 3
        a = a1 & a2
        tmp = np.cos(mt.pi - h)
        r = I[i] * (1 - S[i])
        g = I[i] * (1 + S[i] * np.cos(h - 2 * mt.pi / 3) / tmp)
        b = 3 * I[i] - r - g
        R[i][a] = r[a]
        G[i][a] = g[a]
        B[i][a] = b[a]
        # Sector 3: 4*pi/3 <= h < 2*pi (blue-red)
        a1 = h >= 4 * mt.pi / 3
        a2 = h < 2 * mt.pi
        a = a1 & a2
        tmp = np.cos(5 * mt.pi / 3 - h)
        g = I[i] * (1 - S[i])
        b = I[i] * (1 + S[i] * np.cos(h - 4 * mt.pi / 3) / tmp)
        r = 3 * I[i] - g - b
        B[i][a] = b[a]
        G[i][a] = g[a]
        R[i][a] = r[a]
    # Rescale to [0, 255] and store in OpenCV's BGR layout.
    rgb_img[:, :,0] = B*255
    rgb_img[:, :,1] = G*255
    rgb_img[:, :,2] = R*255
    return rgb_img
def visual(x, name=""):
    '''
    Print the value range of ``x`` and save its histogram as
    "<name>_hist.png" in the working directory.
    '''
    print(f"Min : {x.min()} Max : {x.max()}")
    flattened = x.reshape(-1)
    plt.hist(flattened, bins=255)
    plt.xlim(0, 260)
    plt.savefig(f"{name}_hist.png")
    plt.close()
def calcu_mapping(x):
    '''
    Calculate equalize histogram mapping with color distribution x.

    Parameters
    ----------
    x : sequence of 256 numbers
        Histogram counts, one per gray level. NOTE: mutated in place by
        the clamping step below (original behavior, preserved).

    Returns
    -------
    dict
        Maps each gray level present in ``x`` to its equalized level.
    '''
    tot = sum(x)
    pix = []
    for i in range(256):
        if x[i] != 0:
            pix.append([i, x[i]])
    mapping = dict()
    if len(pix) == 1:
        mapping[pix[0][0]] = pix[0][0]
        return mapping
    # Optimize too much black or white: iteratively clamp any level that
    # holds more than 10% of the mass (1% for the extremes 0 and 255).
    # BUG FIX: with fewer than ~10 distinct levels the shares can never
    # all drop below the thresholds, so the original `while True` loop
    # shrank the counts forever and eventually divided by zero. Bounding
    # the number of rounds keeps the behavior for ordinary histograms
    # (which converge in a few rounds) while guaranteeing termination.
    for _ in range(100):
        flag = 0
        for i in range(len(pix)):
            if pix[i][1] / tot > 0.1:
                pix[i][1] = tot * 0.1
                x[pix[i][0]] = pix[i][1]
                flag = 1
            elif pix[i][1] / tot > 0.01 and (pix[i][0] == 0 or pix[i][0] == 255):
                pix[i][1] = tot * 0.01
                x[pix[i][0]] = pix[i][1]
                flag = 1
            tot = sum(x)
        if flag == 0:
            break
    current = 0
    N = len(pix)
    for i in range(N):
        # The first level maps to the lower edge of its slot, all later
        # ones to the upper edge (ratio is 0 for i == 0 and 1 otherwise).
        ratio = min(i, 1)
        l = int(255.0 * current / tot)
        current += pix[i][1]
        r = int(255.0 * current / tot)
        p = int(l + (r - l) * ratio)
        mapping[pix[i][0]] = p
    return mapping
def myequalizeHist(img):
    '''
    Histogram equalization driven by calcu_mapping.

    Builds the gray-level histogram of the 2-D image, derives the level
    mapping, and applies it to every pixel via a lookup table.
    '''
    img = np.uint8(img)
    # Histogram over the 256 gray levels.
    hist = [0] * 256
    for row in img:
        for value in row:
            hist[value] += 1
    mapping = calcu_mapping(hist)
    # Apply the mapping with a per-level lookup table. Levels absent from
    # the image keep identity entries, but they are never indexed anyway.
    lut = np.arange(256, dtype=np.uint8)
    for level, target in mapping.items():
        lut[level] = target
    return lut[img]
def MyEqualize(img, mode):
    '''
    Deal with GRAY and RGB, HSI separately

    mode selects the working space: "GRAY" equalizes a single channel,
    "RGB" equalizes each of B, G, R independently, "HSI" equalizes only
    the intensity channel. As a side effect, visual() writes
    "raw_hist.png" and "my_hist.png" to the working directory.

    NOTE(review): any other mode leaves `result` unbound and raises
    NameError at the return statement.
    '''
    if mode == "GRAY":
        if len(img.shape) == 3:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        result = myequalizeHist(img)
        visual(img, "raw")
        visual(result, "my")
    elif mode == "RGB":
        # Equalize each channel independently (may shift the color balance).
        (b, g, r) = cv2.split(img)
        bH = myequalizeHist(b)
        gH = myequalizeHist(g)
        rH = myequalizeHist(r)
        visual(r, "raw")
        visual(rH, "my")
        result = cv2.merge((bH, gH, rH))
    elif mode == "HSI":
        # Only the intensity channel is equalized; hue and saturation are
        # kept, which preserves colors better than per-channel RGB.
        hsi_img = RGB2HSI(img)
        (h, s, i) = hsi_img[:, :, 0], hsi_img[:, :, 1], hsi_img[:, :, 2]
        hH = h
        sH = s
        iH = myequalizeHist(i)
        visual(i, "raw")
        visual(iH, "my")
        hsi_img = np.stack((hH, sH, iH), axis=2)
        result = HSI2RGB(hsi_img)
    return result
def laplacian_filter(img, kernel):
    '''
    Convolve ``img`` with a 3x3 kernel, replicating border pixels.

    :param img: 2-D image
    :param kernel: 3x3 nested sequence of weights
    :return: int-typed array of raw (unclipped) responses, same shape
    '''
    response = np.array(img, dtype="int")
    rows, cols = response.shape
    for y in range(rows):
        for x in range(cols):
            acc = 0
            for dy in range(3):
                src_y = max(0, min(rows - 1, y + dy - 1))
                for dx in range(3):
                    src_x = max(0, min(cols - 1, x + dx - 1))
                    acc += kernel[dy][dx] * int(img[src_y][src_x])
            response[y][x] = acc
    return response
def MyLaplacian(img, mode):
    '''
    Deal with GRAY and RGB separately.

    Sharpen ``img`` with a Laplacian kernel.  A mode string containing
    "1" selects the 4-neighbour kernel, otherwise the 8-neighbour one.

    :return: (sharpened image clipped to [0, 255], raw Laplacian
              response, Laplacian min-max normalized to uint8)
    '''
    if "1" in mode:
        kernel = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
    else:
        kernel = [[1, 1, 1], [1, -8, 1], [1, 1, 1]]
    if len(img.shape) == 2:
        laplacian = laplacian_filter(img, kernel)
        # Min-max normalize the response to uint8 for display.
        # NOTE(review): divides by (max - min); a perfectly flat image
        # would divide by zero -- confirm inputs are never constant.
        norm = np.uint8(255 * (laplacian - laplacian.min()) / (laplacian.max() - laplacian.min()))
    else:
        # Colour image: filter each channel, then normalize per channel.
        (b, g, r) = cv2.split(img)
        b_l, g_l, r_l = laplacian_filter(b, kernel), laplacian_filter(g, kernel), laplacian_filter(r, kernel)
        laplacian = cv2.merge((b_l, g_l, r_l))
        norm = laplacian.copy()
        for i in range(3):
            sub = norm[:, :, i]
            norm[:, :, i] = np.uint8(255 * (sub - sub.min()) / (sub.max() - sub.min()))
    return np.clip(np.array(img, dtype="int")-laplacian, 0, 255), laplacian, norm
def FFT(img):
    '''
    2-D Fourier transform round trip.

    :return: (20*log|F| with the zero frequency shifted to the centre,
              |IFFT(FFT(img))| -- the reconstructed image)
    '''
    spectrum = np.fft.fft2(img)
    magnitude = 20 * np.log(np.abs(np.fft.fftshift(spectrum)))
    restored = np.abs(np.fft.ifft2(spectrum))
    return magnitude, restored
def Matching(img):
    '''
    Template-matching demo.

    Crops a random (width/5 x height/5) patch of ``img`` as the
    template, then matches it back against the image with normalized
    cross-correlation.

    :return: (image with the template location boxed,
              image with the single best match boxed,
              image with every match scoring >= 0.8 boxed)
    '''
    width, height = img.shape[0], img.shape[1]
    w = width // 5
    h = height // 5
    # Random top-left corner such that the patch stays inside the image.
    # NOTE(review): p is (row, col) while cv2.rectangle expects (x, y);
    # for non-square images the drawn box may be transposed -- confirm.
    p = (np.random.randint(width-w), np.random.randint(height-h))
    template = img[p[0]:p[0]+w, p[1]:p[1]+h]
    template_img = img.copy()
    cv2.rectangle(template_img, p, (p[0]+w, p[1]+h), 255, 2)
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    single_img = img.copy()
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(single_img, top_left, bottom_right, 255, 2)
    multi_img = img.copy()
    threshold = 0.8
    loc = np.where(res >= threshold)
    # np.where returns (rows, cols); reverse to (x, y) order for drawing.
    for pt in zip(*loc[::-1]):
        bottom_right = (pt[0] + w, pt[1] + h)
        cv2.rectangle(multi_img, pt, bottom_right, 255, 2)
    return template_img, single_img, multi_img
def IdealHighPassFiltering(f_shift, D0):
    '''
    Ideal high-pass filter in the frequency domain.

    Zeroes every frequency whose distance from the centre of the shifted
    spectrum is below the cutoff radius; distances >= D0 pass unchanged.

    :param f_shift: centred (fftshift-ed) 2-D spectrum
    :param D0: cutoff radius
    :return: filtered spectrum, same shape as ``f_shift``
    '''
    m = f_shift.shape[0]
    n = f_shift.shape[1]
    x0 = np.floor(m/2)
    y0 = np.floor(n/2)
    # Build the distance grid in one vectorized pass instead of a Python
    # double loop over every pixel (same values, much faster).
    i, j = np.meshgrid(np.arange(m), np.arange(n), indexing="ij")
    D = np.sqrt((i - x0)**2 + (j - y0)**2)
    h1 = (D >= D0).astype(float)
    result = np.multiply(f_shift, h1)
    return result
def GaussLowPassFiltering(f_shift, D0):
    '''
    Gaussian low-pass filter in the frequency domain.

    Attenuates each frequency by exp(-D^2 / (2 * D0^2)) where D is its
    distance from the centre of the shifted spectrum.

    :param f_shift: centred (fftshift-ed) 2-D spectrum
    :param D0: Gaussian cutoff (standard deviation of the mask)
    :return: filtered spectrum, same shape as ``f_shift``
    '''
    m = f_shift.shape[0]
    n = f_shift.shape[1]
    x0 = np.floor(m/2)
    y0 = np.floor(n/2)
    # Vectorized distance grid replaces the per-pixel Python double loop
    # (identical values, much faster).
    i, j = np.meshgrid(np.arange(m), np.arange(n), indexing="ij")
    D = np.sqrt((i - x0)**2 + (j - y0)**2)
    h1 = np.exp((-1)*D**2/2/(D0**2))
    result = np.multiply(f_shift, h1)
    return result
def GFLP(img, r):
    '''
    Apply frequency-domain filters with cutoff ``r`` to ``img``.

    :return: (ideal high-pass result, Gaussian low-pass result), both
             as spatial-domain magnitude images
    '''
    spectrum = np.fft.fftshift(np.fft.fft2(img))
    high = np.fft.ifftshift(IdealHighPassFiltering(spectrum, r))
    low = np.fft.ifftshift(GaussLowPassFiltering(spectrum, r))
    return np.abs(np.fft.ifft2(high)), np.abs(np.fft.ifft2(low))
def DCT(img):
    '''
    Discrete cosine transform and its inverse (round trip).

    Grayscale images are transformed directly; colour images are
    transformed channel by channel.

    :return: (DCT coefficients, image reconstructed via inverse DCT)
    '''
    if len(img.shape) == 2:
        img = np.float32(img)
        dct = cv2.dct(img)
        idct = cv2.idct(dct)
    else:
        # cv2.dct handles single-channel input only, so loop per channel.
        img = np.float32(img)
        dct = img.copy()
        idct = img.copy()
        for i in range(3):
            dct[:, :, i] = cv2.dct(img[:, :, i])
            idct[:, :, i] = cv2.idct(dct[:, :, i])
    return dct, idct
def Edge(img):
    '''
    Edge detection: Sobel gradients plus a Hough-line visualization.

    :param img: grayscale image
    :return: (sobel_x, sobel_y, hough) where hough is the Canny edge
             map with the detected Hough lines drawn onto it
    '''
    sobel_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    sobel_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
    dst = cv2.Canny(img, 50, 200, None, 3)
    lines = cv2.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)
    hough = dst.copy()
    if lines is not None:
        for i in range(0, len(lines)):
            rho = lines[i][0][0]
            theta = lines[i][0][1]
            a = math.cos(theta)
            b = math.sin(theta)
            x0 = a * rho
            y0 = b * rho
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
            # Fix: use cv2 (the imported module name; `cv` was undefined)
            # and draw onto `hough`, the copy that is actually returned
            # -- the original drew onto `dst`, so the returned image
            # never showed the detected lines.
            cv2.line(hough, pt1, pt2, 255, 3, cv2.LINE_AA)
    return sobel_x, sobel_y, hough
def morphology(img, mode):
    '''
    Apply a 5x5 morphological operation selected by ``mode``.

    Supported modes: "erode", "dilation", "opening", "closing"; any
    other value yields the morphological gradient.
    '''
    kernel = np.ones((5, 5), np.uint8)
    if mode == "erode":
        result = cv2.erode(img, kernel)
    elif mode == "dilation":
        result = cv2.dilate(img, kernel)
    else:
        op_by_mode = {"opening": cv2.MORPH_OPEN, "closing": cv2.MORPH_CLOSE}
        op = op_by_mode.get(mode, cv2.MORPH_GRADIENT)
        result = cv2.morphologyEx(img, op, kernel)
    return result
def add_noise(img, mode):
    '''
    Add random noise to an image.

    Only mode == "gaussian" is supported: additive Gaussian noise with
    a variance drawn uniformly from {100, 400, 800, 1600}.  Any other
    mode leaves the image untouched.  (Previously the non-gaussian path
    crashed with NameError because ``var`` was only bound inside the
    gaussian branch.)

    :return: (noisy uint8 image, variance used -- 0 if no noise added)
    '''
    var = 0  # fix: ensure `var` is defined on every path
    if mode == "gaussian":
        noise_size = [100, 400, 800, 1600]
        var = noise_size[np.random.randint(len(noise_size))]
        print("noise var {}".format(var))
        if len(img.shape) == 2:
            noise = np.random.normal(0, var ** 0.5, img.shape)
            img = np.clip(img + noise, 0, 255)
        else:
            # Colour image: perturb each channel independently.
            for i in range(3):
                noise = np.random.normal(0, var ** 0.5, img.shape[:2])
                img[:, :, i] = np.clip(img[:, :, i] + noise, 0, 255)
    return np.uint8(img), var
def myarithemeticblur(img, kernel_size):
    '''
    Arithmetic-mean blur with a kernel_size x kernel_size window.

    Windows are clipped at the image border, so edge pixels average
    over a smaller neighbourhood.
    '''
    half = int(kernel_size // 2)
    rows, cols = img.shape
    blurred = np.zeros(img.shape)
    for r in range(rows):
        for c in range(cols):
            window = img[max(r - half, 0): min(r + half + 1, rows),
                         max(c - half, 0): min(c + half + 1, cols)]
            blurred[r][c] = np.mean(window)
    return np.uint8(blurred)
def geomean(xs):
    '''
    Geometric mean of ``xs``, with values clipped to [0.1, 255] first so
    the logarithm is always defined.
    '''
    clipped = np.clip(xs, 0.1, 255)
    log_sum = mt.fsum(mt.log(v) for v in clipped)
    return mt.exp(log_sum / len(clipped))
def mygeometricblur(img, kernel_size):
    '''
    Geometric-mean blur with a kernel_size x kernel_size window,
    clipped at the image border.
    '''
    half = int(kernel_size // 2)
    rows, cols = img.shape
    blurred = np.zeros(img.shape, dtype="float32")
    for r in range(rows):
        for c in range(cols):
            window = img[max(r - half, 0): min(r + half + 1, rows),
                         max(c - half, 0): min(c + half + 1, cols)]
            blurred[r][c] = geomean(window.reshape(-1))
    return np.uint8(blurred)
def myadaptiveblur(img, kernel_size, noise_var=1000):
    '''
    Adaptive image blurring.

    Local adaptive noise-reduction filter: each pixel is pulled towards
    its local window mean by a factor of
    min(local_var / noise_var, noise_var / local_var) <= 1, so the
    correction is strongest where the local variance matches the
    assumed noise variance and weak elsewhere.

    :param img: 2-D grayscale image
    :param kernel_size: side length of the square local window
    :param noise_var: assumed global noise variance; 0 disables filtering
    '''
    if noise_var == 0:
        return img
    K = int(kernel_size // 2)
    blur_img = np.array(img, dtype="float32")
    n, m = img.shape
    for i in range(n):
        for j in range(m):
            # Border pixels (incomplete windows) are left unchanged.
            if i < K or i-K+kernel_size > n-1 or j < K or j-K+kernel_size > m-1:
                continue
            else:
                local_space = img[i-K:i-K+kernel_size, j-K:j-K+kernel_size].reshape(-1)
                local_mean = np.mean(local_space)
                local_var = np.var(local_space)
                # Deal in special case (local var too small): swap the
                # ratio so the correction factor never exceeds 1.
                if local_var < noise_var:
                    blur_img[i][j] = img[i][j]-local_var / noise_var * (img[i][j]-local_mean)
                else:
                    blur_img[i][j] = img[i][j] - noise_var / local_var * (img[i][j] - local_mean)
    return np.uint8(blur_img)
def Recover(img, mode, noise_var=1000):
    '''
    Denoise a single-channel image with the filter selected by ``mode``.

    "Arithmetic" -> arithmetic-mean blur, "Geometric" -> geometric-mean
    blur, anything else -> adaptive filter (uses ``noise_var``).  All
    filters use a 7x7 window.
    '''
    if mode == 'Arithmetic':
        result = myarithemeticblur(img, 7)
    elif mode == "Geometric":
        result = mygeometricblur(img, 7)
    else:
        result = myadaptiveblur(img, 7, noise_var)
    return result
def MyRecover(img, mode, noise_var=1000):
    '''
    Denoise either a grayscale image or, channel by channel, a colour
    image (GRAY and RGB handled separately).
    '''
    if len(img.shape) == 2:
        return Recover(img, mode, noise_var)
    recovered = img.copy()
    for channel in range(3):
        recovered[:, :, channel] = Recover(img[:, :, channel], mode, noise_var)
    return recovered
def OpencvEqualize(img, mode):
    '''
    Utilized for double check during experiment.
    Not used in final demo.

    OpenCV reference version of MyEqualize: equalize the histogram in
    the colour space named by ``mode`` ("GRAY", "RGB" or "HSI").
    '''
    if mode == "GRAY":
        result = cv2.equalizeHist(img)
        visual(result, "cvGRAY")
    elif mode == "RGB":
        # Equalize each colour channel independently.
        (b, g, r) = cv2.split(img)
        bH = cv2.equalizeHist(b)
        gH = cv2.equalizeHist(g)
        rH = cv2.equalizeHist(r)
        result = cv2.merge((bH, gH, rH))
    elif mode == "HSI":
        # Equalize only the intensity channel.
        hsi_img = RGB2HSI(img)
        (h, s, i) = hsi_img[:, :, 0], hsi_img[:, :, 1], hsi_img[:, :, 2]
        hH = h
        sH = s
        iH = cv2.equalizeHist(np.uint8(i))
        hsi_img = np.stack((hH, sH, iH), axis=2)
        result = HSI2RGB(hsi_img)
    # NOTE(review): an unrecognized mode leaves `result` unbound
    # (NameError) -- confirm only the three modes are ever passed.
    return result
def OpencvLaplacian(img):
    '''
    Utilized for double check during experiment.
    Not used in final demo.

    :return: (sharpened image, raw Laplacian response)
    '''
    response = cv2.Laplacian(img, cv2.CV_64F)
    sharpened = img - response
    return sharpened, response
def OpencvRecover(img, mode):
    '''
    Utilized for double check during experiment.
    Not used in final demo.

    Every mode ("Arithmetic", "Geometric", or anything else) falls back
    to the same 7x7 box blur, so the original mode branches collapse to
    a single call.
    '''
    return cv2.blur(img, (7, 7))
if __name__ == '__main__':
pass | [
"numpy.abs",
"numpy.floor",
"numpy.ones",
"numpy.clip",
"numpy.random.randint",
"numpy.mean",
"numpy.exp",
"numpy.random.normal",
"cv2.rectangle",
"cv2.minMaxLoc",
"cv2.erode",
"numpy.fft.ifft2",
"cv2.matchTemplate",
"numpy.multiply",
"cv2.dilate",
"cv2.cvtColor",
"matplotlib.pyplot.... | [((163, 197), 'numpy.array', 'np.array', (['rgb_img'], {'dtype': '"""float32"""'}), "(rgb_img, dtype='float32')\n", (171, 197), True, 'import numpy as np\n'), ((287, 305), 'cv2.split', 'cv2.split', (['rgb_img'], {}), '(rgb_img)\n', (296, 305), False, 'import cv2\n'), ((363, 379), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (371, 379), True, 'import numpy as np\n'), ((388, 421), 'numpy.zeros', 'np.zeros', (['(n, m)'], {'dtype': '"""float32"""'}), "((n, m), dtype='float32')\n", (396, 421), True, 'import numpy as np\n'), ((1314, 1332), 'cv2.split', 'cv2.split', (['hsi_img'], {}), '(hsi_img)\n', (1323, 1332), False, 'import cv2\n'), ((2623, 2644), 'matplotlib.pyplot.hist', 'plt.hist', (['y'], {'bins': '(255)'}), '(y, bins=255)\n', (2631, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2649, 2665), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(260)'], {}), '(0, 260)\n', (2657, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2725), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2723, 2725), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3919), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (3914, 3919), True, 'import numpy as np\n'), ((5279, 5305), 'numpy.array', 'np.array', (['img'], {'dtype': '"""int"""'}), "(img, dtype='int')\n", (5287, 5305), True, 'import numpy as np\n'), ((6519, 6535), 'numpy.fft.fft2', 'np.fft.fft2', (['img'], {}), '(img)\n', (6530, 6535), True, 'import numpy as np\n'), ((6548, 6565), 'numpy.fft.ifft2', 'np.fft.ifft2', (['fft'], {}), '(fft)\n', (6560, 6565), True, 'import numpy as np\n'), ((6885, 6945), 'cv2.rectangle', 'cv2.rectangle', (['template_img', 'p', '(p[0] + w, p[1] + h)', '(255)', '(2)'], {}), '(template_img, p, (p[0] + w, p[1] + h), 255, 2)\n', (6898, 6945), False, 'import cv2\n'), ((6953, 7007), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(img, template, cv2.TM_CCOEFF_NORMED)\n', (6970, 7007), False, 
'import cv2\n'), ((7079, 7097), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['res'], {}), '(res)\n', (7092, 7097), False, 'import cv2\n'), ((7180, 7237), 'cv2.rectangle', 'cv2.rectangle', (['single_img', 'top_left', 'bottom_right', '(255)', '(2)'], {}), '(single_img, top_left, bottom_right, 255, 2)\n', (7193, 7237), False, 'import cv2\n'), ((7297, 7323), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (7305, 7323), True, 'import numpy as np\n'), ((7609, 7625), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (7617, 7625), True, 'import numpy as np\n'), ((7635, 7650), 'numpy.floor', 'np.floor', (['(m / 2)'], {}), '(m / 2)\n', (7643, 7650), True, 'import numpy as np\n'), ((7658, 7673), 'numpy.floor', 'np.floor', (['(n / 2)'], {}), '(n / 2)\n', (7666, 7673), True, 'import numpy as np\n'), ((7839, 7863), 'numpy.multiply', 'np.multiply', (['f_shift', 'h1'], {}), '(f_shift, h1)\n', (7850, 7863), True, 'import numpy as np\n'), ((7982, 7998), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (7990, 7998), True, 'import numpy as np\n'), ((8008, 8023), 'numpy.floor', 'np.floor', (['(m / 2)'], {}), '(m / 2)\n', (8016, 8023), True, 'import numpy as np\n'), ((8031, 8046), 'numpy.floor', 'np.floor', (['(n / 2)'], {}), '(n / 2)\n', (8039, 8046), True, 'import numpy as np\n'), ((8210, 8234), 'numpy.multiply', 'np.multiply', (['f_shift', 'h1'], {}), '(f_shift, h1)\n', (8221, 8234), True, 'import numpy as np\n'), ((8280, 8296), 'numpy.fft.fft2', 'np.fft.fft2', (['img'], {}), '(img)\n', (8291, 8296), True, 'import numpy as np\n'), ((8311, 8329), 'numpy.fft.fftshift', 'np.fft.fftshift', (['f'], {}), '(f)\n', (8326, 8329), True, 'import numpy as np\n'), ((8924, 8965), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(3)'}), '(img, cv2.CV_64F, 1, 0, ksize=3)\n', (8933, 8965), False, 'import cv2\n'), ((8980, 9021), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(3)'}), '(img, cv2.CV_64F, 0, 
1, ksize=3)\n', (8989, 9021), False, 'import cv2\n'), ((9033, 9065), 'cv2.Canny', 'cv2.Canny', (['img', '(50)', '(200)', 'None', '(3)'], {}), '(img, 50, 200, None, 3)\n', (9042, 9065), False, 'import cv2\n'), ((9079, 9131), 'cv2.HoughLines', 'cv2.HoughLines', (['dst', '(1)', '(np.pi / 180)', '(150)', 'None', '(0)', '(0)'], {}), '(dst, 1, np.pi / 180, 150, None, 0, 0)\n', (9093, 9131), False, 'import cv2\n'), ((9664, 9689), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (9671, 9689), True, 'import numpy as np\n'), ((10841, 10860), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (10849, 10860), True, 'import numpy as np\n'), ((11044, 11062), 'numpy.uint8', 'np.uint8', (['blur_img'], {}), '(blur_img)\n', (11052, 11062), True, 'import numpy as np\n'), ((11138, 11159), 'numpy.clip', 'np.clip', (['xs', '(0.1)', '(255)'], {}), '(xs, 0.1, 255)\n', (11145, 11159), True, 'import numpy as np\n'), ((11357, 11393), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': '"""float32"""'}), "(img.shape, dtype='float32')\n", (11365, 11393), True, 'import numpy as np\n'), ((11600, 11618), 'numpy.uint8', 'np.uint8', (['blur_img'], {}), '(blur_img)\n', (11608, 11618), True, 'import numpy as np\n'), ((11812, 11842), 'numpy.array', 'np.array', (['img'], {'dtype': '"""float32"""'}), "(img, dtype='float32')\n", (11820, 11842), True, 'import numpy as np\n'), ((12556, 12574), 'numpy.uint8', 'np.uint8', (['blur_img'], {}), '(blur_img)\n', (12564, 12574), True, 'import numpy as np\n'), ((14052, 14082), 'cv2.Laplacian', 'cv2.Laplacian', (['img', 'cv2.CV_64F'], {}), '(img, cv2.CV_64F)\n', (14065, 14082), False, 'import cv2\n'), ((483, 542), 'numpy.sqrt', 'np.sqrt', (['((R[i] - G[i]) ** 2 + (R[i] - B[i]) * (G[i] - B[i]))'], {}), '((R[i] - G[i]) ** 2 + (R[i] - B[i]) * (G[i] - B[i]))\n', (490, 542), True, 'import numpy as np\n'), ((559, 607), 'numpy.arccos', 'np.arccos', (['(0.5 * (R[i] - B[i] + R[i] - G[i]) / x)'], {}), '(0.5 * (R[i] - B[i] + R[i] - 
G[i]) / x)\n', (568, 607), True, 'import numpy as np\n'), ((620, 631), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (628, 631), True, 'import numpy as np\n'), ((948, 961), 'numpy.array', 'np.array', (['Min'], {}), '(Min)\n', (956, 961), True, 'import numpy as np\n'), ((1539, 1560), 'numpy.cos', 'np.cos', (['(mt.pi / 3 - h)'], {}), '(mt.pi / 3 - h)\n', (1545, 1560), True, 'import numpy as np\n'), ((1834, 1851), 'numpy.cos', 'np.cos', (['(mt.pi - h)'], {}), '(mt.pi - h)\n', (1840, 1851), True, 'import numpy as np\n'), ((2137, 2162), 'numpy.cos', 'np.cos', (['(5 * mt.pi / 3 - h)'], {}), '(5 * mt.pi / 3 - h)\n', (2143, 2162), True, 'import numpy as np\n'), ((6060, 6074), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (6069, 6074), False, 'import cv2\n'), ((6205, 6231), 'cv2.merge', 'cv2.merge', (['(b_l, g_l, r_l)'], {}), '((b_l, g_l, r_l))\n', (6214, 6231), False, 'import cv2\n'), ((6620, 6632), 'numpy.abs', 'np.abs', (['ifft'], {}), '(ifft)\n', (6626, 6632), True, 'import numpy as np\n'), ((6749, 6777), 'numpy.random.randint', 'np.random.randint', (['(width - w)'], {}), '(width - w)\n', (6766, 6777), True, 'import numpy as np\n'), ((6777, 6806), 'numpy.random.randint', 'np.random.randint', (['(height - h)'], {}), '(height - h)\n', (6794, 6806), True, 'import numpy as np\n'), ((7409, 7459), 'cv2.rectangle', 'cv2.rectangle', (['multi_img', 'pt', 'bottom_right', '(255)', '(2)'], {}), '(multi_img, pt, bottom_right, 255, 2)\n', (7422, 7459), False, 'import cv2\n'), ((8580, 8595), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (8590, 8595), True, 'import numpy as np\n'), ((8610, 8622), 'cv2.dct', 'cv2.dct', (['img'], {}), '(img)\n', (8617, 8622), False, 'import cv2\n'), ((8639, 8652), 'cv2.idct', 'cv2.idct', (['dct'], {}), '(dct)\n', (8647, 8652), False, 'import cv2\n'), ((8677, 8692), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (8687, 8692), True, 'import numpy as np\n'), ((9729, 9751), 'cv2.erode', 'cv2.erode', (['img', 'kernel'], {}), 
'(img, kernel)\n', (9738, 9751), False, 'import cv2\n'), ((13351, 13372), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img'], {}), '(img)\n', (13367, 13372), False, 'import cv2\n'), ((14295, 14316), 'cv2.blur', 'cv2.blur', (['img', '(7, 7)'], {}), '(img, (7, 7))\n', (14303, 14316), False, 'import cv2\n'), ((4443, 4480), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4455, 4480), False, 'import cv2\n'), ((4618, 4632), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (4627, 4632), False, 'import cv2\n'), ((4793, 4816), 'cv2.merge', 'cv2.merge', (['(bH, gH, rH)'], {}), '((bH, gH, rH))\n', (4802, 4816), False, 'import cv2\n'), ((7738, 7776), 'numpy.sqrt', 'np.sqrt', (['((i - x0) ** 2 + (j - y0) ** 2)'], {}), '((i - x0) ** 2 + (j - y0) ** 2)\n', (7745, 7776), True, 'import numpy as np\n'), ((8111, 8149), 'numpy.sqrt', 'np.sqrt', (['((i - x0) ** 2 + (j - y0) ** 2)'], {}), '((i - x0) ** 2 + (j - y0) ** 2)\n', (8118, 8149), True, 'import numpy as np\n'), ((8169, 8202), 'numpy.exp', 'np.exp', (['(-1 * D ** 2 / 2 / D0 ** 2)'], {}), '(-1 * D ** 2 / 2 / D0 ** 2)\n', (8175, 8202), True, 'import numpy as np\n'), ((8475, 8493), 'numpy.fft.ifft2', 'np.fft.ifft2', (['IHPF'], {}), '(IHPF)\n', (8487, 8493), True, 'import numpy as np\n'), ((8503, 8521), 'numpy.fft.ifft2', 'np.fft.ifft2', (['GLPF'], {}), '(GLPF)\n', (8515, 8521), True, 'import numpy as np\n'), ((8799, 8820), 'cv2.dct', 'cv2.dct', (['img[:, :, i]'], {}), '(img[:, :, i])\n', (8806, 8820), False, 'import cv2\n'), ((8849, 8871), 'cv2.idct', 'cv2.idct', (['dct[:, :, i]'], {}), '(dct[:, :, i])\n', (8857, 8871), False, 'import cv2\n'), ((9796, 9819), 'cv2.dilate', 'cv2.dilate', (['img', 'kernel'], {}), '(img, kernel)\n', (9806, 9819), False, 'import cv2\n'), ((10392, 10434), 'numpy.random.normal', 'np.random.normal', (['(0)', '(var ** 0.5)', 'img.shape'], {}), '(0, var ** 0.5, img.shape)\n', (10408, 10434), True, 'import numpy as np\n'), ((10453, 10481), 'numpy.clip', 
'np.clip', (['(img + noise)', '(0)', '(255)'], {}), '(img + noise, 0, 255)\n', (10460, 10481), True, 'import numpy as np\n'), ((10683, 10696), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (10691, 10696), True, 'import numpy as np\n'), ((13450, 13464), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (13459, 13464), False, 'import cv2\n'), ((13478, 13497), 'cv2.equalizeHist', 'cv2.equalizeHist', (['b'], {}), '(b)\n', (13494, 13497), False, 'import cv2\n'), ((13511, 13530), 'cv2.equalizeHist', 'cv2.equalizeHist', (['g'], {}), '(g)\n', (13527, 13530), False, 'import cv2\n'), ((13544, 13563), 'cv2.equalizeHist', 'cv2.equalizeHist', (['r'], {}), '(r)\n', (13560, 13563), False, 'import cv2\n'), ((13581, 13604), 'cv2.merge', 'cv2.merge', (['(bH, gH, rH)'], {}), '((bH, gH, rH))\n', (13590, 13604), False, 'import cv2\n'), ((14362, 14383), 'cv2.blur', 'cv2.blur', (['img', '(7, 7)'], {}), '(img, (7, 7))\n', (14370, 14383), False, 'import cv2\n'), ((14409, 14430), 'cv2.blur', 'cv2.blur', (['img', '(7, 7)'], {}), '(img, (7, 7))\n', (14417, 14430), False, 'import cv2\n'), ((921, 932), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (927, 932), True, 'import numpy as np\n'), ((5074, 5104), 'numpy.stack', 'np.stack', (['(hH, sH, iH)'], {'axis': '(2)'}), '((hH, sH, iH), axis=2)\n', (5082, 5104), True, 'import numpy as np\n'), ((6431, 6457), 'numpy.array', 'np.array', (['img'], {'dtype': '"""int"""'}), "(img, dtype='int')\n", (6439, 6457), True, 'import numpy as np\n'), ((9863, 9908), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (9879, 9908), False, 'import cv2\n'), ((10551, 10597), 'numpy.random.normal', 'np.random.normal', (['(0)', '(var ** 0.5)', 'img.shape[:2]'], {}), '(0, var ** 0.5, img.shape[:2])\n', (10567, 10597), True, 'import numpy as np\n'), ((10629, 10666), 'numpy.clip', 'np.clip', (['(img[:, :, i] + noise)', '(0)', '(255)'], {}), '(img[:, :, i] + noise, 0, 255)\n', (10636, 10666), 
True, 'import numpy as np\n'), ((12157, 12177), 'numpy.mean', 'np.mean', (['local_space'], {}), '(local_space)\n', (12164, 12177), True, 'import numpy as np\n'), ((12206, 12225), 'numpy.var', 'np.var', (['local_space'], {}), '(local_space)\n', (12212, 12225), True, 'import numpy as np\n'), ((13824, 13854), 'numpy.stack', 'np.stack', (['(hH, sH, iH)'], {'axis': '(2)'}), '((hH, sH, iH), axis=2)\n', (13832, 13854), True, 'import numpy as np\n'), ((6596, 6616), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft'], {}), '(fft)\n', (6611, 6616), True, 'import numpy as np\n'), ((9952, 9998), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(img, cv2.MORPH_CLOSE, kernel)\n', (9968, 9998), False, 'import cv2\n'), ((10024, 10073), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_GRADIENT', 'kernel'], {}), '(img, cv2.MORPH_GRADIENT, kernel)\n', (10040, 10073), False, 'import cv2\n'), ((11187, 11196), 'math.log', 'mt.log', (['x'], {}), '(x)\n', (11193, 11196), True, 'import math as mt\n'), ((13793, 13804), 'numpy.uint8', 'np.uint8', (['i'], {}), '(i)\n', (13801, 13804), True, 'import numpy as np\n'), ((1622, 1631), 'numpy.cos', 'np.cos', (['h'], {}), '(h)\n', (1628, 1631), True, 'import numpy as np\n'), ((1913, 1938), 'numpy.cos', 'np.cos', (['(h - 2 * mt.pi / 3)'], {}), '(h - 2 * mt.pi / 3)\n', (1919, 1938), True, 'import numpy as np\n'), ((2224, 2249), 'numpy.cos', 'np.cos', (['(h - 4 * mt.pi / 3)'], {}), '(h - 4 * mt.pi / 3)\n', (2230, 2249), True, 'import numpy as np\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing TypeCast op in DE
"""
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
def slice_compare(array, indexing):
    """
    Apply ops.Slice(indexing) through a dataset pipeline and check the
    result matches plain NumPy indexing of the same array.

    :param array: 1-D Python list to slice
    :param indexing: int, list of ints, slice object, Ellipsis, or None
        (None is treated the same as "take everything")
    """
    data = ds.NumpySlicesDataset([array])
    array = np.array(array)
    data = data.map(operations=ops.Slice(indexing))
    for d in data:
        if indexing is None:
            array = array[:]
        else:
            # NOTE(review): re-binds `array` each iteration; fine for the
            # single-row dataset built above, but would re-slice an
            # already-sliced array if the dataset ever had more rows.
            array = array[indexing]
        np.testing.assert_array_equal(array, d[0])
def test_slice_all():
    """Slicing with None or Ellipsis returns the whole tensor."""
    for indexing in (None, ...):
        slice_compare([1, 2, 3, 4, 5], indexing)
def test_slice_single_index():
    """Single positive and negative integer indices."""
    for index in (0, 4, 2, -1, -5, -3):
        slice_compare([1, 2, 3, 4, 5], index)
def test_slice_list_index():
    """Index lists, including negatives and repeated indices."""
    cases = ([0, 1, 4], [4, 1, 0], [-1, 1, 0], [-1, -4, -2],
             [3, 3, 3], [1, 1, 1, 1, 1])
    for indices in cases:
        slice_compare([1, 2, 3, 4, 5], indices)
def test_slice_slice_obj_2s():
    """slice(start, stop) forms."""
    for start, stop in ((0, 2), (2, 4), (4, 10)):
        slice_compare([1, 2, 3, 4, 5], slice(start, stop))
def test_slice_slice_obj_1s():
    """slice(stop) forms."""
    for stop in (1, 4, 10):
        slice_compare([1, 2, 3, 4, 5], slice(stop))
# (start, stop, step) triples shared by the int and float variants.
_STEP_SLICES = ((0, 2, 1), (0, 4, 1), (0, 10, 1), (0, 5, 2),
                (0, 2, 2), (0, 1, 2), (4, 5, 1), (2, 5, 3))
def test_slice_slice_obj_3s():
    """slice(start, stop, step) forms on int data."""
    for start, stop, step in _STEP_SLICES:
        slice_compare([1, 2, 3, 4, 5], slice(start, stop, step))
def test_slice_slice_obj_3s_double():
    """slice(start, stop, step) forms on float data."""
    for start, stop, step in _STEP_SLICES:
        slice_compare([1., 2., 3., 4., 5.], slice(start, stop, step))
def test_slice_slice_obj_neg():
    """Negative starts, stops and steps."""
    negative_slices = (slice(-1, -5, -1), slice(-1), slice(-2),
                       slice(-1, -5, -2), slice(-5, -1, 2), slice(-5, -1))
    for s in negative_slices:
        slice_compare([1, 2, 3, 4, 5], s)
def test_slice_exceptions():
    """Out-of-bounds indices and empty slices raise RuntimeError."""
    cases = (
        (5, "Index 5 is out of bounds [0,5)"),
        (slice(0), "Indices are empty, generated tensor would be empty."),
        (slice(5, 10, 1), "Indices are empty, generated tensor would be empty."),
        (slice(-1, -5, 1), "Indices are empty, generated tensor would be empty."),
    )
    for indexing, message in cases:
        with pytest.raises(RuntimeError) as info:
            slice_compare([1, 2, 3, 4, 5], indexing)
        assert message in str(info.value)
def test_slice_all_str():
    """None/Ellipsis slicing on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for indexing in (None, ...):
        slice_compare(data, indexing)
def test_slice_single_index_str():
    """Single integer indices on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for index in (0, 4, 2, -1, -5, -3):
        slice_compare(data, index)
def test_slice_list_index_str():
    """Index lists on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for indices in ([0, 1, 4], [4, 1, 0], [-1, 1, 0], [-1, -4, -2],
                    [3, 3, 3], [1, 1, 1, 1, 1]):
        slice_compare(data, indices)
def test_slice_slice_obj_2s_str():
    """slice(start, stop) forms on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for start, stop in ((0, 2), (2, 4), (4, 10)):
        slice_compare(data, slice(start, stop))
def test_slice_slice_obj_1s_str():
    """slice(stop) forms on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for stop in (1, 4, 10):
        slice_compare(data, slice(stop))
def test_slice_slice_obj_3s_str():
    """slice(start, stop, step) forms on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for args in ((0, 2, 1), (0, 4, 1), (0, 10, 1), (0, 5, 2),
                 (0, 2, 2), (0, 1, 2), (4, 5, 1), (2, 5, 3)):
        slice_compare(data, slice(*args))
def test_slice_slice_obj_neg_str():
    """Negative starts, stops and steps on string tensors."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    for s in (slice(-1, -5, -1), slice(-1), slice(-2),
              slice(-1, -5, -2), slice(-5, -1, 2), slice(-5, -1)):
        slice_compare(data, s)
def test_slice_exceptions_str():
    """Out-of-bounds / empty slices on string tensors raise RuntimeError."""
    data = [b"1", b"2", b"3", b"4", b"5"]
    cases = (
        (5, "Index 5 is out of bounds [0,5)"),
        (slice(0), "Indices are empty, generated tensor would be empty."),
        (slice(5, 10, 1), "Indices are empty, generated tensor would be empty."),
        (slice(-1, -5, 1), "Indices are empty, generated tensor would be empty."),
    )
    for indexing, message in cases:
        with pytest.raises(RuntimeError) as info:
            slice_compare(data, indexing)
        assert message in str(info.value)
if __name__ == "__main__":
    # Run the full suite directly (outside pytest), in the original order.
    for test in (test_slice_all, test_slice_single_index,
                 test_slice_list_index, test_slice_slice_obj_3s,
                 test_slice_slice_obj_2s, test_slice_slice_obj_1s,
                 test_slice_slice_obj_neg, test_slice_exceptions,
                 test_slice_slice_obj_3s_double, test_slice_all_str,
                 test_slice_single_index_str, test_slice_list_index_str,
                 test_slice_slice_obj_3s_str, test_slice_slice_obj_2s_str,
                 test_slice_slice_obj_1s_str, test_slice_slice_obj_neg_str,
                 test_slice_exceptions_str):
        test()
| [
"mindspore.dataset.transforms.c_transforms.Slice",
"numpy.testing.assert_array_equal",
"pytest.raises",
"numpy.array",
"mindspore.dataset.NumpySlicesDataset"
] | [((873, 903), 'mindspore.dataset.NumpySlicesDataset', 'ds.NumpySlicesDataset', (['[array]'], {}), '([array])\n', (894, 903), True, 'import mindspore.dataset as ds\n'), ((916, 931), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (924, 931), True, 'import numpy as np\n'), ((1119, 1161), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['array', 'd[0]'], {}), '(array, d[0])\n', (1148, 1161), True, 'import numpy as np\n'), ((3503, 3530), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3516, 3530), False, 'import pytest\n'), ((3655, 3682), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3668, 3682), False, 'import pytest\n'), ((3835, 3862), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3848, 3862), False, 'import pytest\n'), ((4022, 4049), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (4035, 4049), False, 'import pytest\n'), ((6594, 6621), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6607, 6621), False, 'import pytest\n'), ((6761, 6788), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6774, 6788), False, 'import pytest\n'), ((6956, 6983), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (6969, 6983), False, 'import pytest\n'), ((7158, 7185), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7171, 7185), False, 'import pytest\n'), ((963, 982), 'mindspore.dataset.transforms.c_transforms.Slice', 'ops.Slice', (['indexing'], {}), '(indexing)\n', (972, 982), True, 'import mindspore.dataset.transforms.c_transforms as ops\n')] |
import numpy as np
import librosa
class AudioTransform:
    """
    Base class for waveform augmentations.

    Subclasses implement ``apply``.  Calling an instance applies the
    transform either unconditionally (always_apply=True) or with
    probability ``p``; otherwise the input is returned untouched.
    """

    def __init__(self, always_apply=False, p=0.5):
        self.always_apply = always_apply
        self.p = p

    def __call__(self, y: np.ndarray):
        # Short-circuit keeps the RNG stream identical: rand() is only
        # drawn when always_apply is False.
        if self.always_apply or np.random.rand() < self.p:
            return self.apply(y)
        return y

    def apply(self, y: np.ndarray):
        raise NotImplementedError
class NoiseInjection(AudioTransform):
    """Add white noise with a uniformly random strength to the waveform."""

    def __init__(self, always_apply=False, p=0.5, max_noise_level=0.5, sr=32000):
        super().__init__(always_apply, p)
        self.noise_level = (0.0, max_noise_level)
        self.sr = sr

    def apply(self, y: np.ndarray, **params):
        # Draw the level first, then the noise, so the RNG stream
        # matches the original call for call.
        level = np.random.uniform(*self.noise_level)
        white = np.random.randn(len(y))
        return (y + white * level).astype(y.dtype)
class GaussianNoise(AudioTransform):
    """Add white noise scaled to a random signal-to-noise ratio (in dB)."""

    def __init__(self, always_apply=False, p=0.5, min_snr=5, max_snr=20, sr=32000):
        super().__init__(always_apply, p)
        self.min_snr = min_snr
        self.max_snr = max_snr
        self.sr = sr

    def apply(self, y: np.ndarray, **params):
        snr_db = np.random.uniform(self.min_snr, self.max_snr)
        signal_amp = np.sqrt(y ** 2).max()
        target_noise_amp = signal_amp / (10 ** (snr_db / 20))
        white = np.random.randn(len(y))
        white_amp = np.sqrt(white ** 2).max()
        return (y + white * 1 / white_amp * target_noise_amp).astype(y.dtype)
class PinkNoise(AudioTransform):
    """
    Add pink (1/f) noise scaled to a random signal-to-noise ratio (dB).

    The visible module only imports numpy and librosa, so the original
    ``cn.powerlaw_psd_gaussian`` call (colorednoise package) raised a
    NameError at runtime.  The 1/f noise is now generated directly with
    numpy's FFT, following the same spectral-shaping approach as
    colorednoise for exponent beta=1.
    """

    def __init__(self, always_apply=False, p=0.5, min_snr=5, max_snr=20, sr=32000):
        super().__init__(always_apply, p)
        self.min_snr = min_snr
        self.max_snr = max_snr
        self.sr = sr

    @staticmethod
    def _pink_noise(n_samples):
        """Gaussian noise with a 1/f power spectrum, unit variance."""
        freqs = np.fft.rfftfreq(n_samples)
        scale = freqs.copy()
        # Avoid dividing by zero at DC; reuse the lowest non-zero bin.
        scale[0] = scale[1] if len(scale) > 1 else 1.0
        scale = scale ** (-1 / 2)
        real = np.random.normal(size=len(freqs)) * scale
        imag = np.random.normal(size=len(freqs)) * scale
        noise = np.fft.irfft(real + 1j * imag, n=n_samples)
        return noise / np.std(noise)

    def apply(self, y: np.ndarray, **params):
        snr = np.random.uniform(self.min_snr, self.max_snr)
        a_signal = np.sqrt(y ** 2).max()
        a_noise = a_signal / (10 ** (snr / 20))
        pink_noise = self._pink_noise(len(y))
        a_pink = np.sqrt(pink_noise ** 2).max()
        augmented = (y + pink_noise * 1 / a_pink * a_noise).astype(y.dtype)
        return augmented
class PitchShift(AudioTransform):
    """Shift pitch by a random integer number of semitone steps."""

    def __init__(self, always_apply=False, p=0.5, max_range=5, sr=32000):
        super().__init__(always_apply, p)
        self.max_range = max_range  # steps drawn from [-max_range, max_range)
        self.sr = sr

    def apply(self, y: np.ndarray, **params):
        """Return ``y`` pitch-shifted by a random number of semitones."""
        n_steps = np.random.randint(-self.max_range, self.max_range)
        # Keyword arguments: librosa >= 0.10 made everything after `y`
        # keyword-only, so the original positional call raises TypeError
        # there; the keyword form also works on older librosa versions.
        augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
        return augmented
class TimeStretch(AudioTransform):
    """Time-stretch the waveform by a random rate in [0, max_rate)."""

    def __init__(self, always_apply=False, p=0.5, max_rate=1, sr=32000):
        super().__init__(always_apply, p)
        self.max_rate = max_rate  # upper bound (exclusive) of the stretch rate
        self.sr = sr

    def apply(self, y: np.ndarray, **params):
        """Return ``y`` stretched by a randomly drawn rate."""
        rate = np.random.uniform(0, self.max_rate)
        # NOTE(review): a rate near 0 is degenerate for time stretching;
        # presumably the intended lower bound excludes 0 — confirm with callers.
        # Keyword argument: librosa >= 0.10 made `rate` keyword-only, so the
        # original positional call raises TypeError there; the keyword form
        # also works on older librosa versions.
        augmented = librosa.effects.time_stretch(y, rate=rate)
        return augmented
| [
"numpy.random.uniform",
"librosa.effects.time_stretch",
"numpy.random.randint",
"numpy.random.rand",
"numpy.sqrt",
"librosa.effects.pitch_shift"
] | [((784, 820), 'numpy.random.uniform', 'np.random.uniform', (['*self.noise_level'], {}), '(*self.noise_level)\n', (801, 820), True, 'import numpy as np\n'), ((1258, 1303), 'numpy.random.uniform', 'np.random.uniform', (['self.min_snr', 'self.max_snr'], {}), '(self.min_snr, self.max_snr)\n', (1275, 1303), True, 'import numpy as np\n'), ((1899, 1944), 'numpy.random.uniform', 'np.random.uniform', (['self.min_snr', 'self.max_snr'], {}), '(self.min_snr, self.max_snr)\n', (1916, 1944), True, 'import numpy as np\n'), ((2514, 2564), 'numpy.random.randint', 'np.random.randint', (['(-self.max_range)', 'self.max_range'], {}), '(-self.max_range, self.max_range)\n', (2531, 2564), True, 'import numpy as np\n'), ((2585, 2633), 'librosa.effects.pitch_shift', 'librosa.effects.pitch_shift', (['y', 'self.sr', 'n_steps'], {}), '(y, self.sr, n_steps)\n', (2612, 2633), False, 'import librosa\n'), ((2927, 2962), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.max_rate'], {}), '(0, self.max_rate)\n', (2944, 2962), True, 'import numpy as np\n'), ((2983, 3020), 'librosa.effects.time_stretch', 'librosa.effects.time_stretch', (['y', 'rate'], {}), '(y, rate)\n', (3011, 3020), False, 'import librosa\n'), ((301, 317), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (315, 317), True, 'import numpy as np\n'), ((1323, 1338), 'numpy.sqrt', 'np.sqrt', (['(y ** 2)'], {}), '(y ** 2)\n', (1330, 1338), True, 'import numpy as np\n'), ((1458, 1483), 'numpy.sqrt', 'np.sqrt', (['(white_noise ** 2)'], {}), '(white_noise ** 2)\n', (1465, 1483), True, 'import numpy as np\n'), ((1964, 1979), 'numpy.sqrt', 'np.sqrt', (['(y ** 2)'], {}), '(y ** 2)\n', (1971, 1979), True, 'import numpy as np\n'), ((2109, 2133), 'numpy.sqrt', 'np.sqrt', (['(pink_noise ** 2)'], {}), '(pink_noise ** 2)\n', (2116, 2133), True, 'import numpy as np\n')] |
import os
import sys
import argparse
import configparser
import multiprocessing
from datetime import datetime
import pytz
import math
import matplotlib.pylab as plt
#plt.use('Agg')
sys.path.append(os.getcwd())
import random
import numpy as np
from glob import glob
import shutil
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from src.lib.utils.utils import get_model, get_dataset
from src.lib.utils.cmd_args import create_dataset_parser, create_classifier_parser, create_runtime_parser
from src.lib.trainer.trainer import Trainer
from src.lib.datasets.data_loader import pad_collate
from src.lib.datasets.sampler import BalancedBatchSampler
def main(argv=None):
    """Train an embryo classifier end-to-end.

    Parses options from an optional config file plus the command line, seeds
    all RNGs, builds the model/datasets/optimizer, runs training via Trainer,
    and — when ``delete_variable`` is set — writes a follow-up config file for
    the next round of sequential variable elimination.

    Keyword Arguments:
        argv -- argument list (default: sys.argv).
            NOTE(review): argv is normalized below but never actually passed
            to the parsers; parse_known_args()/parse_args() read sys.argv
            directly.
    """
    ''' ConfigParser '''
    # parsing arguments from command-line or config-file
    if argv is None:
        argv = sys.argv
    # First-stage parser: only extracts the config-file path so its contents
    # can serve as defaults for the real parsers below.
    conf_parser = argparse.ArgumentParser(
        description=__doc__, # printed with -h/--help
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False
        )
    conf_parser.add_argument("-c", "--conf_file", help="Specify config file", metavar="FILE_PATH")
    args, remaining_argv = conf_parser.parse_known_args()
    dataset_conf_dict = {}
    classifier_conf_dict = {}
    runtime_conf_dict = {}
    if args.conf_file is not None:
        # Config file sections map one-to-one onto the three option groups.
        config = configparser.ConfigParser()
        config.read([args.conf_file])
        dataset_conf_dict = dict(config.items("Dataset"))
        classifier_conf_dict = dict(config.items("Model"))
        runtime_conf_dict = dict(config.items("Runtime"))
    ''' Parameters '''
    # Dataset options
    dataset_parser, dataset_args, remaining_argv = \
        create_dataset_parser(remaining_argv, **dataset_conf_dict)
    # Classifier options
    classifier_parser, classifier_args, remaining_argv = \
        create_classifier_parser(remaining_argv, **classifier_conf_dict)
    # Runtime options
    runtime_parser, runtime_args, remaining_argv = \
        create_runtime_parser(remaining_argv, **runtime_conf_dict)
    # merge options
    parser = argparse.ArgumentParser(
        description='PyTorch implementation of embryo classification',
        parents=[conf_parser, dataset_parser, classifier_parser, runtime_parser])
    args = parser.parse_args()
    # Seed
    # Seed Python, NumPy and PyTorch RNGs for reproducibility.
    random.seed(int(args.seed))
    np.random.seed(int(args.seed))
    torch.manual_seed(args.seed)
    # Make Directory
    # Timestamped (JST) output directory; the config file is copied in for provenance.
    current_datetime = datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y%m%d_%H%M%S')
    save_dir = args.save_dir + '_' + str(current_datetime)
    os.makedirs(save_dir, exist_ok=True)
    shutil.copy(args.conf_file, os.path.join(save_dir, os.path.basename(args.conf_file)))
    ''' Classifier '''
    # Initialize the classifier to train
    classifier = get_model(args)
    if args.init_classifier is not None:
        # Warm-start from a previously saved model.
        print('Load classifier from', args.init_classifier)
        classifier = torch.load(args.init_classifier)
    # Prepare device
    # NOTE(review): both branches are identical; the if/else is redundant and
    # could be collapsed to a single `classifier.to(args.device)` call.
    if args.device == 'cuda:0':
        classifier = classifier.to(args.device)
    else:
        classifier = classifier.to(args.device)
    ''' Dataset '''
    # Load dataset
    train_dataset, validation_dataset = get_dataset(args)
    print('-- train_dataset.size = {}\n-- validation_dataset.size = {}'.format(
        train_dataset.__len__(), validation_dataset.__len__()))
    # Class-balanced sampling: n_samples per class in every batch.
    train_sampler = BalancedBatchSampler(train_dataset, n_classes=args.num_classes, n_samples=args.batchsize)
    ''' Iterator '''
    # Set up iterators
    train_iterator = DataLoader(
        dataset=train_dataset,
        #batch_size=int(args.batchsize),
        batch_sampler=train_sampler,
        collate_fn=pad_collate
    )
    validation_iterator = DataLoader(
        dataset=validation_dataset,
        batch_size=int(args.val_batchsize),
        shuffle=False,
        collate_fn=pad_collate
    )
    ''' Optimizer '''
    # Initialize an optimizer
    # Dispatch on the optimizer name from the config; unknown names fail fast.
    if args.optimizer == 'SGD':
        optimizer = optim.SGD(
            params=classifier.parameters(),
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'Adadelta':
        optimizer = optim.Adadelta(
            params=classifier.parameters(),
            lr=args.lr,
            rho=args.momentum,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'Adagrad':
        optimizer = optim.Adagrad(
            params=classifier.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'Adam':
        optimizer = optim.Adam(
            params=classifier.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'AdamW':
        optimizer = optim.AdamW(
            params=classifier.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'SparseAdam':
        optimizer = optim.SparseAdam(
            params=classifier.parameters(),
            lr=args.lr
        )
    elif args.optimizer == 'Adamax':
        optimizer = optim.Adamax(
            params=classifier.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'ASGD':
        optimizer = optim.ASGD(
            params=classifier.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'RMSprop':
        optimizer = optim.RMSprop(
            params=classifier.parameters(),
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay
        )
    else:
        raise ValueError('Unknown optimizer name: {}'.format(args.optimizer))
    ''' Graph Visualization '''
    # NOTE(review): eval() on a config string ("True"/"False") is fragile and
    # unsafe for untrusted configs; the values are assumed trusted here.
    if eval(args.graph):
        # Export an ONNX graph of the model for visualization/debugging.
        print('Making the graph of model.', end='')
        dummy_x = torch.tensor(train_dataset.get_input(0))
        input_names = ['input']
        print('.', end='')
        output_names = ['output']
        print('.', end='')
        torch.onnx.export(classifier, dummy_x.unsqueeze(0), os.path.join(save_dir, 'graph.onnx'), verbose=True, input_names=input_names, output_names=output_names)
        print('Success!')
    # Training
    trainer_args = {
        'optimizer' : optimizer,
        'epoch' : args.epoch,
        'save_dir' : save_dir,
        'eval_metrics' : args.eval_metrics,
        'device' : args.device
    }
    trainer = Trainer(**trainer_args)
    trainer.train(
        model=classifier,
        train_iterator=train_iterator,
        validation_iterator=validation_iterator
    )
    # Sequential variable elimination: pick the next input variable to drop
    # and emit the config for the following training round.
    if args.delete_variable is not None:
        # Weights saved by the trainer at the best validation epoch —
        # presumably per-variable attention weights; TODO confirm.
        aw_list = np.load(os.path.join(save_dir, 'aw_best_val.npz'), allow_pickle=True)['arr_0']
        aw = np.ones((args.input_dim, 487))
        # Geometric mean of the weights over all saved snapshots,
        # then sum over the 487 time points.
        for p in range(len(aw_list)):
            aw *= aw_list[p][0, :,:487].cpu().numpy()
        aw = aw ** (1/len(aw_list))
        aw = np.sum(aw, axis=1)
        # Index (among the variables still in use) of the least-weighted one.
        index = np.argmin(aw)
        new_delete_variable = []
        cnt = 0
        # Rebuild the delete list over all 11 variables: keep the already
        # deleted ones and add the index-th remaining variable.
        for i in range(11):
            if i in eval(args.delete_variable):
                new_delete_variable.append(i)
            else:
                if cnt == index:
                    new_delete_variable.append(i)
                cnt += 1
        print(new_delete_variable)
        # Extract the MCCV set number from the split-list path.
        set_num = int(args.split_list_train[len('datasets/split_list/mccv/set'):args.split_list_train.rfind('/')])
        filename = os.path.join('confs', 'models', 'mccv_sv', 'NVAN', 'train_set{0:02d}_sv{1:02d}.cfg'.format(set_num, len(new_delete_variable)))
        # Write the config for the next round with the enlarged delete list.
        with open(filename, 'w') as f:
            f.write('[Dataset]\n')
            f.write('root_path = datasets\n')
            f.write('split_list_train = datasets/split_list/mccv/set{0:02d}/train.txt\n'.format(set_num))
            f.write('split_list_validation = datasets/split_list/mccv/set{0:02d}/validation.txt\n'.format(set_num))
            f.write('basename =input\n\n')
            f.write('[Model]\n')
            f.write('model = NVAN\n')
            f.write('# init_classifier =\n')
            f.write('input_dim = {}\n'.format(11 - len(new_delete_variable)))
            f.write('num_classes = 2\n')
            f.write('num_layers = 2\n')
            f.write('hidden_dim = 128\n')
            f.write('dropout = 0.5\n')
            f.write('lossfun = {}\n'.format(args.lossfun))
            f.write('eval_metrics = {}\n\n'.format(args.eval_metrics))
            f.write('[Runtime]\n')
            f.write('save_dir = results/train_NVAN_set{0:02d}_sv{1:02d}\n'.format(set_num, len(new_delete_variable)))
            f.write('batchsize = {}\n'.format(args.batchsize))
            f.write('val_batchsize = 1\n')
            f.write('epoch = {}\n'.format(args.epoch))
            f.write('optimizer = {}\n'.format(args.optimizer))
            f.write('lr = {}\n'.format(args.lr))
            f.write('momentum = {}\n'.format(args.momentum))
            f.write('weight_decay = {}\n'.format(args.weight_decay))
            f.write('delete_tp = {}\n'.format(args.delete_tp))
            f.write('# cuda:0 or cpu\n')
            f.write('device = {}\n'.format(args.device))
            f.write('seed = {}\n'.format(args.seed))
            f.write('phase = train\n')
            f.write('graph = False\n')
            f.write('# 0: number, 1: volume_mean, 2: volume_sd, 3: surface_mean, 4: surface_sd,\n')
            f.write('# 5: aspect_mean, 6: aspect_sd, 7: solidity_mean, 8: solidity_sd, 9: centroid_mean, 10: centroid_sd\n')
            f.write('delete_variable = {} # input_dim == 11 - len(delete_variable)\n'.format(new_delete_variable))
# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
| [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.ones",
"numpy.argmin",
"src.lib.trainer.trainer.Trainer",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.load",
"src.lib.datasets.sampler.BalancedBatchSampler",
"src.lib.utils.cmd_args.create_classifier_parser",
"configparser.ConfigParser",
... | [((197, 208), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (206, 208), False, 'import os\n'), ((906, 1025), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'add_help': '(False)'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter, add_help=False)\n', (929, 1025), False, 'import argparse\n'), ((1721, 1779), 'src.lib.utils.cmd_args.create_dataset_parser', 'create_dataset_parser', (['remaining_argv'], {}), '(remaining_argv, **dataset_conf_dict)\n', (1742, 1779), False, 'from src.lib.utils.cmd_args import create_dataset_parser, create_classifier_parser, create_runtime_parser\n'), ((1872, 1936), 'src.lib.utils.cmd_args.create_classifier_parser', 'create_classifier_parser', (['remaining_argv'], {}), '(remaining_argv, **classifier_conf_dict)\n', (1896, 1936), False, 'from src.lib.utils.cmd_args import create_dataset_parser, create_classifier_parser, create_runtime_parser\n'), ((2020, 2078), 'src.lib.utils.cmd_args.create_runtime_parser', 'create_runtime_parser', (['remaining_argv'], {}), '(remaining_argv, **runtime_conf_dict)\n', (2041, 2078), False, 'from src.lib.utils.cmd_args import create_dataset_parser, create_classifier_parser, create_runtime_parser\n'), ((2113, 2282), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch implementation of embryo classification"""', 'parents': '[conf_parser, dataset_parser, classifier_parser, runtime_parser]'}), "(description=\n 'PyTorch implementation of embryo classification', parents=[conf_parser,\n dataset_parser, classifier_parser, runtime_parser])\n", (2136, 2282), False, 'import argparse\n'), ((2405, 2433), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2422, 2433), False, 'import torch\n'), ((2610, 2646), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (2621, 2646), False, 
'import os\n'), ((2820, 2835), 'src.lib.utils.utils.get_model', 'get_model', (['args'], {}), '(args)\n', (2829, 2835), False, 'from src.lib.utils.utils import get_model, get_dataset\n'), ((3231, 3248), 'src.lib.utils.utils.get_dataset', 'get_dataset', (['args'], {}), '(args)\n', (3242, 3248), False, 'from src.lib.utils.utils import get_model, get_dataset\n'), ((3414, 3508), 'src.lib.datasets.sampler.BalancedBatchSampler', 'BalancedBatchSampler', (['train_dataset'], {'n_classes': 'args.num_classes', 'n_samples': 'args.batchsize'}), '(train_dataset, n_classes=args.num_classes, n_samples=\n args.batchsize)\n', (3434, 3508), False, 'from src.lib.datasets.sampler import BalancedBatchSampler\n'), ((3570, 3661), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_sampler': 'train_sampler', 'collate_fn': 'pad_collate'}), '(dataset=train_dataset, batch_sampler=train_sampler, collate_fn=\n pad_collate)\n', (3580, 3661), False, 'from torch.utils.data import DataLoader\n'), ((6575, 6598), 'src.lib.trainer.trainer.Trainer', 'Trainer', ([], {}), '(**trainer_args)\n', (6582, 6598), False, 'from src.lib.trainer.trainer import Trainer\n'), ((1372, 1399), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1397, 1399), False, 'import configparser\n'), ((2958, 2990), 'torch.load', 'torch.load', (['args.init_classifier'], {}), '(args.init_classifier)\n', (2968, 2990), False, 'import torch\n'), ((6889, 6919), 'numpy.ones', 'np.ones', (['(args.input_dim, 487)'], {}), '((args.input_dim, 487))\n', (6896, 6919), True, 'import numpy as np\n'), ((7061, 7079), 'numpy.sum', 'np.sum', (['aw'], {'axis': '(1)'}), '(aw, axis=1)\n', (7067, 7079), True, 'import numpy as np\n'), ((7096, 7109), 'numpy.argmin', 'np.argmin', (['aw'], {}), '(aw)\n', (7105, 7109), True, 'import numpy as np\n'), ((2702, 2734), 'os.path.basename', 'os.path.basename', (['args.conf_file'], {}), '(args.conf_file)\n', (2718, 2734), False, 'import os\n'), ((6218, 6254), 
'os.path.join', 'os.path.join', (['save_dir', '"""graph.onnx"""'], {}), "(save_dir, 'graph.onnx')\n", (6230, 6254), False, 'import os\n'), ((2492, 2519), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Tokyo"""'], {}), "('Asia/Tokyo')\n", (2505, 2519), False, 'import pytz\n'), ((6805, 6846), 'os.path.join', 'os.path.join', (['save_dir', '"""aw_best_val.npz"""'], {}), "(save_dir, 'aw_best_val.npz')\n", (6817, 6846), False, 'import os\n')] |
'''
License
copyright <NAME> (PTB) 2020
This software is licensed under the BSD-like license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
DISCLAIMER
==========
This software was developed at Physikalisch-Technische Bundesanstalt
(PTB). The software is made available "as is" free of cost. PTB assumes
no responsibility whatsoever for its use by other parties, and makes no
guarantees, expressed or implied, about its quality, reliability, safety,
suitability or any other characteristic. In no event will PTB be liable
for any direct, indirect or consequential damage arising in connection
Using this software in publications requires citing the following paper
Compressed FTIR spectroscopy using low-rank matrix reconstruction (to appear in Optics Express)
DOI: https://doi.org/10.1364/OE.404959
'''
import numpy as np
from scipy.linalg import svd
from scipy.sparse import (csr_matrix)
try:
from pypardiso import spsolve
except:
from scipy.sparse.linalg import spsolve
from compressedftir.utils import (scipy_block_diag, relative_residual, ht)
# import time
def updateU(_V, Xomega, curr_r, nnz, lmb=0.0, lap=None):
    """Solve for the spatial component U with V held fixed.

    In the alternating optimization of the model M = UV, fixing V makes

        U = argmin_A || Y - A V || + lmb ||lap A||

    a sparse linear system: one (r x r) diagonal block per data row,
    assembled from the observed entries only, optionally coupled by the
    GMRF/Laplacian regularizer.

    Arguments:
        _V {np.array} -- fixed second model component
        Xomega {np.array} -- sub-sampled data
        curr_r {int} -- rank
        nnz {list} -- non zeros entries

    Keyword Arguments:
        lmb {float} -- regularization parameter (default: {0.0})
        lap {array like} -- GMRF/Laplacian regularization matrix (default: {None})

    Returns:
        array like -- new component U
    """
    n_rows, _ = Xomega.shape
    rhs_cols = np.zeros((curr_r, n_rows), dtype=Xomega.dtype)
    zero_block = csr_matrix((curr_r, curr_r))
    diag_blocks = []
    for row in range(n_rows):
        mask = nnz[row]
        if sum(mask) > 0:
            Vsub = _V[:, mask]
            diag_blocks.append(csr_matrix(np.dot(Vsub, Vsub.T)))
            rhs_cols[:, row] = np.dot(Xomega[row, mask], Vsub.T)
        else:
            # Row without observations contributes an all-zero block.
            diag_blocks.append(zero_block)
    system = scipy_block_diag(diag_blocks, format="csc")
    if lap is not None:
        system = system + lmb * lap
    # Stack the per-row right-hand sides column-wise (Fortran order) so the
    # solution unpacks back into an (n, r) matrix.
    # TODO: check whether a Hermitian transpose (ht) is needed here for
    # complex-valued data.
    rhs = rhs_cols.ravel(order="F")
    solution = spsolve(system, rhs)
    return solution.reshape((curr_r, n_rows), order="F").T
def updateV(_U, Xomega, curr_r, nnz, lmb=0.0, lap=None):
    """Solve for the second component V with U held fixed.

    In the alternating optimization of the model M = UV, fixing U makes

        V = argmin_A || Y - U A || + lmb ||lap A||

    a sparse linear system: one (r x r) diagonal block per data column,
    assembled from the observed entries only, optionally regularized.

    Arguments:
        _U {np.array} -- fixed first model component
        Xomega {np.array} -- sub-sampled data
        curr_r {int} -- rank
        nnz {list} -- non zeros entries

    Keyword Arguments:
        lmb {float} -- regularization parameter (default: {0.0})
        lap {array like} -- regularization matrix, here identity (default: {None})

    Returns:
        array like -- new component V
    """
    _, n_cols = Xomega.shape
    rhs_cols = np.zeros((curr_r, n_cols), dtype=Xomega.dtype)
    diag_blocks = []
    for col in range(n_cols):
        mask = nnz[col]
        Usub = _U[mask, :]
        diag_blocks.append(csr_matrix(np.dot(Usub.T, Usub)))
        rhs_cols[:, col] = np.dot(np.transpose(Xomega[mask, col]), Usub)
    system = scipy_block_diag(diag_blocks, format="csr")
    if lap is not None:
        system = system + lmb * lap
    # Column-wise (Fortran order) raveling matches the block ordering above.
    return spsolve(system, rhs_cols.ravel(order="F")).reshape((curr_r, n_cols), order="F")
def lr_recon_single(Xomega, l_regu, r, T, tau, lapU, lapV, nnz_Z0_U, nnz_Z0_V, Xtrue=None, iv_U=None, iv_V=None, bg=None):
    """
    Reconstructs a low-rank model UV to fit the data Xomega.
    Assumes a fixed rank r.

    Alternates between the closed-form updates of U and V (updateU/updateV)
    until either T iterations are reached or the local change falls below
    tau times the global residual.

    Arguments:
        Xomega {array like} -- sub-sampled data (zeros are treated as unobserved)
        l_regu {float} -- regularization parameter
        r {int} -- fixed rank
        T {int} -- maximal number of iteration
        tau {float} -- tolerance
        lapU {array like} -- regularizer for U
        lapV {array like} -- regularizer for V
        nnz_Z0_U {list} -- non zero entries for U
        nnz_Z0_V {list} -- non zero entries for V

    Keyword Arguments:
        Xtrue {array like} -- Full dataset for comparison (default: {None})
        iv_U {array like} -- proposed initial value of U (default: {None})
        iv_V {array like} -- proposed initial value of V (default: {None})
        bg {array like} -- background added to the reconstruction only when
            comparing against Xtrue (default: {None})

    Returns:
        dict -- result dictionary containing
                U {array like} -- rescaled component (U * sqrt(scl))
                V {array like} -- rescaled component (V * sqrt(scl))
                resL {list} -- local residuals per iteration
                resG {list} -- global residuals per iteration
                iv_U, iv_V {array like} -- unscaled components (warm-start values)
                if Xtrue is given: resT {list}
    """
    # normalize the data: zeros are the unobserved entries, so mask them out
    # before estimating the scale.
    Xomega_masked = np.ma.masked_equal(Xomega, 0)
    scl = Xomega_masked.std(ddof=0)
    Xomega_scl = Xomega/scl
    # print("Do init")
    if iv_U is None or iv_V is None:
        # initialize using svd. returns (n, r)x(r,r)x(r,m)
        W, Lambda, Z = svd(Xomega_scl, full_matrices=False)
        # usually, the svd rank is larger than desired -> crop
        W = W[:, :r]
        Lambda = Lambda[:r]
        Z = Z[:r, :]
        # distribute the singular values to U and V
        U = W*Lambda**(0.5)  # shape (n, r)
        V = ht(ht(Z)*Lambda**(0.5))  # shape (r, m)
    else:
        print("initial value given")
        U = iv_U
        V = iv_V
    # initialize the residual lists; resL = inf and resG = 0 guarantee the
    # first loop iteration always runs.
    resL = [np.infty]
    resG = [0]
    if Xtrue is not None:
        resT = []
    t = 0
    # print("Get Xt")
    Xt = np.dot(U, V) * scl
    while t < T and resL[-1] > resG[-1]*tau:
        # globstart = time.time()
        Xt_old = Xt
        # print("Update U")
        U = updateU(V, Xomega_scl, r, nnz_Z0_V, l_regu, lapU)
        # print("Update V")
        V = updateV(U, Xomega_scl, r, nnz_Z0_U, l_regu, lapV)
        # print("Compute residual")
        # Undo the normalization before measuring residuals against the data.
        Xt = np.dot(U, V) * scl
        resL.append(relative_residual(Xt_old, Xt))
        resG.append(relative_residual(Xomega, Xt, check_nnz=True))
        if Xtrue is not None:
            # print("Compute residual to truth")
            resT.append(relative_residual(Xtrue, Xt + bg if bg is not None else Xt))
            print("it: {:2d}/{}, local res: {:.2e}, global res: {:.2e}, res2Full: {:.2e}".format(t+1, T, resL[-1],
                                                                                                 resG[-1], resT[-1]))
        else:
            print("it: {:2d}/{}, local res: {:.2e}, global res: {:.2e}".format(t+1, T, resL[-1], resG[-1]))
        # print("step duration: {}".format(time.time()-globstart))
        t = t+1
    # Split the scale between the factors: (U*sqrt(scl)) @ (V*sqrt(scl)) = UV*scl.
    retval = {
        "U": U*np.sqrt(scl),
        "V": V*np.sqrt(scl),
        "resL": resL[1:],
        "resG": resG[1:],
        "iv_U": U,
        "iv_V": V
    }
    if Xtrue is not None:
        retval["resT"] = resT
    return retval
| [
"compressedftir.utils.relative_residual",
"numpy.ma.masked_equal",
"compressedftir.utils.ht",
"numpy.zeros",
"numpy.transpose",
"scipy.linalg.svd",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.spsolve",
"numpy.dot",
"compressedftir.utils.scipy_block_diag",
"numpy.sqrt"
] | [((2427, 2469), 'numpy.zeros', 'np.zeros', (['(curr_r, _n)'], {'dtype': 'Xomega.dtype'}), '((curr_r, _n), dtype=Xomega.dtype)\n', (2435, 2469), True, 'import numpy as np\n'), ((2480, 2508), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(curr_r, curr_r)'], {}), '((curr_r, curr_r))\n', (2490, 2508), False, 'from scipy.sparse import csr_matrix\n'), ((2877, 2912), 'compressedftir.utils.scipy_block_diag', 'scipy_block_diag', (['hlp'], {'format': '"""csc"""'}), "(hlp, format='csc')\n", (2893, 2912), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((4320, 4362), 'numpy.zeros', 'np.zeros', (['(curr_r, _m)'], {'dtype': 'Xomega.dtype'}), '((curr_r, _m), dtype=Xomega.dtype)\n', (4328, 4362), True, 'import numpy as np\n'), ((6351, 6380), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['Xomega', '(0)'], {}), '(Xomega, 0)\n', (6369, 6380), True, 'import numpy as np\n'), ((4687, 4722), 'compressedftir.utils.scipy_block_diag', 'scipy_block_diag', (['hlp'], {'format': '"""csr"""'}), "(hlp, format='csr')\n", (4703, 4722), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((6593, 6629), 'scipy.linalg.svd', 'svd', (['Xomega_scl'], {'full_matrices': '(False)'}), '(Xomega_scl, full_matrices=False)\n', (6596, 6629), False, 'from scipy.linalg import svd\n'), ((7177, 7189), 'numpy.dot', 'np.dot', (['U', 'V'], {}), '(U, V)\n', (7183, 7189), True, 'import numpy as np\n'), ((2716, 2744), 'numpy.dot', 'np.dot', (['Xomega[k, ind]', 'VO.T'], {}), '(Xomega[k, ind], VO.T)\n', (2722, 2744), True, 'import numpy as np\n'), ((4539, 4567), 'numpy.transpose', 'np.transpose', (['Xomega[ind, k]'], {}), '(Xomega[ind, k])\n', (4551, 4567), True, 'import numpy as np\n'), ((4747, 4782), 'compressedftir.utils.scipy_block_diag', 'scipy_block_diag', (['hlp'], {'format': '"""csr"""'}), "(hlp, format='csr')\n", (4763, 4782), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((7533, 7545), 'numpy.dot', 
'np.dot', (['U', 'V'], {}), '(U, V)\n', (7539, 7545), True, 'import numpy as np\n'), ((7573, 7602), 'compressedftir.utils.relative_residual', 'relative_residual', (['Xt_old', 'Xt'], {}), '(Xt_old, Xt)\n', (7590, 7602), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((7625, 7670), 'compressedftir.utils.relative_residual', 'relative_residual', (['Xomega', 'Xt'], {'check_nnz': '(True)'}), '(Xomega, Xt, check_nnz=True)\n', (7642, 7670), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((8315, 8327), 'numpy.sqrt', 'np.sqrt', (['scl'], {}), '(scl)\n', (8322, 8327), True, 'import numpy as np\n'), ((8345, 8357), 'numpy.sqrt', 'np.sqrt', (['scl'], {}), '(scl)\n', (8352, 8357), True, 'import numpy as np\n'), ((3256, 3271), 'scipy.sparse.linalg.spsolve', 'spsolve', (['H', 'rhs'], {}), '(H, rhs)\n', (3263, 3271), False, 'from scipy.sparse.linalg import spsolve\n'), ((4493, 4509), 'numpy.dot', 'np.dot', (['UO.T', 'UO'], {}), '(UO.T, UO)\n', (4499, 4509), True, 'import numpy as np\n'), ((6889, 6894), 'compressedftir.utils.ht', 'ht', (['Z'], {}), '(Z)\n', (6891, 6894), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((7778, 7837), 'compressedftir.utils.relative_residual', 'relative_residual', (['Xtrue', '(Xt + bg if bg is not None else Xt)'], {}), '(Xtrue, Xt + bg if bg is not None else Xt)\n', (7795, 7837), False, 'from compressedftir.utils import scipy_block_diag, relative_residual, ht\n'), ((2673, 2689), 'numpy.dot', 'np.dot', (['VO', 'VO.T'], {}), '(VO, VO.T)\n', (2679, 2689), True, 'import numpy as np\n')] |
import logging as log
import unittest
import numpy as np
from mock import patch, mock_open
from lstm import preprocessing as prep
class PreProcessingTest(unittest.TestCase):
@patch("builtins.open", mock_open(read_data="hello foo bar!"))
def test_load_data(self):
path = "data path"
text = prep.load_data(path)
log.info("text: {}".format(text))
self.assertEqual(text, "hello foo bar!")
def test_tokenize(self):
text = "hello Bob!"
_, tokens, _, _ = prep.tokenize(text)
self.assertEqual(len(tokens), len(text))
self.assertEqual(tokens[2], tokens[3])
self.assertEqual(tokens[4], tokens[7])
def test_tokenized_mapping(self):
text = "hello Bob!"
_, _, int2char, char2int = prep.tokenize(text)
h_int = char2int['h']
one_char = int2char[1]
self.assertEqual(int2char[h_int], 'h')
self.assertEqual(char2int[one_char], 1)
def test_one_hot(self):
test_seq = np.array([[3, 5, 1]])
one_hot = prep.one_hot_encode(test_seq, 8)
self.assertTrue(np.array_equal(one_hot, np.array(
[[[0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.]]]
)))
def test_get_batches(self):
encoded = np.array([1., 2., 3., 1., 2., 3., 1., 2., 2., 3., 4., 5., 2., 3., 4., 5.,
3., 4., 5., 6., 7., 3., 4., 5., 4., 5., 6., 7., 8., 9., 0., 1.])
batches = prep.get_batches(encoded, 2, 4)
x, y = next(batches)
self.assertTrue(np.array_equal(
np.array([[1., 2., 3., 1.],
[3., 4., 5., 6.]]),
x
))
self.assertTrue(np.array_equal(
np.array([[2., 3., 1., 2.],
[4., 5., 6., 7.]]),
y
))
x, y = next(batches)
self.assertTrue(np.array_equal(
np.array([[2., 3., 1., 2.],
[7., 3., 4., 5.]]),
x
))
self.assertTrue(np.array_equal(
np.array([[3., 1., 2., 2.],
[3., 4., 5., 4.]]),
y
))
| [
"lstm.preprocessing.get_batches",
"mock.mock_open",
"lstm.preprocessing.load_data",
"lstm.preprocessing.one_hot_encode",
"numpy.array",
"lstm.preprocessing.tokenize"
] | [((318, 338), 'lstm.preprocessing.load_data', 'prep.load_data', (['path'], {}), '(path)\n', (332, 338), True, 'from lstm import preprocessing as prep\n'), ((207, 244), 'mock.mock_open', 'mock_open', ([], {'read_data': '"""hello foo bar!"""'}), "(read_data='hello foo bar!')\n", (216, 244), False, 'from mock import patch, mock_open\n'), ((514, 533), 'lstm.preprocessing.tokenize', 'prep.tokenize', (['text'], {}), '(text)\n', (527, 533), True, 'from lstm import preprocessing as prep\n'), ((779, 798), 'lstm.preprocessing.tokenize', 'prep.tokenize', (['text'], {}), '(text)\n', (792, 798), True, 'from lstm import preprocessing as prep\n'), ((1003, 1024), 'numpy.array', 'np.array', (['[[3, 5, 1]]'], {}), '([[3, 5, 1]])\n', (1011, 1024), True, 'import numpy as np\n'), ((1043, 1075), 'lstm.preprocessing.one_hot_encode', 'prep.one_hot_encode', (['test_seq', '(8)'], {}), '(test_seq, 8)\n', (1062, 1075), True, 'from lstm import preprocessing as prep\n'), ((1342, 1522), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 4.0,\n 5.0, 3.0, 4.0, 5.0, 6.0, 7.0, 3.0, 4.0, 5.0, 4.0, 5.0, 6.0, 7.0, 8.0, \n 9.0, 0.0, 1.0]'], {}), '([1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 2.0, \n 3.0, 4.0, 5.0, 3.0, 4.0, 5.0, 6.0, 7.0, 3.0, 4.0, 5.0, 4.0, 5.0, 6.0, \n 7.0, 8.0, 9.0, 0.0, 1.0])\n', (1350, 1522), True, 'import numpy as np\n'), ((1527, 1558), 'lstm.preprocessing.get_batches', 'prep.get_batches', (['encoded', '(2)', '(4)'], {}), '(encoded, 2, 4)\n', (1543, 1558), True, 'from lstm import preprocessing as prep\n'), ((1124, 1267), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, \n 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]]'], {}), '([[[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]])\n', (1132, 1267), True, 'import numpy as np\n'), ((1640, 1694), 'numpy.array', 'np.array', 
(['[[1.0, 2.0, 3.0, 1.0], [3.0, 4.0, 5.0, 6.0]]'], {}), '([[1.0, 2.0, 3.0, 1.0], [3.0, 4.0, 5.0, 6.0]])\n', (1648, 1694), True, 'import numpy as np\n'), ((1787, 1841), 'numpy.array', 'np.array', (['[[2.0, 3.0, 1.0, 2.0], [4.0, 5.0, 6.0, 7.0]]'], {}), '([[2.0, 3.0, 1.0, 2.0], [4.0, 5.0, 6.0, 7.0]])\n', (1795, 1841), True, 'import numpy as np\n'), ((1964, 2018), 'numpy.array', 'np.array', (['[[2.0, 3.0, 1.0, 2.0], [7.0, 3.0, 4.0, 5.0]]'], {}), '([[2.0, 3.0, 1.0, 2.0], [7.0, 3.0, 4.0, 5.0]])\n', (1972, 2018), True, 'import numpy as np\n'), ((2111, 2165), 'numpy.array', 'np.array', (['[[3.0, 1.0, 2.0, 2.0], [3.0, 4.0, 5.0, 4.0]]'], {}), '([[3.0, 1.0, 2.0, 2.0], [3.0, 4.0, 5.0, 4.0]])\n', (2119, 2165), True, 'import numpy as np\n')] |
from http.server import HTTPServer, BaseHTTPRequestHandler
import cgi
from datetime import datetime
import hashlib
import json
import numpy as np
from biobert_ner.run_ner import BioBERT, FLAGS
from convert import pubtator2dict_list, pubtator_biocxml2dict_list, \
get_pub_annotation, get_pubtator
from normalize import Normalizer
from utils import filter_entities
import os
import random
import shutil
import string
import socket
import struct
import time
import tensorflow as tf
import threading
import urllib.parse as urlparse
# if hasattr(os, "fork"):
# from socketserver import ForkingMixIn
# else:
# from socketserver import ThreadingMixIn
from socketserver import ThreadingMixIn
class GetHandler(BaseHTTPRequestHandler):
    """HTTP request handler for the BERN biomedical NER/normalization service.

    GET  tags PubMed articles by PMID (tmTool -> BioBERT NER -> normalization).
    POST tags raw text submitted in the 'param' form field
         (GNormPlus -> tmVar 2.0 -> BioBERT NER -> normalization).

    ``stm_dict`` and ``normalizer`` are class attributes injected once by
    ``Main.__init__`` before the server starts, so every handler thread
    shares the same loaded models and configuration.
    """
    # Shared state (models, host/port config, formats); set by Main.__init__.
    stm_dict = None
    # Shared entity normalizer; set by Main.__init__.
    normalizer = None
    def do_GET(self):
        """Handle ``GET /?pmid=...[&format=...][&indent=...]``.

        Query parameters:
            pmid:   required, comma-separated list of PubMed IDs
                    (at most ``stm_dict['n_pmid_limit']`` of them).
            format: one of ``stm_dict['available_formats']``
                    (first entry, JSON/PubAnnotation, is the default).
            indent: 'true' => pretty-print JSON with indent=4.

        Always responds HTTP 200; errors are reported inside the payload
        (either as plain text or as an error document from get_err_dict).
        """
        get_start_t = time.time()
        parsed_path = urlparse.urlparse(self.path)
        cur_thread_name = threading.current_thread().getName()
        message = '\n'.join([
            'CLIENT VALUES:',
            'client_address=%s (%s)' % (self.client_address,
                                        self.address_string()),
            'command=%s' % self.command,
            'path=%s' % self.path,
            'real path=%s' % parsed_path.path,
            'query=%s' % parsed_path.query,
            'request_version=%s' % self.request_version,
            '',
            'SERVER VALUES:',
            'server_version=%s' % self.server_version,
            'sys_version=%s' % self.sys_version,
            'protocol_version=%s' % self.protocol_version,
            'thread_name=%s' % cur_thread_name,
        ])
        self.send_response(200)
        self.end_headers()
        elapsed_time_dict = dict()
        time_format = self.stm_dict['time_format']
        available_formats = self.stm_dict['available_formats']
        if parsed_path.query is None:
            err_msg = 'No url query'
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            message += '\n' + err_msg
            self.wfile.write(message.encode('utf-8'))
            return
        indent = None
        # print(datetime.now().strftime(time_format),
        #       'query', parsed_path.query)
        qs_dict = urlparse.parse_qs(parsed_path.query)
        # print(datetime.now().strftime(time_format), 'qs_dict', qs_dict)
        if 'pmid' not in qs_dict or len(qs_dict['pmid']) == 0:
            err_msg = 'No pmid param'
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            message += '\n' + err_msg
            self.wfile.write(message.encode('utf-8'))
            return
        pmid_list = qs_dict['pmid'][0].split(',')
        # print(datetime.now().strftime(time_format), 'pmid', pmid_list)
        if len(pmid_list) > self.stm_dict['n_pmid_limit']:
            err_msg = 'Too many (> {}) pmids: {}'.format(
                self.stm_dict['n_pmid_limit'], len(pmid_list))
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            message += '\n' + err_msg
            self.wfile.write(message.encode('utf-8'))
            return
        # Default to the first available format (JSON/PubAnnotation).
        out_format = available_formats[0]
        if 'format' in qs_dict and len(qs_dict['format']) > 0:
            if qs_dict['format'][0] in available_formats:
                out_format = qs_dict['format'][0]
            else:
                print('Unavailable format', qs_dict['format'][0])
        # print(datetime.now().strftime(time_format),
        #       'pmid:', pmid_list, ', format:', out_format)
        if 'indent' in qs_dict and len(qs_dict['indent']) > 0:
            indent = qs_dict['indent'][0]
            if 'true' == indent.lower():
                indent = 4
            else:
                indent = None
        # Hash of the raw pmid list; used only for log correlation here.
        text_hash = \
            hashlib.sha224(qs_dict['pmid'][0].encode('utf-8')).hexdigest()
        print(datetime.now().strftime(time_format),
              '[{}] text_hash: {}'.format(cur_thread_name, text_hash))
        # bern_output_path = './output/bern_api_{}.{}'.format(text_hash,
        #                                                     out_format)
        # # Re-use prev. outputs
        # if os.path.exists(bern_output_path):
        #     with open(bern_output_path, 'r', encoding='utf-8') as f_out:
        #         if out_format == 'json':
        #             message = \
        #                 json.dumps(json.load(f_out), indent=indent,
        #                            sort_keys=indent is not None)
        #         elif out_format == 'pubtator':
        #             message = f_out.read()
        #         else:
        #             raise ValueError('Wrong format: {}'.format(out_format))
        #
        #     self.wfile.write(message.encode('utf-8'))
        #     print(datetime.now().strftime(time_format),
        #           '[{}] Done. Found prev. output. Total {:.3f} sec\n'.
        #           format(cur_thread_name, time.time() - get_start_t))
        #     return
        is_raw_text = False
        # Fetch article text and initial annotations from the remote tmTool
        # service (PubMed + GNormPlus + tmVar).
        tmtool_start_t = time.time()
        dict_list = pubtator_biocxml2dict_list(pmid_list)
        tmtool_time = time.time() - tmtool_start_t
        elapsed_time_dict['tmtool'] = round(tmtool_time, 3)
        # None => no response from tmTool at all.
        if dict_list is None:
            error_dict = self.get_err_dict()
            error_dict['pmid'] = pmid_list[0] if len(pmid_list) == 1 else ''
            error_dict['abstract'] = 'error: tmtool: no response'
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', error_dict['abstract'])
            if out_format == available_formats[0]:
                self.wfile.write(
                    json.dumps([get_pub_annotation(error_dict,
                                                    is_raw_text=is_raw_text)],
                               indent=indent,
                               sort_keys=indent is not None).encode('utf-8'))
            elif out_format == available_formats[1]:
                self.wfile.write(get_pubtator([error_dict]).encode('utf-8'))
            return
        # A str return value carries an error message from the converter.
        elif type(dict_list) is str:
            error_dict = self.get_err_dict()
            error_dict['pmid'] = pmid_list[0] if len(pmid_list) == 1 else ''
            if 'currently unavailable' in dict_list:
                error_dict['abstract'] = 'error: tmtool: currently unavailable'
            elif 'invalid version format' in dict_list:
                error_dict['abstract'] = 'error: tmtool: invalid version format'
            else:
                error_dict['abstract'] = 'error: tmtool: {}'.format(
                    dict_list.replace('\n', ''))
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', error_dict['abstract'])
            if out_format == available_formats[0]:
                self.wfile.write(
                    json.dumps([get_pub_annotation(error_dict,
                                                    is_raw_text=is_raw_text)],
                               indent=indent,
                               sort_keys=indent is not None).encode('utf-8'))
            elif out_format == available_formats[1]:
                self.wfile.write(get_pubtator([error_dict]).encode('utf-8'))
            return
        print(datetime.now().strftime(time_format),
              '[{}] tmTool: PubMed & GNormPlus & tmVar {:.3f} sec'
              .format(cur_thread_name, tmtool_time))
        # Run BioBERT NER models of Lee et al., 2019
        ner_start_time = time.time()
        tagged_docs, num_entities = \
            self.biobert_recognize(dict_list, is_raw_text, cur_thread_name)
        ner_time = time.time() - ner_start_time
        elapsed_time_dict['ner'] = round(ner_time, 3)
        if tagged_docs is None:
            error_dict = self.get_err_dict()
            error_dict['pmid'] = pmid_list[0] if len(pmid_list) == 1 else ''
            error_dict['abstract'] = 'error: BioBERT NER, out of index range'
            if out_format == available_formats[0]:
                self.wfile.write(
                    json.dumps([get_pub_annotation(error_dict,
                                                    is_raw_text=is_raw_text)],
                               indent=indent,
                               sort_keys=indent is not None).encode('utf-8'))
            elif out_format == available_formats[1]:
                self.wfile.write(get_pubtator([error_dict]).encode('utf-8'))
            return
        print(datetime.now().strftime(time_format),
              '[%s] NER %.3f sec, #entities: %d, #articles: %d'
              % (cur_thread_name, ner_time, num_entities, len(tagged_docs)))
        # Normalization models
        normalization_time = 0.
        # Normalization is skipped entirely when NER found nothing.
        if num_entities > 0:
            # print(datetime.now().strftime(time_format),
            #       '[{}] Normalization models..'.format(cur_thread_name))
            normalization_start_time = time.time()
            tagged_docs = self.normalizer.normalize(text_hash, tagged_docs,
                                                    cur_thread_name,
                                                    is_raw_text=is_raw_text)
            normalization_time = time.time() - normalization_start_time
        elapsed_time_dict['normalization'] = round(normalization_time, 3)
        # apply output format
        if out_format == available_formats[0]:
            elapsed_time_dict['total'] = round(time.time() - get_start_t, 3)
            # PubAnnotation JSON
            pubannotation_res = list()
            for d in tagged_docs:
                pubannotation_res.append(
                    get_pub_annotation(d, is_raw_text=is_raw_text,
                                       elapsed_time_dict=elapsed_time_dict))
            self.wfile.write(
                json.dumps(pubannotation_res, indent=indent,
                           sort_keys=indent is not None).encode('utf-8'))
            # # Save a BERN result
            # with open(bern_output_path, 'w', encoding='utf-8') as f_out:
            #     json.dump(pubannotation_res, f_out)
        elif out_format == available_formats[1]:
            # PubTator
            self.wfile.write(get_pubtator(tagged_docs).encode('utf-8'))
            # # Save a BERN result
            # with open(bern_output_path, 'w', encoding='utf-8') as f_out:
            #     f_out.write(pubtator_res)
        print(datetime.now().strftime(time_format),
              '[{}] Done. Total {:.3f} sec\n'.format(cur_thread_name,
                                                     time.time() - get_start_t))
        return
    def do_POST(self):
        """Handle ``POST`` with a form field ``param`` = JSON ``{"text": ...}``.

        Validates the input (non-empty English-ish text), preprocesses it,
        runs the full local pipeline via tag_entities, and writes the
        resulting PubAnnotation JSON back.  Always responds HTTP 200;
        errors are reported as ``[{"error": ...}]`` JSON payloads.
        """
        post_start_t = time.time()
        # NOTE(review): the cgi module is deprecated since Python 3.11 —
        # fine on the interpreter this service pins, but worth migrating.
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST',
                     'CONTENT_TYPE': self.headers['Content-Type'],
                     }
        )
        self.send_response(200)
        self.end_headers()
        cur_thread_name = threading.current_thread().getName()
        time_format = self.stm_dict['time_format']
        # input
        if 'param' not in form:
            err_msg = [{"error": "no param"}]
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            self.wfile.write(json.dumps(err_msg).encode('utf-8'))
            return
        data = json.loads(form['param'].value)
        if 'text' not in data:
            err_msg = [{"error": "no text"}]
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            self.wfile.write(json.dumps(err_msg).encode('utf-8'))
            return
        text = str(data['text'])
        # print(datetime.now().strftime(time_format), 'Input:', text)
        if text == '':
            err_msg = [{"error": "empty text"}]
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            self.wfile.write(json.dumps(err_msg).encode('utf-8'))
            return
        if len(text.strip()) == 0:
            err_msg = [{"error": "only whitespace letters"}]
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            self.wfile.write(json.dumps(err_msg).encode('utf-8'))
            return
        text = self.preprocess_input(text, cur_thread_name)
        # NER
        result_dict = self.tag_entities(
            text, cur_thread_name, is_raw_text=True, reuse=False)
        if result_dict is None:
            err_msg = [{"error": "NER crash"}]
            print(datetime.now().strftime(time_format),
                  '[' + cur_thread_name + ']', err_msg)
            self.wfile.write(json.dumps(err_msg).encode('utf-8'))
            return
        result_dict_str = json.dumps(result_dict)
        # # output, pretty json
        # print(datetime.now().strftime(time_format), 'output:',
        #       json.dumps(result_dict, indent=4, sort_keys=True))
        # send message
        self.wfile.write(result_dict_str.encode('utf-8'))
        print(datetime.now().strftime(self.stm_dict['time_format']),
              '[{}] Done. total {:.3f} sec\n'
              .format(cur_thread_name, time.time() - post_start_t))
        return
    def preprocess_input(self, text, cur_thread_name):
        """Normalize raw input text for the pipeline.

        Replaces line breaks and tabs with spaces and truncates any word
        longer than ``stm_dict['max_word_len']`` characters.  Returns the
        cleaned text.
        """
        if '\n' in text:
            print(datetime.now().strftime(self.stm_dict['time_format']),
                  '[{}] Found a line break -> replace w/ a space'
                  .format(cur_thread_name))
            text = text.replace('\n', ' ')
        if '\t' in text:
            print(datetime.now().strftime(self.stm_dict['time_format']),
                  '[{}] Found a tab -> replace w/ a space'
                  .format(cur_thread_name))
            text = text.replace('\t', ' ')
        found_too_long_words = 0
        tokens = text.split(' ')
        for idx, tk in enumerate(tokens):
            if len(tk) > self.stm_dict['max_word_len']:
                tokens[idx] = tk[:self.stm_dict['max_word_len']]
                found_too_long_words += 1
        if found_too_long_words > 0:
            print(datetime.now().strftime(self.stm_dict['time_format']),
                  '[{}] Found a too long word -> cut the suffix of the word'
                  .format(cur_thread_name))
        text = ' '.join(tokens)
        return text
    def tag_entities(self, text, cur_thread_name, is_raw_text, reuse=False):
        """Run the full annotation pipeline on raw *text*.

        Pipeline: write a .PubTator file -> GNormPlus -> tmVar 2.0 ->
        BioBERT NER -> normalization -> PubAnnotation conversion.
        The result is cached under ./output keyed by a SHA-224 of the text;
        with ``reuse=True`` a cached result is returned directly.

        Returns the PubAnnotation dict, or None when any pipeline stage
        fails.
        """
        assert self.stm_dict is not None
        get_start_t = time.time()
        elapsed_time_dict = dict()
        # Texts without any ASCII letter are replaced by a fixed English
        # notice so downstream tools receive processable input.
        n_ascii_letters = 0
        for l in text:
            if l not in string.ascii_letters:
                continue
            n_ascii_letters += 1
        if n_ascii_letters == 0:
            text = 'No ascii letters. Please enter your text in English.'
        text_hash = hashlib.sha224(text.encode('utf-8')).hexdigest()
        print(datetime.now().strftime(self.stm_dict['time_format']),
              '[{}] text_hash: {}'.format(cur_thread_name, text_hash))
        bern_output_path = './output/bern_demo_{}.json'.format(text_hash)
        if reuse and os.path.exists(bern_output_path):
            print(datetime.now().strftime(self.stm_dict['time_format']),
                  '[{}] Found prev. output'.format(cur_thread_name))
            with open(bern_output_path, 'r', encoding='utf-8') as f_out:
                return json.load(f_out)
        home_gnormplus = self.stm_dict['gnormplus_home']
        input_gnormplus = os.path.join(home_gnormplus, 'input',
                                       '{}.PubTator'.format(text_hash))
        output_gnormplus = os.path.join(home_gnormplus, 'output',
                                        '{}.PubTator'.format(text_hash))
        home_tmvar2 = self.stm_dict['tmvar2_home']
        input_dir_tmvar2 = os.path.join(home_tmvar2, 'input')
        input_tmvar2 = os.path.join(input_dir_tmvar2,
                                    '{}.PubTator'.format(text_hash))
        output_tmvar2 = os.path.join(home_tmvar2, 'output',
                                     '{}.PubTator.PubTator'.format(text_hash))
        # Write input str to a .PubTator format file
        with open(input_gnormplus, 'w', encoding='utf-8') as f:
            # only title
            f.write(text_hash + '|t|')
            f.write('\n')
            f.write(text_hash + '|a|' + text + '\n\n')
        # Run GNormPlus
        gnormplus_start_time = time.time()
        tell_inputfile(self.stm_dict['gnormplus_host'],
                       self.stm_dict['gnormplus_port'],
                       '{}.PubTator'.format(text_hash))
        gnormplus_time = time.time() - gnormplus_start_time
        elapsed_time_dict['gnormplus'] = round(gnormplus_time, 3)
        print(datetime.now().strftime(self.stm_dict['time_format']),
              '[{}] GNormPlus {:.3f} sec'
              .format(cur_thread_name, gnormplus_time))
        # Move a GNormPlus output file to the tmVar2 input directory
        shutil.move(output_gnormplus, input_tmvar2)
        # Run tmVar 2.0
        tmvar2_start_time = time.time()
        tell_inputfile(self.stm_dict['tmvar2_host'],
                       self.stm_dict['tmvar2_port'],
                       '{}.PubTator'.format(text_hash))
        tmvar2_time = time.time() - tmvar2_start_time
        elapsed_time_dict['tmvar2'] = round(tmvar2_time, 3)
        print(datetime.now().strftime(self.stm_dict['time_format']),
              '[{}] tmVar 2.0 {:.3f} sec'
              .format(cur_thread_name, tmvar2_time))
        # Convert tmVar 2.0 outputs (?.PubTator.PubTator) to python dict
        dict_list = pubtator2dict_list(output_tmvar2, is_raw_text=True)
        # Delete temp files
        os.remove(input_gnormplus)
        os.remove(input_tmvar2)
        os.remove(output_tmvar2)
        # error
        if type(dict_list) is str:
            print(dict_list)
            return None
        # Run BioBERT of Lee et al., 2019
        ner_start_time = time.time()
        tagged_docs, num_entities = \
            self.biobert_recognize(dict_list, is_raw_text, cur_thread_name)
        if tagged_docs is None:
            return None
        # Raw-text requests always produce exactly one pseudo-document.
        assert len(tagged_docs) == 1
        ner_time = time.time() - ner_start_time
        elapsed_time_dict['ner'] = round(ner_time, 3)
        print(datetime.now().strftime(self.stm_dict['time_format']),
              '[%s] NER %.3f sec, #entities: %d' %
              (cur_thread_name, ner_time, num_entities))
        # Normalization models
        normalization_time = 0.
        if num_entities > 0:
            normalization_start_time = time.time()
            # print(datetime.now().strftime(time_format),
            #       '[{}] Normalization models..'.format(cur_thread_name))
            tagged_docs = self.normalizer.normalize(text_hash, tagged_docs,
                                                    cur_thread_name,
                                                    is_raw_text=is_raw_text)
            normalization_time = time.time() - normalization_start_time
        elapsed_time_dict['normalization'] = round(normalization_time, 3)
        # Convert to PubAnnotation JSON
        elapsed_time_dict['total'] = round(time.time() - get_start_t, 3)
        tagged_docs[0] = get_pub_annotation(tagged_docs[0],
                                            is_raw_text=is_raw_text,
                                            elapsed_time_dict=elapsed_time_dict)
        # Save a BERN result
        with open(bern_output_path, 'w', encoding='utf-8') as f_out:
            json.dump(tagged_docs[0], f_out, sort_keys=True)
        return tagged_docs[0]
    def biobert_recognize(self, dict_list, is_raw_text, cur_thread_name):
        """Run the shared BioBERT NER model over *dict_list*.

        Filters implausible species entities via filter_entities and logs
        how many were removed per document.

        Returns:
            (tagged documents, total entity count), or (None, 0) when the
            model returns nothing.
        """
        res = self.stm_dict['biobert'].recognize(dict_list,
                                                 is_raw_text=is_raw_text,
                                                 thread_id=cur_thread_name)
        if res is None:
            return None, 0
        num_filtered_species_per_doc = filter_entities(res, is_raw_text)
        for n_f_spcs in num_filtered_species_per_doc:
            if n_f_spcs[1] > 0:
                print(datetime.now().strftime(self.stm_dict['time_format']),
                      '[{}] Filtered {} species{}'
                      .format(cur_thread_name, n_f_spcs[1],
                              '' if is_raw_text
                              else ' in PMID:%s' % n_f_spcs[0]))
        num_entities = count_entities(res)
        return res, num_entities
    @staticmethod
    def get_err_dict():
        """Return an empty result skeleton used for error responses."""
        return {
            'pmid': '',
            'title': '',
            'abstract': '',
            'entities': {
                'mutation': [],
                'drug': [],
                'gene': [],
                'disease': [],
                'species': [],
            }
        }
# https://docs.python.org/3.6/library/socketserver.html#asynchronous-mixins
# https://stackoverflow.com/a/14089457
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in its own thread (ThreadingMixIn)."""
    request_queue_size = 4  # to match our server's memory size (default: 5)
def count_entities(data):
    """Return the total number of tagged entities across all documents.

    Documents without an 'entities' key contribute nothing.  The
    'mutation' category is optional; 'gene', 'disease', 'drug' and
    'species' are expected to be present in every entities dict.
    """
    total = 0
    for doc in data:
        if 'entities' not in doc:
            continue
        entities = doc['entities']
        total += sum(len(entities[etype])
                     for etype in ('gene', 'disease', 'drug', 'species'))
        if 'mutation' in entities:
            total += len(entities['mutation'])
    return total
def tell_inputfile(host, port, inputfile):
    """Announce *inputfile* to a GNormPlus/tmVar2 worker over TCP.

    Wire protocol: a big-endian 2-byte length prefix followed by the
    UTF-8 file name; the worker replies with a similarly framed message
    whose 2-byte header is stripped before returning.

    Returns:
        The worker's response string (length header removed).

    Raises:
        ConnectionRefusedError: if the worker is down.  An alert e-mail
            is sent before re-raising.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Disable Nagle's algorithm; the exchange is a single tiny message.
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    try:
        sock.connect((host, port))
    except ConnectionRefusedError as cre:
        print(cre)
        sock.close()  # fix: release the socket instead of leaking it
        from utils import send_mail
        from service_checker import FROM_GMAIL_ADDR, \
            FROM_GMAIL_ACCOUNT_PASSWORD, TO_EMAIL_ADDR
        send_mail(FROM_GMAIL_ADDR, TO_EMAIL_ADDR,
                  '[BERN] Error: Connection refused',
                  'inputfile: ' + inputfile,
                  FROM_GMAIL_ADDR, FROM_GMAIL_ACCOUNT_PASSWORD)
        # fix: the original fell through here and crashed on send()
        # against an unconnected socket; surface the real error instead.
        raise
    try:
        # NOTE: len() counts characters, not bytes; the file names used
        # here are ASCII hashes, so the two are equal.
        input_str = inputfile
        input_stream = struct.pack('>H', len(input_str)) + input_str.encode('utf-8')
        sock.send(input_stream)
        output_stream = sock.recv(512)
    finally:
        sock.close()  # fix: close even when send/recv raises
    resp = output_stream.decode('utf-8')[2:]
    return resp
def delete_files(dirname):
    """Remove every regular file directly under *dirname* (non-recursive).

    Missing directories are a no-op; subdirectories are left untouched.
    Each deletion is logged to stdout.
    """
    if not os.path.exists(dirname):
        return
    for entry in os.listdir(dirname):
        path = os.path.join(dirname, entry)
        if os.path.isfile(path):
            print('Delete', path)
            os.remove(path)
class Main:
    """Bootstraps the BERN service: seeds RNGs, loads the BioBERT model,
    prepares working directories, and starts the threaded HTTP server.

    Constructing Main blocks forever in serve_forever().
    """
    def __init__(self, params):
        """Initialize models and serve on ``params.ip:params.port``.

        Args:
            params: argparse.Namespace with the CLI options defined in
                the __main__ block (ip, port, seed, model/home paths,
                host/port of GNormPlus and tmVar2, limits, formats).
        """
        print(datetime.now().strftime(params.time_format), 'Starting..')
        # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # verbose off(info, warning)
        random.seed(params.seed)
        np.random.seed(params.seed)
        # NOTE(review): tf.set_random_seed is the TF1 API; under TF2 this
        # would be tf.compat.v1.set_random_seed — confirm the pinned
        # TensorFlow version.
        tf.set_random_seed(params.seed)
        print("A GPU is{} available".format(
            "" if tf.test.is_gpu_available() else " NOT"))
        # Shared state injected into every GetHandler thread below.
        stm_dict = dict()
        stm_dict['params'] = params
        FLAGS.model_dir = './biobert_ner/pretrainedBERT/'
        FLAGS.bert_config_file = './biobert_ner/conf/bert_config.json'
        FLAGS.vocab_file = './biobert_ner/conf/vocab.txt'
        FLAGS.init_checkpoint = \
            './biobert_ner/pretrainedBERT/pubmed_pmc_470k/biobert_model.ckpt'
        FLAGS.ip = params.ip
        FLAGS.port = params.port
        FLAGS.gnormplus_home = params.gnormplus_home
        FLAGS.gnormplus_host = params.gnormplus_host
        FLAGS.gnormplus_port = params.gnormplus_port
        FLAGS.tmvar2_home = params.tmvar2_home
        FLAGS.tmvar2_host = params.tmvar2_host
        FLAGS.tmvar2_port = params.tmvar2_port
        # import pprint
        # pprint.PrettyPrinter().pprint(FLAGS.__flags)
        # Loads the BioBERT NER model once; shared by all handler threads.
        stm_dict['biobert'] = BioBERT(FLAGS)
        stm_dict['gnormplus_home'] = params.gnormplus_home
        stm_dict['gnormplus_host'] = params.gnormplus_host
        stm_dict['gnormplus_port'] = params.gnormplus_port
        stm_dict['tmvar2_home'] = params.tmvar2_home
        stm_dict['tmvar2_host'] = params.tmvar2_host
        stm_dict['tmvar2_port'] = params.tmvar2_port
        stm_dict['max_word_len'] = params.max_word_len
        stm_dict['ner_model'] = params.ner_model
        stm_dict['n_pmid_limit'] = params.n_pmid_limit
        stm_dict['time_format'] = params.time_format
        stm_dict['available_formats'] = params.available_formats
        if not os.path.exists('./output'):
            os.mkdir('output')
        else:
            # delete prev. version outputs
            delete_files('./output')
            delete_files(os.path.join(params.gnormplus_home, 'input'))
            delete_files(os.path.join(params.tmvar2_home, 'input'))
        print(datetime.now().strftime(params.time_format),
              'Starting server at http://{}:{}'.format(params.ip, params.port))
        # https://stackoverflow.com/a/18445168
        GetHandler.stm_dict = stm_dict
        GetHandler.normalizer = Normalizer()
        # https://docs.python.org/3.6/library/socketserver.html#asynchronous-mixins
        # https://stackoverflow.com/a/14089457
        server = ThreadedHTTPServer((params.ip, params.port), GetHandler)
        server.serve_forever()
if __name__ == '__main__':
    import argparse

    argparser = argparse.ArgumentParser()
    argparser.add_argument('--ip', default='0.0.0.0')
    argparser.add_argument('--port', type=int, default=8888)
    argparser.add_argument('--ner_model', default='BioBERT')
    argparser.add_argument('--max_word_len', type=int, help='word max chars',
                           default=50)
    argparser.add_argument('--seed', type=int, help='seed value', default=2019)
    argparser.add_argument('--gnormplus_home',
                           help='GNormPlus home',
                           default=os.path.join(os.path.expanduser('~'),
                                                'bern', 'GNormPlusJava'))
    argparser.add_argument('--gnormplus_host',
                           help='GNormPlus host', default='localhost')
    argparser.add_argument('--gnormplus_port', type=int,
                           help='GNormPlus port', default=18895)
    argparser.add_argument('--tmvar2_home',
                           help='tmVar 2.0 home',
                           default=os.path.join(os.path.expanduser('~'),
                                                'bern', 'tmVarJava'))
    argparser.add_argument('--tmvar2_host',
                           help='tmVar 2.0 host', default='localhost')
    argparser.add_argument('--tmvar2_port', type=int,
                           help='tmVar 2.0 port', default=18896)
    argparser.add_argument('--n_pmid_limit', type=int,
                           help='max # of pmids', default=10)
    # Fix: the original used type=list, which splits a command-line value
    # into single characters ('json' -> ['j', 's', 'o', 'n']) and could
    # never match the 'json'/'pubtator' checks in GetHandler.do_GET.
    # Accept a comma-separated list instead; the default is unchanged.
    argparser.add_argument('--available_formats',
                           type=lambda s: s.split(','),
                           help='output formats (comma-separated)',
                           default=['json', 'pubtator'])
    argparser.add_argument('--time_format',
                           help='time format', default='[%d/%b/%Y %H:%M:%S.%f]')
    args = argparser.parse_args()
    Main(args)
| [
"biobert_ner.run_ner.BioBERT",
"os.remove",
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"socket.socket",
"json.dumps",
"convert.get_pub_annotation",
"os.path.isfile",
"os.path.join",
"urllib.parse.urlparse",
"json.loads",
"utils.filter_entities",
"normalize.Normalizer",
"... | [((21802, 21851), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (21815, 21851), False, 'import socket\n'), ((22724, 22743), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (22734, 22743), False, 'import os\n'), ((25665, 25690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (25688, 25690), False, 'import argparse\n'), ((828, 839), 'time.time', 'time.time', ([], {}), '()\n', (837, 839), False, 'import time\n'), ((862, 890), 'urllib.parse.urlparse', 'urlparse.urlparse', (['self.path'], {}), '(self.path)\n', (879, 890), True, 'import urllib.parse as urlparse\n'), ((2282, 2318), 'urllib.parse.parse_qs', 'urlparse.parse_qs', (['parsed_path.query'], {}), '(parsed_path.query)\n', (2299, 2318), True, 'import urllib.parse as urlparse\n'), ((5162, 5173), 'time.time', 'time.time', ([], {}), '()\n', (5171, 5173), False, 'import time\n'), ((5194, 5231), 'convert.pubtator_biocxml2dict_list', 'pubtator_biocxml2dict_list', (['pmid_list'], {}), '(pmid_list)\n', (5220, 5231), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((7634, 7645), 'time.time', 'time.time', ([], {}), '()\n', (7643, 7645), False, 'import time\n'), ((10778, 10789), 'time.time', 'time.time', ([], {}), '()\n', (10787, 10789), False, 'import time\n'), ((10805, 10945), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {'fp': 'self.rfile', 'headers': 'self.headers', 'environ': "{'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']}"}), "(fp=self.rfile, headers=self.headers, environ={\n 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type']})\n", (10821, 10945), False, 'import cgi\n'), ((11515, 11546), 'json.loads', 'json.loads', (["form['param'].value"], {}), "(form['param'].value)\n", (11525, 11546), False, 'import json\n'), ((12975, 12998), 'json.dumps', 'json.dumps', (['result_dict'], {}), 
'(result_dict)\n', (12985, 12998), False, 'import json\n'), ((14697, 14708), 'time.time', 'time.time', ([], {}), '()\n', (14706, 14708), False, 'import time\n'), ((16015, 16049), 'os.path.join', 'os.path.join', (['home_tmvar2', '"""input"""'], {}), "(home_tmvar2, 'input')\n", (16027, 16049), False, 'import os\n'), ((16631, 16642), 'time.time', 'time.time', ([], {}), '()\n', (16640, 16642), False, 'import time\n'), ((17182, 17225), 'shutil.move', 'shutil.move', (['output_gnormplus', 'input_tmvar2'], {}), '(output_gnormplus, input_tmvar2)\n', (17193, 17225), False, 'import shutil\n'), ((17279, 17290), 'time.time', 'time.time', ([], {}), '()\n', (17288, 17290), False, 'import time\n'), ((17825, 17876), 'convert.pubtator2dict_list', 'pubtator2dict_list', (['output_tmvar2'], {'is_raw_text': '(True)'}), '(output_tmvar2, is_raw_text=True)\n', (17843, 17876), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((17914, 17940), 'os.remove', 'os.remove', (['input_gnormplus'], {}), '(input_gnormplus)\n', (17923, 17940), False, 'import os\n'), ((17949, 17972), 'os.remove', 'os.remove', (['input_tmvar2'], {}), '(input_tmvar2)\n', (17958, 17972), False, 'import os\n'), ((17981, 18005), 'os.remove', 'os.remove', (['output_tmvar2'], {}), '(output_tmvar2)\n', (17990, 18005), False, 'import os\n'), ((18179, 18190), 'time.time', 'time.time', ([], {}), '()\n', (18188, 18190), False, 'import time\n'), ((19462, 19562), 'convert.get_pub_annotation', 'get_pub_annotation', (['tagged_docs[0]'], {'is_raw_text': 'is_raw_text', 'elapsed_time_dict': 'elapsed_time_dict'}), '(tagged_docs[0], is_raw_text=is_raw_text,\n elapsed_time_dict=elapsed_time_dict)\n', (19480, 19562), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((20214, 20247), 'utils.filter_entities', 'filter_entities', (['res', 'is_raw_text'], {}), '(res, is_raw_text)\n', (20229, 20247), False, 'from utils 
import filter_entities\n'), ((22670, 22693), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (22684, 22693), False, 'import os\n'), ((22762, 22786), 'os.path.join', 'os.path.join', (['dirname', 'f'], {}), '(dirname, f)\n', (22774, 22786), False, 'import os\n'), ((22887, 22904), 'os.remove', 'os.remove', (['f_path'], {}), '(f_path)\n', (22896, 22904), False, 'import os\n'), ((23113, 23137), 'random.seed', 'random.seed', (['params.seed'], {}), '(params.seed)\n', (23124, 23137), False, 'import random\n'), ((23146, 23173), 'numpy.random.seed', 'np.random.seed', (['params.seed'], {}), '(params.seed)\n', (23160, 23173), True, 'import numpy as np\n'), ((23182, 23213), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['params.seed'], {}), '(params.seed)\n', (23200, 23213), True, 'import tensorflow as tf\n'), ((24158, 24172), 'biobert_ner.run_ner.BioBERT', 'BioBERT', (['FLAGS'], {}), '(FLAGS)\n', (24165, 24172), False, 'from biobert_ner.run_ner import BioBERT, FLAGS\n'), ((25349, 25361), 'normalize.Normalizer', 'Normalizer', ([], {}), '()\n', (25359, 25361), False, 'from normalize import Normalizer\n'), ((5254, 5265), 'time.time', 'time.time', ([], {}), '()\n', (5263, 5265), False, 'import time\n'), ((7779, 7790), 'time.time', 'time.time', ([], {}), '()\n', (7788, 7790), False, 'import time\n'), ((9053, 9064), 'time.time', 'time.time', ([], {}), '()\n', (9062, 9064), False, 'import time\n'), ((15314, 15346), 'os.path.exists', 'os.path.exists', (['bern_output_path'], {}), '(bern_output_path)\n', (15328, 15346), False, 'import os\n'), ((16836, 16847), 'time.time', 'time.time', ([], {}), '()\n', (16845, 16847), False, 'import time\n'), ((17475, 17486), 'time.time', 'time.time', ([], {}), '()\n', (17484, 17486), False, 'import time\n'), ((18418, 18429), 'time.time', 'time.time', ([], {}), '()\n', (18427, 18429), False, 'import time\n'), ((18810, 18821), 'time.time', 'time.time', ([], {}), '()\n', (18819, 18821), False, 'import time\n'), ((19758, 
19806), 'json.dump', 'json.dump', (['tagged_docs[0]', 'f_out'], {'sort_keys': '(True)'}), '(tagged_docs[0], f_out, sort_keys=True)\n', (19767, 19806), False, 'import json\n'), ((22174, 22332), 'utils.send_mail', 'send_mail', (['FROM_GMAIL_ADDR', 'TO_EMAIL_ADDR', '"""[BERN] Error: Connection refused"""', "('inputfile: ' + inputfile)", 'FROM_GMAIL_ADDR', 'FROM_GMAIL_ACCOUNT_PASSWORD'], {}), "(FROM_GMAIL_ADDR, TO_EMAIL_ADDR,\n '[BERN] Error: Connection refused', 'inputfile: ' + inputfile,\n FROM_GMAIL_ADDR, FROM_GMAIL_ACCOUNT_PASSWORD)\n", (22183, 22332), False, 'from utils import send_mail\n'), ((22802, 22824), 'os.path.isfile', 'os.path.isfile', (['f_path'], {}), '(f_path)\n', (22816, 22824), False, 'import os\n'), ((24805, 24831), 'os.path.exists', 'os.path.exists', (['"""./output"""'], {}), "('./output')\n", (24819, 24831), False, 'import os\n'), ((24845, 24863), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (24853, 24863), False, 'import os\n'), ((24980, 25024), 'os.path.join', 'os.path.join', (['params.gnormplus_home', '"""input"""'], {}), "(params.gnormplus_home, 'input')\n", (24992, 25024), False, 'import os\n'), ((25047, 25088), 'os.path.join', 'os.path.join', (['params.tmvar2_home', '"""input"""'], {}), "(params.tmvar2_home, 'input')\n", (25059, 25088), False, 'import os\n'), ((917, 943), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (941, 943), False, 'import threading\n'), ((9320, 9331), 'time.time', 'time.time', ([], {}), '()\n', (9329, 9331), False, 'import time\n'), ((11118, 11144), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (11142, 11144), False, 'import threading\n'), ((15586, 15602), 'json.load', 'json.load', (['f_out'], {}), '(f_out)\n', (15595, 15602), False, 'import json\n'), ((19210, 19221), 'time.time', 'time.time', ([], {}), '()\n', (19219, 19221), False, 'import time\n'), ((19407, 19418), 'time.time', 'time.time', ([], {}), '()\n', (19416, 19418), False, 'import 
time\n'), ((26210, 26233), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (26228, 26233), False, 'import os\n'), ((26691, 26714), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (26709, 26714), False, 'import os\n'), ((3994, 4008), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4006, 4008), False, 'from datetime import datetime\n'), ((7397, 7411), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7409, 7411), False, 'from datetime import datetime\n'), ((8609, 8623), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8621, 8623), False, 'from datetime import datetime\n'), ((9559, 9570), 'time.time', 'time.time', ([], {}), '()\n', (9568, 9570), False, 'import time\n'), ((9758, 9846), 'convert.get_pub_annotation', 'get_pub_annotation', (['d'], {'is_raw_text': 'is_raw_text', 'elapsed_time_dict': 'elapsed_time_dict'}), '(d, is_raw_text=is_raw_text, elapsed_time_dict=\n elapsed_time_dict)\n', (9776, 9846), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((10527, 10541), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10539, 10541), False, 'from datetime import datetime\n'), ((10688, 10699), 'time.time', 'time.time', ([], {}), '()\n', (10697, 10699), False, 'import time\n'), ((13260, 13274), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13272, 13274), False, 'from datetime import datetime\n'), ((13400, 13411), 'time.time', 'time.time', ([], {}), '()\n', (13409, 13411), False, 'import time\n'), ((15091, 15105), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15103, 15105), False, 'from datetime import datetime\n'), ((16951, 16965), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16963, 16965), False, 'from datetime import datetime\n'), ((17581, 17595), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17593, 17595), False, 'from datetime import datetime\n'), 
((18515, 18529), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18527, 18529), False, 'from datetime import datetime\n'), ((22965, 22979), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22977, 22979), False, 'from datetime import datetime\n'), ((23278, 23304), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (23302, 23304), True, 'import tensorflow as tf\n'), ((25105, 25119), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25117, 25119), False, 'from datetime import datetime\n'), ((1936, 1950), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1948, 1950), False, 'from datetime import datetime\n'), ((2513, 2527), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2525, 2527), False, 'from datetime import datetime\n'), ((3041, 3055), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3053, 3055), False, 'from datetime import datetime\n'), ((5579, 5593), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5591, 5593), False, 'from datetime import datetime\n'), ((9928, 10002), 'json.dumps', 'json.dumps', (['pubannotation_res'], {'indent': 'indent', 'sort_keys': '(indent is not None)'}), '(pubannotation_res, indent=indent, sort_keys=indent is not None)\n', (9938, 10002), False, 'import json\n'), ((11320, 11334), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11332, 11334), False, 'from datetime import datetime\n'), ((11443, 11462), 'json.dumps', 'json.dumps', (['err_msg'], {}), '(err_msg)\n', (11453, 11462), False, 'import json\n'), ((11642, 11656), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11654, 11656), False, 'from datetime import datetime\n'), ((11765, 11784), 'json.dumps', 'json.dumps', (['err_msg'], {}), '(err_msg)\n', (11775, 11784), False, 'import json\n'), ((12015, 12029), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12027, 12029), False, 'from datetime import datetime\n'), ((12138, 12157), 'json.dumps', 
'json.dumps', (['err_msg'], {}), '(err_msg)\n', (12148, 12157), False, 'import json\n'), ((12309, 12323), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12321, 12323), False, 'from datetime import datetime\n'), ((12432, 12451), 'json.dumps', 'json.dumps', (['err_msg'], {}), '(err_msg)\n', (12442, 12451), False, 'import json\n'), ((12769, 12783), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12781, 12783), False, 'from datetime import datetime\n'), ((12892, 12911), 'json.dumps', 'json.dumps', (['err_msg'], {}), '(err_msg)\n', (12902, 12911), False, 'import json\n'), ((13543, 13557), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13555, 13557), False, 'from datetime import datetime\n'), ((13795, 13809), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13807, 13809), False, 'from datetime import datetime\n'), ((14323, 14337), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14335, 14337), False, 'from datetime import datetime\n'), ((15366, 15380), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15378, 15380), False, 'from datetime import datetime\n'), ((6772, 6786), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6784, 6786), False, 'from datetime import datetime\n'), ((10314, 10339), 'convert.get_pubtator', 'get_pubtator', (['tagged_docs'], {}), '(tagged_docs)\n', (10326, 10339), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((20356, 20370), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20368, 20370), False, 'from datetime import datetime\n'), ((6125, 6151), 'convert.get_pubtator', 'get_pubtator', (['[error_dict]'], {}), '([error_dict])\n', (6137, 6151), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((8531, 8557), 'convert.get_pubtator', 'get_pubtator', (['[error_dict]'], {}), '([error_dict])\n', (8543, 8557), False, 'from convert 
import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((5806, 5861), 'convert.get_pub_annotation', 'get_pub_annotation', (['error_dict'], {'is_raw_text': 'is_raw_text'}), '(error_dict, is_raw_text=is_raw_text)\n', (5824, 5861), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((7318, 7344), 'convert.get_pubtator', 'get_pubtator', (['[error_dict]'], {}), '([error_dict])\n', (7330, 7344), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((8212, 8267), 'convert.get_pub_annotation', 'get_pub_annotation', (['error_dict'], {'is_raw_text': 'is_raw_text'}), '(error_dict, is_raw_text=is_raw_text)\n', (8230, 8267), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n'), ((6999, 7054), 'convert.get_pub_annotation', 'get_pub_annotation', (['error_dict'], {'is_raw_text': 'is_raw_text'}), '(error_dict, is_raw_text=is_raw_text)\n', (7017, 7054), False, 'from convert import pubtator2dict_list, pubtator_biocxml2dict_list, get_pub_annotation, get_pubtator\n')] |
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# #ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import numpy as np
import modin.pandas as pd
from pmdarima.arima import AutoARIMA
from sklearn.metrics import mean_squared_error
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import PREDICTED_LOWER_COL
from greykite.common.constants import PREDICTED_UPPER_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator
class AutoArimaEstimator(BaseForecastEstimator):
    """Wrapper for ``pmdarima.arima.AutoARIMA``.
    It currently does not handle the regressor issue when there is
    gap between train and predict periods.
    Parameters
    ----------
    score_func : callable
        see ``BaseForecastEstimator``.
    coverage : float between [0.0, 1.0]
        see ``BaseForecastEstimator``.
    null_model_params : dict with arguments to define DummyRegressor null model, optional, default=None
        see ``BaseForecastEstimator``.
    regressor_cols: `list` [`str`], optional, default None
        A list of regressor columns used during training and prediction.
        If None, no regressor columns are used.
    See ``AutoArima`` documentation for rest of the parameter descriptions:
        * https://alkaline-ml.com/pmdarima/modules/generated/pmdarima.arima.AutoARIMA.html#pmdarima.arima.AutoARIMA
    Attributes
    ----------
    model : ``AutoArima`` object
        Auto arima model object
    fit_df : `pandas.DataFrame` or None
        The training data used to fit the model.
    forecast : `pandas.DataFrame`
        Output of the predict method of ``AutoArima``.
    """
    def __init__(
            self,
            # Null model parameters
            score_func: callable = mean_squared_error,
            coverage: float = 0.90,
            null_model_params: Optional[Dict] = None,
            # Additional parameters
            regressor_cols: Optional[List[str]] = None,
            # Frequency is a pandas offset alias string (e.g. "D", "H");
            # inferred from the training data in `predict` when None.
            freq: Optional[str] = None,
            # pmdarima fit parameters
            start_p: Optional[int] = 2,
            d: Optional[int] = None,
            start_q: Optional[int] = 2,
            max_p: Optional[int] = 5,
            max_d: Optional[int] = 2,
            max_q: Optional[int] = 5,
            start_P: Optional[int] = 1,
            D: Optional[int] = None,
            start_Q: Optional[int] = 1,
            max_P: Optional[int] = 2,
            max_D: Optional[int] = 1,
            max_Q: Optional[int] = 2,
            max_order: Optional[int] = 5,
            m: Optional[int] = 1,
            seasonal: Optional[bool] = True,
            stationary: Optional[bool] = False,
            information_criterion: Optional[str] = 'aic',
            alpha: Optional[float] = 0.05,
            test: Optional[str] = 'kpss',
            seasonal_test: Optional[str] = 'ocsb',
            stepwise: Optional[bool] = True,
            n_jobs: Optional[int] = 1,
            start_params: Optional[Dict] = None,
            trend: Optional[str] = None,
            method: Optional[str] = 'lbfgs',
            maxiter: Optional[int] = 50,
            offset_test_args: Optional[Dict] = None,
            seasonal_test_args: Optional[Dict] = None,
            suppress_warnings: Optional[bool] = True,
            error_action: Optional[str] = 'trace',
            trace: Optional[Union[int, bool]] = False,
            random: Optional[bool] = False,
            random_state: Optional[Union[int, callable]] = None,
            n_fits: Optional[int] = 10,
            out_of_sample_size: Optional[int] = 0,
            scoring: Optional[str] = 'mse',
            scoring_args: Optional[Dict] = None,
            with_intercept: Optional[Union[bool, str]] = "auto",
            # pmdarima predict parameters
            return_conf_int: Optional[bool] = True,
            dynamic: Optional[bool] = False):
        # Every subclass of BaseForecastEstimator must call super().__init__
        super().__init__(
            score_func=score_func,
            coverage=coverage,
            null_model_params=null_model_params)
        # Store constructor arguments unmodified (sklearn convention:
        # no validation or transformation logic in __init__).
        self.regressor_cols = regressor_cols
        self.freq = freq
        self.start_p = start_p
        self.d = d
        self.start_q = start_q
        self.max_p = max_p
        self.max_d = max_d
        self.max_q = max_q
        self.start_P = start_P
        self.D = D
        self.start_Q = start_Q
        self.max_P = max_P
        self.max_D = max_D
        self.max_Q = max_Q
        self.max_order = max_order
        self.m = m
        self.seasonal = seasonal
        self.stationary = stationary
        self.information_criterion = information_criterion
        self.alpha = alpha
        self.test = test
        self.seasonal_test = seasonal_test
        self.stepwise = stepwise
        self.n_jobs = n_jobs
        self.start_params = start_params
        self.trend = trend
        self.method = method
        self.maxiter = maxiter
        self.offset_test_args = offset_test_args
        self.seasonal_test_args = seasonal_test_args
        self.suppress_warnings = suppress_warnings
        self.error_action = error_action
        self.trace = trace
        self.random = random
        self.random_state = random_state
        self.n_fits = n_fits
        self.out_of_sample_size = out_of_sample_size
        self.scoring = scoring
        self.scoring_args = scoring_args
        self.with_intercept = with_intercept
        self.return_conf_int = return_conf_int
        self.coverage = coverage
        self.dynamic = dynamic
        # set by the fit method
        self.model = None
        self.fit_df = None
        # set by the predict method
        self.forecast = None
    def fit(self, X, y=None, time_col=TIME_COL, value_col=VALUE_COL, **fit_params):
        """Fits ``ARIMA`` forecast model.
        Parameters
        ----------
        X : `pandas.DataFrame`
            Input timeseries, with timestamp column,
            value column, and any additional regressors.
            The value column is the response, included in
            X to allow transformation by `sklearn.pipeline.Pipeline`
        y : ignored
            The original timeseries values, ignored.
            (The y for fitting is included in ``X``.)
        time_col : `str`
            Time column name in ``X``
        value_col : `str`
            Value column name in ``X``
        fit_params : `dict`
            additional parameters for null model
        Returns
        -------
        self : self
            Fitted model is stored in ``self.model``.
        """
        # ARIMA requires chronologically ordered observations.
        X = X.sort_values(by=time_col)
        # fits null model
        super().fit(X, y=y, time_col=time_col, value_col=value_col, **fit_params)
        # Keep the training frame; `predict` needs it to locate forecast
        # offsets relative to the first training timestamp.
        self.fit_df = X
        # fits AutoArima model
        self.model = AutoARIMA(
            start_p=self.start_p,
            d=self.d,
            start_q=self.start_q,
            max_p=self.max_p,
            max_d=self.max_d,
            max_q=self.max_q,
            start_P=self.start_P,
            D=self.D,
            start_Q=self.start_Q,
            max_P=self.max_P,
            max_D=self.max_D,
            max_Q=self.max_Q,
            max_order=self.max_order,
            m=self.m,
            seasonal=self.seasonal,
            stationary=self.stationary,
            information_criterion=self.information_criterion,
            alpha=self.alpha,
            test=self.test,
            seasonal_test=self.seasonal_test,
            stepwise=self.stepwise,
            n_jobs=self.n_jobs,
            start_params=self.start_params,
            trend=self.trend,
            method=self.method,
            maxiter=self.maxiter,
            offset_test_args=self.offset_test_args,
            seasonal_test_args=self.seasonal_test_args,
            suppress_warnings=self.suppress_warnings,
            error_action=self.error_action,
            trace=self.trace,
            random=self.random,
            random_state=self.random_state,
            n_fits=self.n_fits,
            out_of_sample_size=self.out_of_sample_size,
            scoring=self.scoring,
            scoring_args=self.scoring_args,
            with_intercept=self.with_intercept,
            return_conf_int=self.return_conf_int,
            dynamic=self.dynamic,
            regressor_cols=self.regressor_cols
        )
        # fits auto-arima
        if self.regressor_cols is None:
            reg_df = None
        else:
            reg_df = X[self.regressor_cols]
        self.model.fit(y=X[[value_col]], X=reg_df)
        return self
    def predict(self, X, y=None):
        """Creates forecast for the dates specified in ``X``.
        Currently does not support the regressor case where there is gap between
        train and predict periods.
        Parameters
        ----------
        X: `pandas.DataFrame`
            Input timeseries with timestamp column and any additional regressors.
            Timestamps are the dates for prediction.
            Value column, if provided in ``X``, is ignored.
        y: ignored.
        Returns
        -------
        predictions: `pandas.DataFrame`
            Forecasted values for the dates in ``X``. Columns:
            - ``TIME_COL``: dates
            - ``PREDICTED_COL``: predictions
            - ``PREDICTED_LOWER_COL``: lower bound of predictions
            - ``PREDICTED_UPPER_COL``: upper bound of predictions
        """
        X = X.sort_values(by=self.time_col_)
        # Returns the cached result if applicable
        cached_predictions = super().predict(X=X)
        if cached_predictions is not None:
            return cached_predictions
        # Currently does not support the regressor case where
        # there is gap between train and predict periods
        if self.regressor_cols is None:
            fut_reg_df = None
        else:
            fut_df = X[X[self.time_col_] > self.fit_df[self.time_col_].iloc[-1]]
            fut_reg_df = fut_df[self.regressor_cols] # Auto-arima only accepts regressor values beyond `fit_df`
        # NOTE(review): `self.freq` is mutated here as a side effect of
        # `predict`; subsequent calls reuse the inferred value.
        if self.freq is None:
            self.freq = pd.infer_freq(self.fit_df[self.time_col_])
        if self.freq == "H":
            self.freq = self.freq.lower() # np.timedelta recognizes lower case letters
        chosen_d = self.model.model_.order[1] # This is the value of the d chosen by auto-arima
        # Forecast start/end are integer offsets (in units of `self.freq`)
        # from the first training timestamp.
        forecast_start = int((X[self.time_col_].iloc[0] - self.fit_df[self.time_col_].iloc[0])/np.timedelta64(1, self.freq))
        if forecast_start < chosen_d:
            append_length = chosen_d - forecast_start # Number of NaNs to append to `pred_df`
            forecast_start = chosen_d # Auto-arima can not predict below the chosen d
        else:
            append_length = 0
        forecast_end = int((X[self.time_col_].iloc[-1] - self.fit_df[self.time_col_].iloc[0])/np.timedelta64(1, self.freq))
        predictions = self.model.predict_in_sample(
            X=fut_reg_df,
            start=forecast_start,
            end=forecast_end,
            dynamic=self.dynamic,
            return_conf_int=self.return_conf_int,
            alpha=(1-self.coverage)
        )
        if append_length > 0:
            # Pad the first `append_length` rows with NaN where auto-arima
            # could not predict (indices below the chosen differencing order).
            pred_df = pd.DataFrame({
                TIME_COL: X[self.time_col_],
                PREDICTED_COL: np.append(np.repeat(np.nan, append_length), predictions[0]),
                PREDICTED_LOWER_COL: np.append(np.repeat(np.nan, append_length), predictions[1][:, 0]),
                PREDICTED_UPPER_COL: np.append(np.repeat(np.nan, append_length), predictions[1][:, 1])
            })
        else:
            pred_df = pd.DataFrame({
                TIME_COL: X[self.time_col_],
                PREDICTED_COL: predictions[0],
                PREDICTED_LOWER_COL: predictions[1][:, 0],
                PREDICTED_UPPER_COL: predictions[1][:, 1]
            })
        self.forecast = pred_df
        # Caches the predictions
        self.cached_predictions_ = pred_df
        return pred_df
    def summary(self):
        BaseForecastEstimator.summary(self)
        # AutoArima summary
        return self.model.summary()
| [
"greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator.summary",
"numpy.timedelta64",
"pmdarima.arima.AutoARIMA",
"modin.pandas.DataFrame",
"modin.pandas.infer_freq",
"numpy.repeat"
] | [((8259, 9370), 'pmdarima.arima.AutoARIMA', 'AutoARIMA', ([], {'start_p': 'self.start_p', 'd': 'self.d', 'start_q': 'self.start_q', 'max_p': 'self.max_p', 'max_d': 'self.max_d', 'max_q': 'self.max_q', 'start_P': 'self.start_P', 'D': 'self.D', 'start_Q': 'self.start_Q', 'max_P': 'self.max_P', 'max_D': 'self.max_D', 'max_Q': 'self.max_Q', 'max_order': 'self.max_order', 'm': 'self.m', 'seasonal': 'self.seasonal', 'stationary': 'self.stationary', 'information_criterion': 'self.information_criterion', 'alpha': 'self.alpha', 'test': 'self.test', 'seasonal_test': 'self.seasonal_test', 'stepwise': 'self.stepwise', 'n_jobs': 'self.n_jobs', 'start_params': 'self.start_params', 'trend': 'self.trend', 'method': 'self.method', 'maxiter': 'self.maxiter', 'offset_test_args': 'self.offset_test_args', 'seasonal_test_args': 'self.seasonal_test_args', 'suppress_warnings': 'self.suppress_warnings', 'error_action': 'self.error_action', 'trace': 'self.trace', 'random': 'self.random', 'random_state': 'self.random_state', 'n_fits': 'self.n_fits', 'out_of_sample_size': 'self.out_of_sample_size', 'scoring': 'self.scoring', 'scoring_args': 'self.scoring_args', 'with_intercept': 'self.with_intercept', 'return_conf_int': 'self.return_conf_int', 'dynamic': 'self.dynamic', 'regressor_cols': 'self.regressor_cols'}), '(start_p=self.start_p, d=self.d, start_q=self.start_q, max_p=self.\n max_p, max_d=self.max_d, max_q=self.max_q, start_P=self.start_P, D=self\n .D, start_Q=self.start_Q, max_P=self.max_P, max_D=self.max_D, max_Q=\n self.max_Q, max_order=self.max_order, m=self.m, seasonal=self.seasonal,\n stationary=self.stationary, information_criterion=self.\n information_criterion, alpha=self.alpha, test=self.test, seasonal_test=\n self.seasonal_test, stepwise=self.stepwise, n_jobs=self.n_jobs,\n start_params=self.start_params, trend=self.trend, method=self.method,\n maxiter=self.maxiter, offset_test_args=self.offset_test_args,\n seasonal_test_args=self.seasonal_test_args, 
suppress_warnings=self.\n suppress_warnings, error_action=self.error_action, trace=self.trace,\n random=self.random, random_state=self.random_state, n_fits=self.n_fits,\n out_of_sample_size=self.out_of_sample_size, scoring=self.scoring,\n scoring_args=self.scoring_args, with_intercept=self.with_intercept,\n return_conf_int=self.return_conf_int, dynamic=self.dynamic,\n regressor_cols=self.regressor_cols)\n', (8268, 9370), False, 'from pmdarima.arima import AutoARIMA\n'), ((13492, 13527), 'greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator.summary', 'BaseForecastEstimator.summary', (['self'], {}), '(self)\n', (13521, 13527), False, 'from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator\n'), ((11582, 11624), 'modin.pandas.infer_freq', 'pd.infer_freq', (['self.fit_df[self.time_col_]'], {}), '(self.fit_df[self.time_col_])\n', (11595, 11624), True, 'import modin.pandas as pd\n'), ((13088, 13256), 'modin.pandas.DataFrame', 'pd.DataFrame', (['{TIME_COL: X[self.time_col_], PREDICTED_COL: predictions[0],\n PREDICTED_LOWER_COL: predictions[1][:, 0], PREDICTED_UPPER_COL:\n predictions[1][:, 1]}'], {}), '({TIME_COL: X[self.time_col_], PREDICTED_COL: predictions[0],\n PREDICTED_LOWER_COL: predictions[1][:, 0], PREDICTED_UPPER_COL:\n predictions[1][:, 1]})\n', (13100, 13256), True, 'import modin.pandas as pd\n'), ((11934, 11962), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'self.freq'], {}), '(1, self.freq)\n', (11948, 11962), True, 'import numpy as np\n'), ((12322, 12350), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'self.freq'], {}), '(1, self.freq)\n', (12336, 12350), True, 'import numpy as np\n'), ((12779, 12811), 'numpy.repeat', 'np.repeat', (['np.nan', 'append_length'], {}), '(np.nan, append_length)\n', (12788, 12811), True, 'import numpy as np\n'), ((12877, 12909), 'numpy.repeat', 'np.repeat', (['np.nan', 'append_length'], {}), '(np.nan, append_length)\n', (12886, 12909), True, 'import numpy as np\n'), ((12981, 
13013), 'numpy.repeat', 'np.repeat', (['np.nan', 'append_length'], {}), '(np.nan, append_length)\n', (12990, 13013), True, 'import numpy as np\n')] |
import numpy as np
'''
Numpy axes:
Axis 1: ====>
|-------+-------+-------+-------+
| R/C | col 1 | col 2 | ... |
|-------+-------+-------+-------+
Axis 0: | row 1 | | | |
|| --------+-------+-------+-------+
|| | row 2 | | | |
\/ --------+-------+-------+-------+
| ... | | | |
--------+-------+-------+-------+
* In an array, axis 0 is the "first" axis and axis 1 is the "second" axis
* This follows 0-based indexing, the first axis is numbered 0
* In a 2D array, axis 0 runs down along the rows (see above)
    np.concatenate([np.array([[1,2], [3,4]]), np.array([[5,6], [7,8]])], axis=0)
[[1 2],
[3 4],
[5 6],
[7 8]]
* Note: axis=0 is the default if it's omitted
* In a 2D array, axis 1 runs across the columns (see above)
    np.concatenate([np.array([[1,2], [3,4]]), np.array([[5,6], [7,8]])], axis=1)
[[1 2 5 6],
[3 4 7 8]]
* In a 1D array, there is only one axis - 0, using axis 1 is an error
* Because there is only 1 axis, axis 0 acts like axis 1 in a 2D array:
np.concatenate([np.array([1,2]), np.array([3,4])])
[1 2 3 4]
Numpy features:
* Concatenation:
array_1 = np.array([1,2,3])
array_2 = np.array([4,5,6])
array_3 = np.array([7,8,9])
np.concatenate((array_1, array_2, array_3))
[1 2 3 4 5 6 7 8 9]
* Why this result? See axes explanation above for 1D array.
Reference: https://www.sharpsightlabs.com/blog/numpy-axes-explained/
'''
def main():
    """Read two integer matrices from stdin and print their row-wise concatenation.

    Input format: first line is ``rows1 rows2 cols``, followed by ``rows1``
    then ``rows2`` whitespace-separated integer rows.
    """
    n_rows_a, n_rows_b, _n_cols = (int(tok) for tok in input().split())

    def read_matrix(n_rows):
        # One stdin line per matrix row.
        return np.array([[int(v) for v in input().split()] for _ in range(n_rows)])

    stacked = np.concatenate([read_matrix(n_rows_a), read_matrix(n_rows_b)])
    print(f'{stacked}')
if __name__ == '__main__':
    main()
| [
"numpy.concatenate"
] | [((1764, 1798), 'numpy.concatenate', 'np.concatenate', (['[matrix1, matrix2]'], {}), '([matrix1, matrix2])\n', (1778, 1798), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# reading the dataset
# NOTE(review): assumes dataset.csv has numeric feature columns 'X1', 'X2'
# and a binary 'Label' column in {0, 1} -- confirm against the data file.
data = pd.read_csv("dataset.csv")
data_label0 = data[data['Label'] == 0]
data_label1 = data[data['Label'] == 1]
# splitting the dataset into 2 sets: train set and test set
train_set = data.sample(frac=0.8)
test_set = data.drop(train_set.index)
# plotting the dataset
# plt.scatter(data_label0['X1'], data_label0['X2'], color='r')
# plt.scatter(data_label1['X1'], data_label1['X2'], color='b')
# plt.show()
# initializing the weights
# 2-2-1 MLP: W and V are the two hidden units' weights, U the output unit's;
# each weight vector has 3 entries (two inputs plus a bias term, hence the
# column of ones appended to the inputs below).
W_weights = np.random.normal(0, 1, 3).reshape(3, 1)
V_weights = np.random.normal(0, 1, 3).reshape(3, 1)
U_weights = np.random.normal(0, 1, 3).reshape(3, 1)
n_epoch = 10000
lr = 10
# training
n = len(train_set)
inputs = np.append(train_set.values[:, 0:2], np.ones(n).reshape(n, 1), 1)
yt = train_set.values[:, 2:3].reshape(n, 1)
for i in range(0, n_epoch):
    # Forward pass: sigmoid hidden activations z0, z1, then sigmoid output y.
    z0 = 1 / (1 + np.exp(-inputs.dot(W_weights)))
    z1 = 1 / (1 + np.exp(-inputs.dot(V_weights)))
    z = np.append(np.append(z0, z1, 1), np.ones(n).reshape(n, 1), 1)
    y = 1 / (1 + np.exp(-z.dot(U_weights)))
    # Backward pass: chain-rule gradients of the squared error (y - yt)^2;
    # y*(1-y) and z*(1-z) are the sigmoid derivatives.
    W_grad = inputs.transpose().dot(2 * (y - yt) * U_weights[0] * y * (1 - y) * z0 * (1 - z0))
    V_grad = inputs.transpose().dot(2 * (y - yt) * U_weights[1] * y * (1 - y) * z1 * (1 - z1))
    U_grad = z.transpose().dot(2 * (y - yt) * y * (1 - y))
    # Batch gradient descent step (gradients averaged over the n samples).
    W_weights -= lr * W_grad / n
    V_weights -= lr * V_grad / n
    U_weights -= lr * U_grad / n
# evaluate the prediction
true_predicts = 0
n = len(test_set)
test_inputs = np.append(test_set.values[:, 0:2], np.ones(n).reshape(n, 1), 1)
z0 = 1 / (1 + np.exp(-test_inputs.dot(W_weights)))
z1 = 1 / (1 + np.exp(-test_inputs.dot(V_weights)))
z = np.append(np.append(z0, z1, 1), np.ones(n).reshape(n, 1), 1)
# No final sigmoid needed here: only the sign of the pre-activation matters,
# since sigmoid(x) > 0.5 exactly when x > 0.
output = z.dot(U_weights)
for i in range(n):
    if output[i] > 0:
        if test_set.values[i, 2] == 1:
            true_predicts += 1
        plt.scatter(test_set.values[i, 0], test_set.values[i, 1], color='r')
    else:
        if test_set.values[i, 2] == 0:
            true_predicts += 1
        plt.scatter(test_set.values[i, 0], test_set.values[i, 1], color='b')
print("Accuracy: {}".format(true_predicts/n))
plt.show()
| [
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.ones",
"numpy.append",
"numpy.random.normal"
] | [((101, 127), 'pandas.read_csv', 'pd.read_csv', (['"""dataset.csv"""'], {}), "('dataset.csv')\n", (112, 127), True, 'import pandas as pd\n'), ((2174, 2184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2182, 2184), True, 'import matplotlib.pyplot as plt\n'), ((1706, 1726), 'numpy.append', 'np.append', (['z0', 'z1', '(1)'], {}), '(z0, z1, 1)\n', (1715, 1726), True, 'import numpy as np\n'), ((542, 567), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (558, 567), True, 'import numpy as np\n'), ((594, 619), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (610, 619), True, 'import numpy as np\n'), ((646, 671), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (662, 671), True, 'import numpy as np\n'), ((1005, 1025), 'numpy.append', 'np.append', (['z0', 'z1', '(1)'], {}), '(z0, z1, 1)\n', (1014, 1025), True, 'import numpy as np\n'), ((1902, 1970), 'matplotlib.pyplot.scatter', 'plt.scatter', (['test_set.values[i, 0]', 'test_set.values[i, 1]'], {'color': '"""r"""'}), "(test_set.values[i, 0], test_set.values[i, 1], color='r')\n", (1913, 1970), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2127), 'matplotlib.pyplot.scatter', 'plt.scatter', (['test_set.values[i, 0]', 'test_set.values[i, 1]'], {'color': '"""b"""'}), "(test_set.values[i, 0], test_set.values[i, 1], color='b')\n", (2070, 2127), True, 'import matplotlib.pyplot as plt\n'), ((786, 796), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (793, 796), True, 'import numpy as np\n'), ((1561, 1571), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1568, 1571), True, 'import numpy as np\n'), ((1728, 1738), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1735, 1738), True, 'import numpy as np\n'), ((1027, 1037), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1034, 1037), True, 'import numpy as np\n')] |
import os
import shutil
from typing import List
import numpy as np
import torch
# Source directory with UTKFace images named `age_gender_race_timestamp.jpg`
# (some files are missing one underscore-separated field).
ORIGIN_DIR = 'data/UTKFace'
DESTINATION_DIR = 'data/renamed'
def rename_files():
    """Copy-rename UTKFace images into DESTINATION_DIR as `age_gender_index.jpg`.

    Interactive: if the destination already exists, asks the user whether to
    wipe and recreate it. Moves (renames) files out of ORIGIN_DIR.
    """
    if not os.path.exists(DESTINATION_DIR):
        os.makedirs(DESTINATION_DIR)
    else:
        x = input("Do you want to rewrite the folder? Y/N").lower()
        if x == 'y':
            shutil.rmtree(DESTINATION_DIR, ignore_errors=True)
            os.makedirs(DESTINATION_DIR)
        elif x == 'n':
            print("You chose not to delete the folder")
        else:
            print("Please give Y/N response")
        # NOTE(review): execution falls through to the renaming loop even when
        # the user answers 'n' (or gives an invalid answer) -- confirm intended.
    for index, file in enumerate(os.listdir(ORIGIN_DIR)):
        # retrieving age and gender from file names
        # Most names have 4 underscore-separated fields; some are missing one,
        # in which case the 3-field split is tried instead.
        try:
            age, gender, _, _ = file.split('_')
        except ValueError:
            age, gender, _ = file.split('_')
        destination_name = '.'.join(['_'.join([str(age), str(gender), str(index)]), 'jpg'])
        os.rename(os.path.join(ORIGIN_DIR, file), os.path.join(DESTINATION_DIR, destination_name))
def convert_age(age: int, interval: List[int] = list(np.linspace(10, 120, 10))) -> int:
    """
    Converts age to label based on given interval.
    :param age: Age to be encoded.
    :param interval: Increasing list of upper bin boundaries
        (default: 10 boundaries evenly spaced from 10 to 120).
    :return: Index of the first boundary with ``age <= boundary``; ages above
        the last boundary are clamped into the last bin.
    """
    for index, value in enumerate(interval):
        if age <= value:
            return index
    # Fix: an age beyond the last boundary previously fell through and
    # returned None implicitly, which breaks downstream label handling.
    # Clamp such ages into the last bin instead.
    return len(interval) - 1
def index_to_one_hot(label: torch.Tensor, N: int) -> torch.Tensor:
    """
    Converts index tensor to one hot encoded tensor.
    :param label: Integer tensor of class indices, shape (B,) or (B, 1).
        (Fixed annotation: the original declared ``int`` but the body uses
        tensor operations.)
    :param N: Number of classes.
    :return: Float tensor of shape (B, N) with one-hot rows.
    """
    assert torch.max(label).item() < N
    # Normalise the index tensor to a column vector up front so scatter_
    # works for both 1-D and 2-D inputs. This replaces the original fragile
    # try/except RuntimeError retry around scatter_.
    label = label.view(-1, 1)
    zeros = torch.zeros(label.size(0), N)
    return zeros.scatter_(1, label, 1)
def total_variation_loss(image: torch.Tensor) -> torch.Tensor:
    """
    Calculates total variation loss which allows loss function to account for
    noise reduction in generated images (anisotropic TV with squared
    differences between adjacent pixels).

    Fixed annotations: the original declared ``np.ndarray -> float``, but the
    body uses the torch API (``.mean(dim=...)``) and returns a scalar tensor.

    :param image: Batched image tensor of shape (B, C, H, W) to calculate
        the loss from.
    :return: Scalar tensor with the value of the loss.
    """
    # Squared differences between horizontally / vertically adjacent pixels.
    rows = (image[:, :, :, 1:] - image[:, :, :, :-1]) ** 2
    columns = (image[:, :, 1:, :] - image[:, :, :-1:, :]) ** 2
    loss = (rows.mean(dim=3) + columns.mean(dim=2)).mean()
    return loss
| [
"os.makedirs",
"os.path.exists",
"torch.max",
"numpy.linspace",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] | [((176, 207), 'os.path.exists', 'os.path.exists', (['DESTINATION_DIR'], {}), '(DESTINATION_DIR)\n', (190, 207), False, 'import os\n'), ((217, 245), 'os.makedirs', 'os.makedirs', (['DESTINATION_DIR'], {}), '(DESTINATION_DIR)\n', (228, 245), False, 'import os\n'), ((621, 643), 'os.listdir', 'os.listdir', (['ORIGIN_DIR'], {}), '(ORIGIN_DIR)\n', (631, 643), False, 'import os\n'), ((1085, 1109), 'numpy.linspace', 'np.linspace', (['(10)', '(120)', '(10)'], {}), '(10, 120, 10)\n', (1096, 1109), True, 'import numpy as np\n'), ((357, 407), 'shutil.rmtree', 'shutil.rmtree', (['DESTINATION_DIR'], {'ignore_errors': '(True)'}), '(DESTINATION_DIR, ignore_errors=True)\n', (370, 407), False, 'import shutil\n'), ((420, 448), 'os.makedirs', 'os.makedirs', (['DESTINATION_DIR'], {}), '(DESTINATION_DIR)\n', (431, 448), False, 'import os\n'), ((941, 971), 'os.path.join', 'os.path.join', (['ORIGIN_DIR', 'file'], {}), '(ORIGIN_DIR, file)\n', (953, 971), False, 'import os\n'), ((973, 1020), 'os.path.join', 'os.path.join', (['DESTINATION_DIR', 'destination_name'], {}), '(DESTINATION_DIR, destination_name)\n', (985, 1020), False, 'import os\n'), ((1627, 1643), 'torch.max', 'torch.max', (['label'], {}), '(label)\n', (1636, 1643), False, 'import torch\n')] |
#!/usr/bin/env python3
"""Turn evaluation file into sequences of videos. Not sure how I'm going to
make this method-agnostic."""
import argparse
import json
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.misc import imread
import h5py
import addpaths # noqa
from plot_seqs import draw_poses
from common_pp.completion_video_common import load_sorted_paths
# Hard-coded paths into the IkeaDB dataset layout on the original machines.
FRAME_DIR = '/data/home/cherian/IkeaDataset/Frames/'
DB_PATH = '/data/home/cherian/IkeaDataset/IkeaClipsDB_withactions.mat'
POSE_DIR = '/home/sam/etc/cpm-keras/ikea-mat-poses/'
# Command-line interface: one positional .h5 results file plus two options.
parser = argparse.ArgumentParser()
parser.add_argument('results_h5_path', help='path to .h5 from eval code')
parser.add_argument(
    '--vid-dir',
    type=str,
    default=None,
    help='save videos to this directory instead of showing poses')
parser.add_argument(
    '--best-only',
    action='store_true',
    default=False,
    help='only show best sample')
if __name__ == '__main__':
    args = parser.parse_args()
    # Load the IkeaDB clip metadata from the MATLAB database.
    db = loadmat(DB_PATH, squeeze_me=True)['IkeaDB']
    # could get just one entry (one relevant to our vid) instead of looping
    # over all. oh well
    meta_dict = {}
    for video_entry in db:
        clip_path = video_entry['clip_path']
        prefix = '/data/home/cherian/IkeaDataset/Frames/'
        assert clip_path.startswith(prefix)
        path_suffix = clip_path[len(prefix):]
        # This is same number used to identify pose clip (not sequential!)
        tmp2_id = video_entry['video_id']
        new_name = 'vid%d' % tmp2_id
        meta_dict[new_name] = {'path_suffix': path_suffix, 'tmp2_id': tmp2_id}
    with h5py.File(args.results_h5_path, 'r') as fp:
        num_seqs = len(fp['/poses_2d_pred'])
        # pick a random one!
        sel_idx = np.random.randint(num_seqs)
        json_vid_names = json.loads(
            fp['/seq_ids_2d_json'].value.decode('utf8'))
        vid_name = json_vid_names[sel_idx]
        print('Selected sample set %d (video name %s)' % (sel_idx, vid_name))
        # Conditioning (observed) poses and predicted (forecast) poses are
        # stored separately in the results file and concatenated below.
        seq_frame_inds_pred = fp['/pred_frame_numbers_2d'].value[sel_idx]
        true_poses_pred = fp['/poses_2d_true'].value[sel_idx]
        pred_poses_pred = fp['/poses_2d_pred'].value[sel_idx]
        seq_frame_inds_cond = fp['/cond_frame_numbers_2d'].value[sel_idx]
        true_poses_cond = fp['/poses_2d_cond_true'].value[sel_idx]
        pred_poses_cond = fp['/poses_2d_cond_pred'].value[sel_idx]
        cond_steps = true_poses_cond.shape[0]
        seq_frame_inds = np.concatenate(
            [seq_frame_inds_cond, seq_frame_inds_pred])
        true_poses = np.concatenate([true_poses_cond, true_poses_pred], axis=0)
        # NOTE(review): the conditioning/prediction concatenation axis differs
        # here (axis=1) from true_poses (axis=0) -- presumably out_poses has a
        # leading samples axis; confirm against the eval code's output layout.
        out_poses = np.concatenate([pred_poses_cond, pred_poses_pred], axis=1)
        if args.best_only:
            # only use the samples which are closest to the ground truth
            diffs = true_poses[None, ...] - out_poses
            # gives us N*S array; need to min over S
            sq_diffs = (diffs**2).sum(axis=-1).sum(axis=-1).T
            best_inds = np.argmin(sq_diffs, axis=1)
            ax1_lin = np.arange(out_poses.shape[1])
            out_poses = out_poses[None, best_inds, ax1_lin]
        num_samples = out_poses.shape[0]
    if args.best_only:
        seq_names = ['Estimated pose', ('Decoded pose', 'Forecasted pose')]
    else:
        seq_names = ['True poses'] + \
            ['Sample %d' % d for d in range(num_samples)]
    pose_seqs = np.stack([true_poses] + [r for r in out_poses], axis=0)
    # NOTE(review): `fp` is read here AFTER the `with` block above has closed
    # the HDF5 file -- this access likely belongs inside the `with`; confirm.
    parents = fp['/parents_2d'].value
    meta = meta_dict[vid_name]
    path_suffix = meta['path_suffix']
    tmp2_id = meta['tmp2_id']
    # tmp2_id = int(re.match(r'^vid(\d+)$', vid_name).groups()[0])
    all_frame_fns = load_sorted_paths(os.path.join(FRAME_DIR, path_suffix))
    # for some reason there is one video directory with a subdirectory that has
    # a numeric name
    all_frame_fns = [f for f in all_frame_fns if f.endswith('.jpg')]
    frame_paths = [all_frame_fns[i] for i in seq_frame_inds]
    # we will preload frames
    orig_frames = [imread(fn) for fn in frame_paths]
    # drop brightness the hacky way
    dark_final = (orig_frames[cond_steps-1] / 2).astype('uint8')
    dark_list = [dark_final] * (len(orig_frames) - cond_steps)
    trunc_frames = orig_frames[:cond_steps] + dark_list
    pose_mat_path = os.path.join(POSE_DIR, 'pose_clip_%d.mat' % tmp2_id)
    # NOTE(review): `pose_mat` is loaded but never used below.
    pose_mat = loadmat(pose_mat_path, squeeze_me=True)
    # important not to let return value be gc'd (anims won't run otherwise!)
    anim = draw_poses(
        None,
        parents,
        pose_seqs,
        frames=[orig_frames] + [trunc_frames] * num_samples,
        subplot_titles=seq_names,
        fps=50 / 9.0,
        crossover=cond_steps)
    if args.vid_dir is not None:
        # save video
        print('Saving video')
        try:
            os.makedirs(args.vid_dir)
        except FileExistsError:
            pass
        key = vid_name + '-' + str(sel_idx)
        anim.save(
            os.path.join(args.vid_dir, key + '.mp4'),
            writer='avconv',
            # no idea what bitrate defaults to, but empirically it seems to be
            # around 1000 (?)
            bitrate=3000,
            # dpi defaults to 300
            dpi=300,
            # go at one-third of original speed
            fps=50 / (3 * 3))
    else:
        print('Showing sequence')
        plt.show()
| [
"numpy.stack",
"h5py.File",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"os.makedirs",
"scipy.io.loadmat",
"numpy.argmin",
"scipy.misc.imread",
"numpy.random.randint",
"numpy.arange",
"plot_seqs.draw_poses",
"os.path.join",
"numpy.concatenate"
] | [((601, 626), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (624, 626), False, 'import argparse\n'), ((4361, 4413), 'os.path.join', 'os.path.join', (['POSE_DIR', "('pose_clip_%d.mat' % tmp2_id)"], {}), "(POSE_DIR, 'pose_clip_%d.mat' % tmp2_id)\n", (4373, 4413), False, 'import os\n'), ((4429, 4468), 'scipy.io.loadmat', 'loadmat', (['pose_mat_path'], {'squeeze_me': '(True)'}), '(pose_mat_path, squeeze_me=True)\n', (4436, 4468), False, 'from scipy.io import loadmat\n'), ((4558, 4713), 'plot_seqs.draw_poses', 'draw_poses', (['None', 'parents', 'pose_seqs'], {'frames': '([orig_frames] + [trunc_frames] * num_samples)', 'subplot_titles': 'seq_names', 'fps': '(50 / 9.0)', 'crossover': 'cond_steps'}), '(None, parents, pose_seqs, frames=[orig_frames] + [trunc_frames] *\n num_samples, subplot_titles=seq_names, fps=50 / 9.0, crossover=cond_steps)\n', (4568, 4713), False, 'from plot_seqs import draw_poses\n'), ((1025, 1058), 'scipy.io.loadmat', 'loadmat', (['DB_PATH'], {'squeeze_me': '(True)'}), '(DB_PATH, squeeze_me=True)\n', (1032, 1058), False, 'from scipy.io import loadmat\n'), ((1651, 1687), 'h5py.File', 'h5py.File', (['args.results_h5_path', '"""r"""'], {}), "(args.results_h5_path, 'r')\n", (1660, 1687), False, 'import h5py\n'), ((1787, 1814), 'numpy.random.randint', 'np.random.randint', (['num_seqs'], {}), '(num_seqs)\n', (1804, 1814), True, 'import numpy as np\n'), ((2507, 2565), 'numpy.concatenate', 'np.concatenate', (['[seq_frame_inds_cond, seq_frame_inds_pred]'], {}), '([seq_frame_inds_cond, seq_frame_inds_pred])\n', (2521, 2565), True, 'import numpy as np\n'), ((2600, 2658), 'numpy.concatenate', 'np.concatenate', (['[true_poses_cond, true_poses_pred]'], {'axis': '(0)'}), '([true_poses_cond, true_poses_pred], axis=0)\n', (2614, 2658), True, 'import numpy as np\n'), ((2679, 2737), 'numpy.concatenate', 'np.concatenate', (['[pred_poses_cond, pred_poses_pred]'], {'axis': '(1)'}), '([pred_poses_cond, pred_poses_pred], axis=1)\n', (2693, 
2737), True, 'import numpy as np\n'), ((3466, 3521), 'numpy.stack', 'np.stack', (['([true_poses] + [r for r in out_poses])'], {'axis': '(0)'}), '([true_poses] + [r for r in out_poses], axis=0)\n', (3474, 3521), True, 'import numpy as np\n'), ((3768, 3804), 'os.path.join', 'os.path.join', (['FRAME_DIR', 'path_suffix'], {}), '(FRAME_DIR, path_suffix)\n', (3780, 3804), False, 'import os\n'), ((4086, 4096), 'scipy.misc.imread', 'imread', (['fn'], {}), '(fn)\n', (4092, 4096), False, 'from scipy.misc import imread\n'), ((5418, 5428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5426, 5428), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3058), 'numpy.argmin', 'np.argmin', (['sq_diffs'], {'axis': '(1)'}), '(sq_diffs, axis=1)\n', (3040, 3058), True, 'import numpy as np\n'), ((3081, 3110), 'numpy.arange', 'np.arange', (['out_poses.shape[1]'], {}), '(out_poses.shape[1])\n', (3090, 3110), True, 'import numpy as np\n'), ((4876, 4901), 'os.makedirs', 'os.makedirs', (['args.vid_dir'], {}), '(args.vid_dir)\n', (4887, 4901), False, 'import os\n'), ((5027, 5067), 'os.path.join', 'os.path.join', (['args.vid_dir', "(key + '.mp4')"], {}), "(args.vid_dir, key + '.mp4')\n", (5039, 5067), False, 'import os\n')] |
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
##### To load the files with the GP evaluated points:
# NOTE(review): the helper functions used below (load_training_points,
# load_GP_table_prior_draws, make_cuts_GP_mean_std_post,
# transform_sum_diff_params_inverse, plot_cornerpy_wrapper) presumably come
# from the `src.functions_*` star imports above -- confirm against that package.
savefigures = False        # if True, figures are written to savefigures_directory
transformed_rates = True   # if True, also use the (ln(lc*lp), ln(lp/lc)) transformed rate coordinates
run_directory = 'Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_per_mass/durations_norm_circ_singles_multis_GF2020_KS/GP_files/'
# Machine-specific absolute paths; edit when running on another machine.
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/' + run_directory
sub_directory = ''
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/' + run_directory + sub_directory
model_name = 'AMD_Model'
# LaTeX symbols for the active (free) model parameters; commented entries are
# parameters not used by this particular model variant.
active_params_symbols = [#r'$f_{\sigma_{i,\rm high}}$',
                         #r'$f_{\rm swpa}$',
                         #r'$f_{\rm swpa,bluer}$',
                         #r'$f_{\rm swpa,redder}$',
                         r'$f_{\rm swpa,med}$',
                         r'$df_{\rm swpa}/d(b_p-r_p-E^*)$',
                         r'$\ln{(\lambda_c)}$',
                         r'$\ln{(\lambda_p)}$',
                         r'$\Delta_c$',
                         r'$\alpha_P$',
                         #r'$\alpha_{P,\rm med}$',
                         #r'$d\alpha_P/d(b_p-r_p-E^*)$',
                         r'$\alpha_{R1}$',
                         r'$\alpha_{R2}$',
                         r'$\sigma_{e,1}$', #'$\sigma_{e,1}$'
                         #r'$\sigma_{i,\rm high}$ ($^\circ$)', # '\n',
                         #r'$\sigma_{i,\rm low}$ ($^\circ$)', # '\n',
                         r'$\sigma_R$',
                         r'$\sigma_P$'
                         ] #this list of parameter symbols must match the order of parameters in the loaded table!
# Per-parameter flag: True means the symbol is long and gets a line break in
# its panel title. Must align index-by-index with active_params_symbols.
long_symbols = [False, True, False, False, False, False, False, False, False, False, False] # Clustered_P_R_fswp_bprp_AMD_sys with Delta_c + sigma_{e,1}
#long_symbols = [False, True, False, False, False, False, False, False, False] # Clustered_P_R_fswp_bprp_AMD_sys
#long_symbols = [False, True, True, True, True, True, True, True, True, True, True, True, False, False] # Clustered_P_R_fswp_alphaP_bprp
#long_symbols = [False, True, True, True, True, True, True, True, True, True, True, False, False] # Clustered_P_R_alphaP_bprp
#long_symbols = [False, True, True, True, True, False, True, True, True, True, True, False, False] # Clustered_P_R_fswp_bprp
#long_symbols = [False, True, True, True, False, True, True, True, True, True, False, False] # Clustered_P_R_fswp
dims = len(active_params_symbols)  # number of free parameters
active_params_transformed_symbols = np.copy(active_params_symbols)
# Indices of ln(lambda_c), ln(lambda_p): the transformed coordinates are their
# sum and difference (i.e. ln of product and ratio of the rates).
i_transformed, j_transformed = 2, 3
if transformed_rates:
    active_params_transformed_symbols[i_transformed] = r'$\ln{(\lambda_c \lambda_p)}$' #'\n'
    active_params_transformed_symbols[j_transformed] = r'$\ln{(\lambda_p/\lambda_c)}$' #'\n'
# To load the training points:
data_train = load_training_points(dims, file_name_path=loadfiles_directory, file_name='Active_params_recomputed_distances_table_best90000_every9.txt')
active_params_names = np.array(data_train['active_params_names'])
# To load the tables of points drawn from the prior based on the GP model:
# GP hyperparameters / bookkeeping values used only to build the file name.
n_train, mean_f, sigma_f, lscales, vol = 2000, 100.0, 2.7, 9.7, 109.18
n_points, max_mean, max_std, max_post = 50000, 'Inf', 'Inf', -35.0 #100000, 'Inf', 'Inf', 'Inf'
file_name = 'GP_train%s_meanf%s_sigmaf%s_lscales%s_vol%s_points%s_mean%s_std%s_post%s.csv' % (n_train, mean_f, sigma_f, lscales, vol, n_points, max_mean, max_std, max_post)
xprior_accepted_table = load_GP_table_prior_draws(file_name, file_name_path=loadfiles_directory + sub_directory)
active_params_transformed_names = np.array(xprior_accepted_table.dtype.names[:dims])
GP_model_name = '_GP_train%s_meanf%s_sigmaf%s_lscales%s' % (n_train, mean_f, sigma_f, lscales)
model_name = model_name + GP_model_name
##### To plot the mean, std, and posterior draws as histograms:
#plot_fig_hists_GP_draws((16,8), xprior_accepted_table, save_name=savefigures_directory + model_name + '_vol%s_prior%s_GP_mean%s_std%s_post%s_hists.pdf' % (vol, len(xprior_accepted_table), max_mean, max_std, max_post), save_fig=savefigures)
plt.show()
##### To make cuts for the posterior:
# Keep only GP draws whose (mean, std, posterior) pass these thresholds.
mean_cut, std_cut, post_cut = np.inf, np.inf, -30.0 #-90.0
#xprior_accepts = make_cuts_GP_mean_std_post(active_params_transformed_names, xprior_accepted_table, max_mean=mean_cut, max_std=std_cut, max_post=post_cut)
if transformed_rates:
    # Cut in the transformed coordinates, then map back to the original ones.
    xprior_accepts_transformed = make_cuts_GP_mean_std_post(active_params_transformed_names, xprior_accepted_table, max_mean=mean_cut, max_std=std_cut, max_post=post_cut)
    xprior_accepts = transform_sum_diff_params_inverse(xprior_accepts_transformed, i_transformed, j_transformed)
else:
    xprior_accepts = make_cuts_GP_mean_std_post(active_params_names, xprior_accepted_table, max_mean=mean_cut, max_std=std_cut, max_post=post_cut)
##### To make corner plots for the GP draws:
# Disabled variant kept for reference (same plots, saved straight to file):
'''
if transformed_rates:
    plot_cornerpy_wrapper(active_params_symbols, xprior_accepts, save_name=savefigures_directory + model_name + '_vol%s_prior%s_GP_mean%s_std%s_post%s_corner.pdf' % (vol, len(xprior_accepts), mean_cut, std_cut, post_cut), save_fig=savefigures)
    plot_cornerpy_wrapper(active_params_transformed_symbols, xprior_accepts_transformed, save_name=savefigures_directory + model_name + '_vol%s_prior%s_GP_mean%s_std%s_post%s_transformed_corner.pdf' % (vol, len(xprior_accepts_transformed), mean_cut, std_cut, post_cut), save_fig=savefigures)
else:
    plot_cornerpy_wrapper(active_params_symbols, xprior_accepts, save_name=savefigures_directory + model_name + '_vol%s_prior%s_GP_mean%s_std%s_post%s_corner.pdf' % (vol, len(xprior_accepts), mean_cut, std_cut, post_cut), save_fig=savefigures)
'''
# Corner plot of the accepted draws, then add median +/- 1-sigma titles to the
# diagonal (1-d marginal) panels.
fig = plot_cornerpy_wrapper(active_params_symbols, xprior_accepts, show_titles=False)
axes = np.array(fig.axes).reshape((dims, dims))
for i in range(dims):
    # 16th/50th/84th percentiles -> median and +/- 1-sigma credible interval.
    q = corner.quantile(xprior_accepts[:,i], [0.16, 0.5, 0.84])
    q_pm = np.diff(q)
    if long_symbols[i]:
        # Long parameter symbols get a line break before the value.
        title = active_params_symbols[i] + '=\n' + r'$%s_{-%s}^{+%s}$' % ('{:0.2f}'.format(q[1]), '{:0.2f}'.format(q_pm[0]), '{:0.2f}'.format(q_pm[1]))
    else:
        title = active_params_symbols[i] + r'$=%s_{-%s}^{+%s}$' % ('{:0.2f}'.format(q[1]), '{:0.2f}'.format(q_pm[0]), '{:0.2f}'.format(q_pm[1]))
    #if active_params_names[i] == 'sigma_hk':
    #    title = active_params_symbols[i] + '=\n' + r'$%s_{-%s}^{+%s}$' % ('{:0.3f}'.format(q[1]), '{:0.3f}'.format(q_pm[0]), '{:0.3f}'.format(q_pm[1]))
    ax = axes[i,i]  # diagonal panel holds the 1-d marginal of parameter i
    ax.set_title(title, fontsize=20)
if savefigures:
    plt.savefig(savefigures_directory + model_name + '_vol%s_prior%s_GP_mean%s_std%s_post%s_corner.pdf' % (vol, len(xprior_accepts), mean_cut, std_cut, post_cut))
    plt.close()
#plt.show()
plt.close()
#To compute the quantiles directly (and get more digits, say for the eccentricity scale):
for i in range(dims):
    q = corner.quantile(xprior_accepts[:,i], [0.16, 0.5, 0.84])
    q_pm = np.diff(q)
    if active_params_names[i] == 'sigma_hk':
        # Extra decimal digit for the eccentricity scale.
        print('%s = %s_{-%s}^{+%s}' % ('{:<50}'.format(active_params_names[i]), '{:0.3f}'.format(q[1]), '{:0.3f}'.format(q_pm[0]), '{:0.3f}'.format(q_pm[1])))
    else:
        print('%s = %s_{-%s}^{+%s}' % ('{:<50}'.format(active_params_names[i]), '{:0.2f}'.format(q[1]), '{:0.2f}'.format(q_pm[0]), '{:0.2f}'.format(q_pm[1])))
    if (active_params_names[i] == 'log_rate_clusters') or (active_params_names[i] == 'log_rate_planets_per_cluster'):
        # Also report the rates un-logged (quantiles commute with exp).
        q_unlogged = np.exp(q)
        q_pm_unlogged = np.diff(q_unlogged)
        print('%s = %s_{-%s}^{+%s}' % ('{:<50}'.format(active_params_names[i][4:]), '{:0.2f}'.format(q_unlogged[1]), '{:0.2f}'.format(q_pm_unlogged[0]), '{:0.2f}'.format(q_pm_unlogged[1])))
##### To plot single corner plots for pairs of parameters:
# Disabled example: 2-d histogram/contour panel for one pair of parameters
# (indices ix, iy) with marginal histograms on the top and right.
'''
afs, tfs, lfs = 20, 20, 16
bins = 20
ix, iy = 2, 6
qx = corner.quantile(xprior_accepts[:,ix], [0.16, 0.5, 0.84])
qy = corner.quantile(xprior_accepts[:,iy], [0.16, 0.5, 0.84])
qx_pm, qy_pm = np.diff(qx), np.diff(qy)
fig = plt.figure(figsize=(8,8))
plot = GridSpec(5,5,left=0.17,bottom=0.15,right=0.92,top=0.9,wspace=0,hspace=0)
ax = plt.subplot(plot[1:,:4])
corner.hist2d(xprior_accepts[:,ix], xprior_accepts[:,iy], bins=bins, plot_density=True, contour_kwargs={'colors': ['0.6','0.4','0.2','0']}, data_kwargs={'color': 'k'})
ax.tick_params(axis='both', labelsize=afs)
plt.xlabel(active_params_symbols[ix], fontsize=tfs)
plt.ylabel(active_params_symbols[iy], fontsize=tfs)
ax.text(x=0.05, y=0.9, s='KS', ha='left', fontsize=tfs, transform=ax.transAxes)
ax = plt.subplot(plot[0,:4]) # top histogram
plt.title(active_params_symbols[ix] + r'$= %s_{-%s}^{+%s}$' % ('{:0.2f}'.format(qx[1]), '{:0.2f}'.format(qx_pm[0]), '{:0.2f}'.format(qx_pm[1])), fontsize=tfs)
plt.hist(xprior_accepts[:,ix], bins=bins, histtype='step', color='k', ls='-', lw=1)
plt.xticks([])
plt.yticks([])
#plt.xlim()
ax = plt.subplot(plot[1:,4]) # side histogram
plt.hist(xprior_accepts[:,iy], bins=bins, histtype='step', orientation='horizontal', color='k', ls='-', lw=1)
plt.xticks([])
plt.yticks([])
ax.yaxis.set_label_position('right')
plt.ylabel(active_params_symbols[iy] + r'$= %s_{-%s}^{+%s}$' % ('{:0.2f}'.format(qy[1]), '{:0.2f}'.format(qy_pm[0]), '{:0.2f}'.format(qy_pm[1])), fontsize=tfs)
#plt.ylim()
#plt.savefig(savefigures_directory + model_name + '_vol%s_prior%s_GP_mean%s_std%s_post%s_corner_%s_%s.pdf' % (vol, len(xprior_accepts), mean_cut, std_cut, post_cut, ix, iy))
plt.close()
'''
##### To make a custom plotting function for making 'corner' plots with contours based on an array of values instead of the density of points:
# Disabled example: contour 'corner' plots of the GP mean/std 2-d grids, with
# a random sample of the training points overplotted.
'''
grid_dims = 50
GP_grids = load_GP_2d_grids(dims, n_train, mean_f, sigma_f, lscales, file_name_path=loadfiles_directory, grid_dims=grid_dims)
dist_cut = -15. # after subtracting the mean function
GP_prob_below_dist_cut_2d_grids = cdf_normal(dist_cut, mu=GP_grids['mean_grids'], std=GP_grids['std_grids'])
xtrain_sample = data_train['xtrain'][np.random.choice(np.arange(len(data_train['xtrain'])), 1000)]
plot_contours_and_points_corner(active_params_symbols, GP_grids['xlower'], GP_grids['xupper'], GP_grids['mean_grids'], xpoints=xtrain_sample, points_alpha=0.1, save_name=savefigures_directory + model_name + '_grids2d_mean_corner.pdf', save_fig=savefigures)
plot_contours_and_points_corner(active_params_symbols, GP_grids['xlower'], GP_grids['xupper'], GP_grids['std_grids'], xpoints=xtrain_sample, points_alpha=0.1, save_name=savefigures_directory + model_name + '_grids2d_std_corner.pdf', save_fig=savefigures)
plot_contours_and_points_corner(active_params_symbols, GP_grids['xlower'], GP_grids['xupper'], GP_prob_below_dist_cut_2d_grids, xpoints=xtrain_sample, points_alpha=0.1, save_name=savefigures_directory + model_name + '_grids2d_frac_mean%s_corner.pdf' % dist_cut, save_fig=savefigures)
plt.close()
'''
| [
"matplotlib.pyplot.show",
"numpy.copy",
"matplotlib.pyplot.close",
"os.path.realpath",
"corner.quantile",
"numpy.diff",
"numpy.array",
"numpy.exp"
] | [((3401, 3431), 'numpy.copy', 'np.copy', (['active_params_symbols'], {}), '(active_params_symbols)\n', (3408, 3431), True, 'import numpy as np\n'), ((3881, 3924), 'numpy.array', 'np.array', (["data_train['active_params_names']"], {}), "(data_train['active_params_names'])\n", (3889, 3924), True, 'import numpy as np\n'), ((4488, 4538), 'numpy.array', 'np.array', (['xprior_accepted_table.dtype.names[:dims]'], {}), '(xprior_accepted_table.dtype.names[:dims])\n', (4496, 4538), True, 'import numpy as np\n'), ((4986, 4996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4994, 4996), True, 'import matplotlib.pyplot as plt\n'), ((7611, 7622), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7620, 7622), True, 'import matplotlib.pyplot as plt\n'), ((6738, 6794), 'corner.quantile', 'corner.quantile', (['xprior_accepts[:, i]', '[0.16, 0.5, 0.84]'], {}), '(xprior_accepts[:, i], [0.16, 0.5, 0.84])\n', (6753, 6794), False, 'import corner\n'), ((6805, 6815), 'numpy.diff', 'np.diff', (['q'], {}), '(q)\n', (6812, 6815), True, 'import numpy as np\n'), ((7586, 7597), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7595, 7597), True, 'import matplotlib.pyplot as plt\n'), ((7744, 7800), 'corner.quantile', 'corner.quantile', (['xprior_accepts[:, i]', '[0.16, 0.5, 0.84]'], {}), '(xprior_accepts[:, i], [0.16, 0.5, 0.84])\n', (7759, 7800), False, 'import corner\n'), ((7811, 7821), 'numpy.diff', 'np.diff', (['q'], {}), '(q)\n', (7818, 7821), True, 'import numpy as np\n'), ((6667, 6685), 'numpy.array', 'np.array', (['fig.axes'], {}), '(fig.axes)\n', (6675, 6685), True, 'import numpy as np\n'), ((8334, 8343), 'numpy.exp', 'np.exp', (['q'], {}), '(q)\n', (8340, 8343), True, 'import numpy as np\n'), ((8368, 8387), 'numpy.diff', 'np.diff', (['q_unlogged'], {}), '(q_unlogged)\n', (8375, 8387), True, 'import numpy as np\n'), ((707, 733), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (723, 733), False, 'import os\n')] |
from enum import Enum
import numpy as np
class TradeDirection(Enum):
    """Direction of a trade; only LONG is currently supported."""
    LONG = 1
    #SHORT = -1


class Trade():
    """A single leveraged trade with take-profit, stop-loss and liquidation levels.

    Exit prices are derived from ``entry_price`` and percentage *price* moves
    (not margin moves); the realized profit scales the price move by
    ``leverage``. ``quantity`` is the posted margin in quote currency.
    """

    def __init__(self, entry_price : float, quantity : float, direction : TradeDirection, take_profit_pct : float,
                 commission_pct : float = 0.001, stop_loss_pct : float = None, leverage : int = 10) -> None:
        self._entry_price = entry_price
        self._direction = direction
        self._take_profit_pct = abs(take_profit_pct)
        # Stop loss is stored as a negative price move; -100 means "no stop loss"
        # (liquidation will trigger first for any leverage >= 1).
        self._stop_loss_pct = -abs(stop_loss_pct) if stop_loss_pct is not None else -100
        self._leverage = int(np.clip(leverage, 1, 100))
        self._quantity = quantity
        self._commission_pct = commission_pct
        # Maps a signed percentage price move to an absolute price.
        self.__calc_price_from_pct = lambda pct: self._entry_price * (1 + ((pct / 100) * self._direction.value))
        self._take_profit_price = self.__calc_price_from_pct(self._take_profit_pct)
        self._stop_loss_price = self.__calc_price_from_pct(self._stop_loss_pct)
        self._liq_price = self._liquidation_price()
        self.profit = 0  # realized profit in quote currency, set by has_closed()
        if direction != TradeDirection.LONG:
            raise NotImplementedError

    def has_closed(self, high, low) -> bool:
        """Check a candle's high/low against the exit levels.

        Sets ``self.profit`` and returns True if the trade closed. Take profit
        is checked first, then stop loss, then liquidation (only OHLC extremes
        are known, so intra-candle ordering is assumed optimistic-to-worst).
        """
        # Profit per 1% price move, signed by direction.
        c = self._leverage * self._quantity * self._direction.value / 100
        is_closed = False
        if high >= self._take_profit_price:
            self.profit = self._take_profit_pct * c
            self.profit *= 1 - self._commission_pct
            is_closed = True
        elif low <= self._stop_loss_price:
            self.profit = self._stop_loss_pct * c
            self.profit *= 1 + self._commission_pct
            # A leveraged loss can never exceed the posted margin.
            self.profit = max(self.profit, -self._quantity)
            is_closed = True
        elif low <= self._liq_price:
            # Liquidation wipes out the full margin.
            self.profit = -self._quantity
            is_closed = True
        return is_closed

    def _liquidation_price(self) -> float:
        # BUGFIX: liquidation occurs when the *leveraged* loss reaches 100% of
        # margin, i.e. after an adverse price move of 100/leverage percent --
        # not after a -100% price move (price 0), which made the liquidation
        # branch unreachable for any realistic price.
        return self.__calc_price_from_pct(-100 / self._leverage)

    @property
    def profit_pct(self):
        """Realized profit as a fraction of the posted quantity (margin)."""
        return self.profit / self._quantity
import unittest
class TestTrade(unittest.TestCase):
    """Unit tests for Trade's take-profit / stop-loss / liquidation handling."""

    def setUp(self) -> None:
        # Idiomatic unittest fixture (instead of overriding __init__): a fresh
        # trade per test -- entry 1000, margin 1000, default 10x leverage,
        # +1% take profit, -5% stop loss.
        self.trade = Trade(1000, 1000, TradeDirection.LONG, commission_pct=0.001, take_profit_pct=1, stop_loss_pct=-5)

    def test_calc_take_profit(self):
        self.assertEqual(self.trade._take_profit_price, 1010, "Take profit price should be 1010")

    def test_calc_stop_loss(self):
        self.assertEqual(self.trade._stop_loss_price, 950, "Stop loss price should be 950")

    def test_continuing(self):
        # Candle stays strictly between all exit levels -> trade remains open.
        self.assertEqual(self.trade.has_closed(1000, 1000), False)

    def test_close_profit(self):
        close = self.trade.has_closed(2000, 1000)
        self.assertEqual(close, True)
        # +1% price move * 10x leverage on 1000 margin, minus 0.1% commission.
        self.assertAlmostEqual(self.trade.profit, 99.9)
        self.assertAlmostEqual(self.trade.profit_pct, 0.0999)

    def test_close_loss(self):
        close = self.trade.has_closed(1000, 500)
        self.assertEqual(close, True)
        # -5% price move * 10x leverage on 1000 margin, plus 0.1% commission.
        self.assertAlmostEqual(self.trade.profit, -500.5)
        self.assertAlmostEqual(self.trade.profit_pct, -0.5005)

    # Disabled: exercises liquidation with the stop loss pushed out of the way.
    # Re-enable once the liquidation price calculation accounts for leverage.
    # def test_close_liquidation(self):
    #     self.trade._stop_loss_price = 0
    #     close = self.trade.has_closed(1000, 100)
    #     self.assertEqual(close, True)
    #     self.assertEqual(self.trade.profit, -1000)
    #     self.assertEqual(self.trade.profit_pct, -1)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
"unittest.main",
"numpy.clip"
] | [((3405, 3420), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3418, 3420), False, 'import unittest\n'), ((586, 611), 'numpy.clip', 'np.clip', (['leverage', '(1)', '(100)'], {}), '(leverage, 1, 100)\n', (593, 611), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rc('font',family='Times New Roman')
plt.rc('font',size=9.5)
import random
import os
cwd = r'..\large_scale_synchronization_r4'
import csv
def main(nb_DC, nb_warehouse, nb_pick_up_station, square_length):
    """Randomly place network nodes, write node/travel-time CSVs, and plot them.

    Writes 'node.csv', 'transshipment_time.csv', 'travel_time_1.csv',
    'travel_time_2.csv' and 'imag.png' under the module-level `cwd`, shows a
    scatter plot, and returns (travel_time_1, travel_time_2, transshipment_cost).

    NOTE(review): relies on module-level globals `cwd`, `nb_rec`, `nb_del`
    (set in the __main__ block) -- confirm they are defined before calling.
    NOTE(review): np.random.randint is called with float bounds
    (-square_length*0.9); behavior across numpy versions should be verified.
    """
    with open(os.path.join(cwd, 'node.csv'), 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['node_name', 'node_id', 'x_coord', 'y_coord', 'FT'])
        node_id = 1
        print('distribution center')
        count = 0
        x_dc = np.zeros(nb_DC)
        y_dc = np.zeros(nb_DC)
        # Distribution centers are kept inside 90% of the square to leave
        # room for warehouses/stations at the edges.
        while count < nb_DC:
            x_dc[count] = np.random.randint(-square_length * 0.9, square_length * 0.9)
            y_dc[count] = np.random.randint(-square_length * 0.9, square_length * 0.9)
            print(x_dc[count], y_dc[count])
            line = ['DC' + str(count + 1), node_id, x_dc[count], y_dc[count], 'distribution_center']
            writer.writerow(line)
            node_id += 1
            count += 1
        print('random generate ' + str(count) + ' distribution centers...')
        # DC-to-DC transshipment times, replicated per (receive, delivery) wave pair.
        transshipment_cost = np.zeros([nb_DC * nb_rec, nb_DC * nb_del])
        for k in range(nb_rec):
            for l in range(nb_del):
                for i in range(nb_DC):
                    for j in range(nb_DC):
                        # Manhattan distance
                        transshipment_cost[k * nb_DC + i][l * nb_DC + j] = abs(y_dc[i] - y_dc[j]) + abs(x_dc[i] - x_dc[j])
        # convert array into dataframe
        DF = pd.DataFrame(transshipment_cost)
        # save the dataframe as a csv file
        DF.to_csv(os.path.join(cwd, "transshipment_time.csv"), index=False, header=False)
        print('warehouse')
        x_w = np.zeros(nb_warehouse)
        y_w = np.zeros(nb_warehouse)
        count = 0
        while count < nb_warehouse:
            x_w[count] = np.random.randint(-square_length, square_length)
            y_w[count] = np.random.randint(-square_length, square_length)
            print(x_w[count], y_w[count])
            line = ['WH' + str(count + 1), node_id, x_w[count], y_w[count], 'warehouse']
            writer.writerow(line)
            node_id += 1
            count += 1
        print('random generate ' + str(count) + ' warehouses...')
        # Warehouse-to-DC travel times (halved Manhattan distance), per receive wave.
        travel_time_1 = np.zeros([nb_DC * nb_rec, nb_DC * nb_rec])
        for k in range(nb_rec):
            for i in range(nb_warehouse):
                for j in range(nb_DC):
                    # Manhattan distance
                    travel_time_1[i][k * nb_DC + j] = (abs(y_w[i] - y_dc[j]) + abs(x_w[i] - x_dc[j])) / 2
        # convert array into dataframe
        DF = pd.DataFrame(travel_time_1)
        # save the dataframe as a csv file
        DF.to_csv(os.path.join(cwd, "travel_time_1.csv"), index=False, header=False)
        print('pick-up station')
        x_s = np.zeros(nb_pick_up_station)
        y_s = np.zeros(nb_pick_up_station)
        count = 0
        while count < nb_pick_up_station:
            x_s[count] = np.random.randint(-square_length, square_length)
            y_s[count] = np.random.randint(-square_length, square_length)
            print(x_s[count], y_s[count])
            line = ['PS' + str(count + 1), node_id, x_s[count], y_s[count], 'pick-up_station']
            writer.writerow(line)
            node_id += 1
            count += 1
        print('random generate ' + str(count) + ' pick up stations...')
        # DC-to-station travel times (halved Manhattan distance), per delivery wave.
        travel_time_2 = np.zeros([nb_DC * nb_del, nb_DC * nb_del])
        for k in range(nb_del):
            for i in range(nb_pick_up_station):
                for j in range(nb_DC):
                    travel_time_2[nb_DC * k + j][i] = (abs(y_s[i] - y_dc[j]) + abs(x_s[i] - x_dc[j])) / 2
        # convert array into dataframe
        DF = pd.DataFrame(travel_time_2)
        # save the dataframe as a csv file
        DF.to_csv(os.path.join(cwd, "travel_time_2.csv"), index=False, header=False)
    # Scatter plot of all generated nodes, saved and shown interactively.
    plt.figure(figsize=(8, 8.1), dpi=125)
    plt.plot(x_dc, y_dc, 'o', label='Distribution centers', markersize=8, c='k')
    plt.plot(x_w, y_w, 'D', label='Warehouses', markersize=5, c='b')
    plt.plot(x_s, y_s, 'D', label='Pick-up stations', markersize=5, c='r')
    plt.xlim((-square_length - 3, square_length + 3))
    plt.ylim((-square_length - 3, square_length + 3))
    my_x_ticks = np.arange(-square_length - 3, square_length + 3, 1)
    my_y_ticks = np.arange(-square_length - 3, square_length + 3, 1)
    plt.xticks(my_x_ticks)
    plt.yticks(my_y_ticks)
    plt.legend(loc='best')
    plt.grid(True)
    plt.title('Random Scatter')
    plt.savefig(os.path.join(cwd, 'imag.png'))
    plt.show()
    return travel_time_1, travel_time_2, transshipment_cost
if __name__ == '__main__':
    # Problem data: receive/delivery waves (hours), warehouse order times and
    # station pick-up times (one entry per warehouse/station).
    receive_wave = [11, 16]
    order_time = [6, 9, 10, 14, 17, 6, 9, 11, 14, 17, 7, 10, 12, 15, 8, 16]
    delivery_wave = [6, 14]
    pick_up_time = [8, 9.5, 16, 18, 7.5, 10, 17, 19, 8, 9.5, 17, 19, 20, 21, 17.5, 13, 8, 9, 11, 15]
    period_time = 24  # hours per planning period
    timewindow = 24   # maximum allowed dwell (hours) on a link
    # NOTE(review): `global` at module level is a no-op; kept for documentation
    # that main() reads nb_rec / nb_del as globals.
    global nb_rec
    global nb_del
    nb_rec = len(receive_wave)
    nb_del = len(delivery_wave)
    nb_DC = 12
    nb_warehouse = len(order_time)
    nb_station = len(pick_up_time)
    city_radius = 6
    fix_transport_cost = 2 # dollar per hour
    fix_inventory_cost = 0.1 # dollar per hour
    var_transport_rate = 1 # dollar per hour
    var_inventory_cost = 0.1 # dollar per hour
    data_flow = np.loadtxt(os.path.join(cwd, 't_flow_matrix.txt'))
    print('first-stage timetable...')
    # Row 0: receive wave per DC copy; row 1: order time per warehouse (-1 = unused).
    timetable_1 = np.zeros((2, nb_DC * nb_rec))
    for i in range(2):
        timetable_1[0][i * nb_DC:i * nb_DC + nb_DC] = receive_wave[i]
    timetable_1[1, :] = -1
    for w in range(nb_warehouse):
        # NOTE(review): the random draw is immediately overwritten by the fixed
        # order_time -- the randint line looks like leftover experimentation.
        timetable_1[1, w] = np.random.randint(0, 24)
        timetable_1[1, w] = order_time[w]
    timetable_o = timetable_1
    # convert array into dataframe
    DF = pd.DataFrame(timetable_1)
    # save the dataframe as a csv file
    DF.to_csv(os.path.join(cwd, "timetable_1.csv"), index=False, header=False)
    print('second-stage timetable...')
    # Row 1: wave per DC copy; row 0: pick-up time per station (-1 = unused).
    timetable_2 = np.zeros((2, nb_DC * nb_del))
    for i in range(2):
        # NOTE(review): fills row 1 with receive_wave, not delivery_wave --
        # possibly a copy-paste slip; confirm intended wave times.
        timetable_2[1][i * nb_DC:i * nb_DC + nb_DC] = receive_wave[i]
    timetable_2[0, :] = -1
    for s in range(nb_station):
        # NOTE(review): random draw immediately overwritten (see above).
        timetable_2[0, s] = np.random.randint(0, 24)
        timetable_2[0, s] = pick_up_time[s]
    timetable_d = timetable_2
    # convert array into dataframe
    DF = pd.DataFrame(timetable_2)
    # save the dataframe as a csv file
    DF.to_csv(os.path.join(cwd, "timetable_2.csv"), index=False, header=False)
    # Generate node locations and the three time matrices.
    travel_time_o, travel_time_d, transshipment_time = main(nb_DC, nb_warehouse, nb_station, city_radius)
    q, n = timetable_o.shape
    q, m = timetable_d.shape
    data_built_1 = np.zeros([n, n])  # warehouse -> DC waiting+travel times
    data_built_2 = np.zeros([m, m])  # DC -> station waiting+travel times
    data_dis = np.zeros([n, m])     # DC -> DC transshipment costs
    # First stage: time from each warehouse order to each DC receive wave,
    # wrapped modulo the 24h period.
    for i in range(n):
        if timetable_o[1][i] != -1:
            for k in range(n):
                tmp = np.mod(timetable_o[1][i] + travel_time_o[i][k], period_time)
                nb_of_period = abs(np.floor((timetable_o[1][i] + travel_time_o[i][k]) / period_time))
                if timetable_o[0][k] < tmp:
                    data_built_1[i][k] = period_time - timetable_o[1][i] + timetable_o[0][k] + nb_of_period * period_time
                if timetable_o[0][k] >= tmp:
                    data_built_1[i][k] = timetable_o[0][k] - timetable_o[1][i] + nb_of_period * period_time
        elif timetable_o[1][i] == -1:
            for k in range(n):
                data_built_1[i][k] = 0
    # Second stage: time from each DC delivery wave to each station pick-up.
    for j in range(m):
        if timetable_d[0][j] != -1:
            for l in range(m):
                tmp = np.mod(timetable_d[0][j] - travel_time_d[l][j], period_time)
                # NOTE(review): uses i and k left over from the previous loop
                # (and travel_time_o), not j/l -- looks like a copy-paste bug;
                # nb_of_period is also unused in this branch. Confirm intent.
                nb_of_period = abs(np.floor((timetable_o[1][i] + travel_time_o[i][k]) / period_time))
                if timetable_d[1][l] > tmp:
                    data_built_2[l][j] = period_time - timetable_d[1][l] + timetable_d[0][j]
                if timetable_d[1][l] <= tmp:
                    data_built_2[l][j] = timetable_d[0][j] - timetable_d[1][l]
        elif timetable_d[0][j] == -1:
            for l in range(m):
                data_built_2[l][j] = 0
    # Forbid links whose dwell exceeds the time window (big-M penalty).
    for i in range(n):
        for k in range(n):
            if (travel_time_o[i][k] + timewindow < data_built_1[i][k]):
                if travel_time_o[i][k] != 0:
                    data_built_1[i][k] = 100000000
    for l in range(m):
        for j in range(m):
            if (travel_time_d[l][j] + timewindow < data_built_2[l][j]):
                if travel_time_d[l][j] != 0:
                    data_built_2[l][j] = 100000000
    # Split each elapsed time into inventory (waiting) and transport components.
    data_built_1_A = (data_built_1 - travel_time_o) * fix_inventory_cost + travel_time_o * fix_transport_cost
    data_built_2_A = (data_built_2 - travel_time_d) * fix_inventory_cost + travel_time_d * fix_transport_cost
    # Transshipment: time from each first-stage wave to each second-stage wave.
    for k in range(n):
        for l in range(m):
            tmp = np.mod(timetable_o[0][k] + transshipment_time[k][l], period_time)
            # NOTE(review): again uses stale i/k and travel_time_o -- presumably
            # should be derived from timetable_o[0][k] + transshipment_time[k][l];
            # confirm against the model formulation.
            nb_of_period = abs(np.floor((timetable_o[1][i] + travel_time_o[i][k]) / period_time))
            if tmp < timetable_d[1][l]:
                data_dis[k][l] = (timetable_d[1][l] - timetable_o[0][k]) + nb_of_period * period_time
            if tmp >= timetable_d[1][l]:
                data_dis[k][l] = (period_time - timetable_o[0][k] + timetable_d[1][l]) + nb_of_period * period_time
    data_dis = (data_dis - transshipment_time) * var_inventory_cost + transshipment_time * var_transport_rate
    # Persist the cost matrices for the optimization model.
    built1_df = pd.DataFrame(data_built_1)
    built2_df = pd.DataFrame(data_built_2)
    flow_df = pd.DataFrame(data_flow)
    transshipment_df = pd.DataFrame(data_dis)
    built1_df.to_csv(os.path.join(cwd, 'built1.csv'))
    built2_df.to_csv(os.path.join(cwd, 'built2.csv'))
    transshipment_df.to_csv(os.path.join(cwd, 'transshipment_cost.csv'))
    flow_df.to_csv(os.path.join(cwd, 'flow.csv'))
    n, m = data_flow.shape
    # Four node layers: building1, location1, location2, building2.
    node_num_each = [n, n, m, m]
    #Read Data
    nodename_array = ['building node1', 'location node1', 'location node2', 'building node2']
    with open(os.path.join(cwd, 'input_node.csv'), 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['node_name', 'node_id', 'x', 'y'])
        for i in range(0, 4):
            for j in range(node_num_each[i]):
                if (i < 4):  # NOTE(review): always true given range(0,4)
                    # Node ids are layered in blocks of 1000 per layer.
                    nodeid = 1000 * (i + 1) + j + 1
                    locationx = 100 * i
                    locationy = 10 * j
                    line = [nodename_array[i],
                            nodeid,
                            locationx,
                            locationy]
                    writer.writerow(line)
    with open(os.path.join(cwd, 'input_link.csv'), 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(
            ['link_id', 'from_node_id', 'to_node_id', 'built_cost', 'trans_cost', 'link_type', 'acting_link_id'])
        count = 0
        for i in range(node_num_each[0]):# building1 to location1
            for j in range(node_num_each[1]):
                count += 1
                line1 = [count,
                         1000 + i + 1,
                         2000 + j + 1,
                         data_built_1[i][j], # built cost
                         0, #trans cost
                         2,#building to location
                         count]
                writer.writerow(line1)
        for i in range(node_num_each[2]):# location 2 to building 2
            for j in range(node_num_each[3]):
                count += 1
                line2 = [count,
                         3000 + i + 1,
                         4000 + j + 1,
                         data_built_2[i][j], # built cost
                         0,# trans cost
                         3,#location to building
                         count]
                writer.writerow(line2)
                # print('node',node_num_each)
                # print('x',i)
                # print('y',j)
                # print(data_built_2[i][j])
        for i in range(node_num_each[1]): # location 1 to location 2 transportation
            for j in range(node_num_each[2]):
                #if(j!=i):
                count += 1
                line3 = [count,
                         2000 + i + 1,
                         3000 + j + 1,
                         0, # built
                         data_dis[i][j],# trans
                         1,#physical transportation link
                         count]
                writer.writerow(line3)
    with open(os.path.join(cwd, 'input_agent.csv'), 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(
            ['agent_id', 'origin_node_id', 'destination_node_id', 'customized_cost_link_type',
             'customized_cost_link_value', 'agent_type', 'set_of_allowed_link_types'])
        count = 0
        # transportation agent
        for i in range(node_num_each[0]):
            for j in range(node_num_each[3]):
                count += 1
                line1 = [count,
                         1000 + i + 1,
                         4000 + j + 1,
                         1,
                         data_flow[i][j],#customized_cost:the flow(i,j)
                         1,
                         '1;2;3;4']
                writer.writerow(line1)
                #print(data_flow[i][j])
| [
"matplotlib.pyplot.title",
"numpy.floor",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.arange",
"os.path.join",
"pandas.DataFrame",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"csv.writer",
"matplotlib.pyplot.ylim",
... | [((106, 146), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""'}), "('font', family='Times New Roman')\n", (112, 146), True, 'import matplotlib.pyplot as plt\n'), ((146, 170), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(9.5)'}), "('font', size=9.5)\n", (152, 170), True, 'import matplotlib.pyplot as plt\n'), ((3755, 3782), 'pandas.DataFrame', 'pd.DataFrame', (['travel_time_2'], {}), '(travel_time_2)\n', (3767, 3782), True, 'import pandas as pd\n'), ((3909, 3946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8.1)', 'dpi': '(125)'}), '(figsize=(8, 8.1), dpi=125)\n', (3919, 3946), True, 'import matplotlib.pyplot as plt\n'), ((3949, 4025), 'matplotlib.pyplot.plot', 'plt.plot', (['x_dc', 'y_dc', '"""o"""'], {'label': '"""Distribution centers"""', 'markersize': '(8)', 'c': '"""k"""'}), "(x_dc, y_dc, 'o', label='Distribution centers', markersize=8, c='k')\n", (3957, 4025), True, 'import matplotlib.pyplot as plt\n'), ((4025, 4089), 'matplotlib.pyplot.plot', 'plt.plot', (['x_w', 'y_w', '"""D"""'], {'label': '"""Warehouses"""', 'markersize': '(5)', 'c': '"""b"""'}), "(x_w, y_w, 'D', label='Warehouses', markersize=5, c='b')\n", (4033, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4091, 4161), 'matplotlib.pyplot.plot', 'plt.plot', (['x_s', 'y_s', '"""D"""'], {'label': '"""Pick-up stations"""', 'markersize': '(5)', 'c': '"""r"""'}), "(x_s, y_s, 'D', label='Pick-up stations', markersize=5, c='r')\n", (4099, 4161), True, 'import matplotlib.pyplot as plt\n'), ((4167, 4216), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-square_length - 3, square_length + 3)'], {}), '((-square_length - 3, square_length + 3))\n', (4175, 4216), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4268), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-square_length - 3, square_length + 3)'], {}), '((-square_length - 3, square_length + 3))\n', (4227, 4268), True, 'import matplotlib.pyplot as plt\n'), ((4282, 4333), 
'numpy.arange', 'np.arange', (['(-square_length - 3)', '(square_length + 3)', '(1)'], {}), '(-square_length - 3, square_length + 3, 1)\n', (4291, 4333), True, 'import numpy as np\n'), ((4346, 4397), 'numpy.arange', 'np.arange', (['(-square_length - 3)', '(square_length + 3)', '(1)'], {}), '(-square_length - 3, square_length + 3, 1)\n', (4355, 4397), True, 'import numpy as np\n'), ((4397, 4419), 'matplotlib.pyplot.xticks', 'plt.xticks', (['my_x_ticks'], {}), '(my_x_ticks)\n', (4407, 4419), True, 'import matplotlib.pyplot as plt\n'), ((4424, 4446), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_y_ticks'], {}), '(my_y_ticks)\n', (4434, 4446), True, 'import matplotlib.pyplot as plt\n'), ((4451, 4473), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4461, 4473), True, 'import matplotlib.pyplot as plt\n'), ((4478, 4492), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4486, 4492), True, 'import matplotlib.pyplot as plt\n'), ((4497, 4524), 'matplotlib.pyplot.title', 'plt.title', (['"""Random Scatter"""'], {}), "('Random Scatter')\n", (4506, 4524), True, 'import matplotlib.pyplot as plt\n'), ((4575, 4585), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4583, 4585), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5424), 'numpy.zeros', 'np.zeros', (['(2, nb_DC * nb_rec)'], {}), '((2, nb_DC * nb_rec))\n', (5403, 5424), True, 'import numpy as np\n'), ((5730, 5755), 'pandas.DataFrame', 'pd.DataFrame', (['timetable_1'], {}), '(timetable_1)\n', (5742, 5755), True, 'import pandas as pd\n'), ((5931, 5960), 'numpy.zeros', 'np.zeros', (['(2, nb_DC * nb_del)'], {}), '((2, nb_DC * nb_del))\n', (5939, 5960), True, 'import numpy as np\n'), ((6267, 6292), 'pandas.DataFrame', 'pd.DataFrame', (['timetable_2'], {}), '(timetable_2)\n', (6279, 6292), True, 'import pandas as pd\n'), ((6585, 6601), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (6593, 6601), True, 'import numpy as np\n'), ((6618, 6634), 
'numpy.zeros', 'np.zeros', (['[m, m]'], {}), '([m, m])\n', (6626, 6634), True, 'import numpy as np\n'), ((6647, 6663), 'numpy.zeros', 'np.zeros', (['[n, m]'], {}), '([n, m])\n', (6655, 6663), True, 'import numpy as np\n'), ((9209, 9235), 'pandas.DataFrame', 'pd.DataFrame', (['data_built_1'], {}), '(data_built_1)\n', (9221, 9235), True, 'import pandas as pd\n'), ((9250, 9276), 'pandas.DataFrame', 'pd.DataFrame', (['data_built_2'], {}), '(data_built_2)\n', (9262, 9276), True, 'import pandas as pd\n'), ((9289, 9312), 'pandas.DataFrame', 'pd.DataFrame', (['data_flow'], {}), '(data_flow)\n', (9301, 9312), True, 'import pandas as pd\n'), ((9335, 9357), 'pandas.DataFrame', 'pd.DataFrame', (['data_dis'], {}), '(data_dis)\n', (9347, 9357), True, 'import pandas as pd\n'), ((403, 422), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (413, 422), False, 'import csv\n'), ((590, 605), 'numpy.zeros', 'np.zeros', (['nb_DC'], {}), '(nb_DC)\n', (598, 605), True, 'import numpy as np\n'), ((621, 636), 'numpy.zeros', 'np.zeros', (['nb_DC'], {}), '(nb_DC)\n', (629, 636), True, 'import numpy as np\n'), ((1146, 1188), 'numpy.zeros', 'np.zeros', (['[nb_DC * nb_rec, nb_DC * nb_del]'], {}), '([nb_DC * nb_rec, nb_DC * nb_del])\n', (1154, 1188), True, 'import numpy as np\n'), ((1546, 1578), 'pandas.DataFrame', 'pd.DataFrame', (['transshipment_cost'], {}), '(transshipment_cost)\n', (1558, 1578), True, 'import pandas as pd\n'), ((1763, 1785), 'numpy.zeros', 'np.zeros', (['nb_warehouse'], {}), '(nb_warehouse)\n', (1771, 1785), True, 'import numpy as np\n'), ((1800, 1822), 'numpy.zeros', 'np.zeros', (['nb_warehouse'], {}), '(nb_warehouse)\n', (1808, 1822), True, 'import numpy as np\n'), ((2324, 2366), 'numpy.zeros', 'np.zeros', (['[nb_DC * nb_rec, nb_DC * nb_rec]'], {}), '([nb_DC * nb_rec, nb_DC * nb_rec])\n', (2332, 2366), True, 'import numpy as np\n'), ((2662, 2689), 'pandas.DataFrame', 'pd.DataFrame', (['travel_time_1'], {}), '(travel_time_1)\n', (2674, 2689), True, 'import pandas 
as pd\n'), ((2880, 2908), 'numpy.zeros', 'np.zeros', (['nb_pick_up_station'], {}), '(nb_pick_up_station)\n', (2888, 2908), True, 'import numpy as np\n'), ((2923, 2951), 'numpy.zeros', 'np.zeros', (['nb_pick_up_station'], {}), '(nb_pick_up_station)\n', (2931, 2951), True, 'import numpy as np\n'), ((3459, 3501), 'numpy.zeros', 'np.zeros', (['[nb_DC * nb_del, nb_DC * nb_del]'], {}), '([nb_DC * nb_del, nb_DC * nb_del])\n', (3467, 3501), True, 'import numpy as np\n'), ((3840, 3878), 'os.path.join', 'os.path.join', (['cwd', '"""travel_time_2.csv"""'], {}), "(cwd, 'travel_time_2.csv')\n", (3852, 3878), False, 'import os\n'), ((4541, 4570), 'os.path.join', 'os.path.join', (['cwd', '"""imag.png"""'], {}), "(cwd, 'imag.png')\n", (4553, 4570), False, 'import os\n'), ((5302, 5340), 'os.path.join', 'os.path.join', (['cwd', '"""t_flow_matrix.txt"""'], {}), "(cwd, 't_flow_matrix.txt')\n", (5314, 5340), False, 'import os\n'), ((5594, 5618), 'numpy.random.randint', 'np.random.randint', (['(0)', '(24)'], {}), '(0, 24)\n', (5611, 5618), True, 'import numpy as np\n'), ((5813, 5849), 'os.path.join', 'os.path.join', (['cwd', '"""timetable_1.csv"""'], {}), "(cwd, 'timetable_1.csv')\n", (5825, 5849), False, 'import os\n'), ((6128, 6152), 'numpy.random.randint', 'np.random.randint', (['(0)', '(24)'], {}), '(0, 24)\n', (6145, 6152), True, 'import numpy as np\n'), ((6350, 6386), 'os.path.join', 'os.path.join', (['cwd', '"""timetable_2.csv"""'], {}), "(cwd, 'timetable_2.csv')\n", (6362, 6386), False, 'import os\n'), ((9379, 9410), 'os.path.join', 'os.path.join', (['cwd', '"""built1.csv"""'], {}), "(cwd, 'built1.csv')\n", (9391, 9410), False, 'import os\n'), ((9432, 9463), 'os.path.join', 'os.path.join', (['cwd', '"""built2.csv"""'], {}), "(cwd, 'built2.csv')\n", (9444, 9463), False, 'import os\n'), ((9492, 9535), 'os.path.join', 'os.path.join', (['cwd', '"""transshipment_cost.csv"""'], {}), "(cwd, 'transshipment_cost.csv')\n", (9504, 9535), False, 'import os\n'), ((9555, 9584), 
'os.path.join', 'os.path.join', (['cwd', '"""flow.csv"""'], {}), "(cwd, 'flow.csv')\n", (9567, 9584), False, 'import os\n'), ((9845, 9864), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (9855, 9864), False, 'import csv\n'), ((10428, 10447), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (10438, 10447), False, 'import csv\n'), ((12178, 12197), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (12188, 12197), False, 'import csv\n'), ((327, 356), 'os.path.join', 'os.path.join', (['cwd', '"""node.csv"""'], {}), "(cwd, 'node.csv')\n", (339, 356), False, 'import os\n'), ((692, 752), 'numpy.random.randint', 'np.random.randint', (['(-square_length * 0.9)', '(square_length * 0.9)'], {}), '(-square_length * 0.9, square_length * 0.9)\n', (709, 752), True, 'import numpy as np\n'), ((774, 834), 'numpy.random.randint', 'np.random.randint', (['(-square_length * 0.9)', '(square_length * 0.9)'], {}), '(-square_length * 0.9, square_length * 0.9)\n', (791, 834), True, 'import numpy as np\n'), ((1644, 1687), 'os.path.join', 'os.path.join', (['cwd', '"""transshipment_time.csv"""'], {}), "(cwd, 'transshipment_time.csv')\n", (1656, 1687), False, 'import os\n'), ((1904, 1952), 'numpy.random.randint', 'np.random.randint', (['(-square_length)', 'square_length'], {}), '(-square_length, square_length)\n', (1921, 1952), True, 'import numpy as np\n'), ((1977, 2025), 'numpy.random.randint', 'np.random.randint', (['(-square_length)', 'square_length'], {}), '(-square_length, square_length)\n', (1994, 2025), True, 'import numpy as np\n'), ((2755, 2793), 'os.path.join', 'os.path.join', (['cwd', '"""travel_time_1.csv"""'], {}), "(cwd, 'travel_time_1.csv')\n", (2767, 2793), False, 'import os\n'), ((3039, 3087), 'numpy.random.randint', 'np.random.randint', (['(-square_length)', 'square_length'], {}), '(-square_length, square_length)\n', (3056, 3087), True, 'import numpy as np\n'), ((3112, 3160), 'numpy.random.randint', 'np.random.randint', 
(['(-square_length)', 'square_length'], {}), '(-square_length, square_length)\n', (3129, 3160), True, 'import numpy as np\n'), ((8656, 8721), 'numpy.mod', 'np.mod', (['(timetable_o[0][k] + transshipment_time[k][l])', 'period_time'], {}), '(timetable_o[0][k] + transshipment_time[k][l], period_time)\n', (8662, 8721), True, 'import numpy as np\n'), ((9763, 9798), 'os.path.join', 'os.path.join', (['cwd', '"""input_node.csv"""'], {}), "(cwd, 'input_node.csv')\n", (9775, 9798), False, 'import os\n'), ((10346, 10381), 'os.path.join', 'os.path.join', (['cwd', '"""input_link.csv"""'], {}), "(cwd, 'input_link.csv')\n", (10358, 10381), False, 'import os\n'), ((12095, 12131), 'os.path.join', 'os.path.join', (['cwd', '"""input_agent.csv"""'], {}), "(cwd, 'input_agent.csv')\n", (12107, 12131), False, 'import os\n'), ((6772, 6832), 'numpy.mod', 'np.mod', (['(timetable_o[1][i] + travel_time_o[i][k])', 'period_time'], {}), '(timetable_o[1][i] + travel_time_o[i][k], period_time)\n', (6778, 6832), True, 'import numpy as np\n'), ((7457, 7517), 'numpy.mod', 'np.mod', (['(timetable_d[0][j] - travel_time_d[l][j])', 'period_time'], {}), '(timetable_d[0][j] - travel_time_d[l][j], period_time)\n', (7463, 7517), True, 'import numpy as np\n'), ((8748, 8813), 'numpy.floor', 'np.floor', (['((timetable_o[1][i] + travel_time_o[i][k]) / period_time)'], {}), '((timetable_o[1][i] + travel_time_o[i][k]) / period_time)\n', (8756, 8813), True, 'import numpy as np\n'), ((6863, 6928), 'numpy.floor', 'np.floor', (['((timetable_o[1][i] + travel_time_o[i][k]) / period_time)'], {}), '((timetable_o[1][i] + travel_time_o[i][k]) / period_time)\n', (6871, 6928), True, 'import numpy as np\n'), ((7548, 7613), 'numpy.floor', 'np.floor', (['((timetable_o[1][i] + travel_time_o[i][k]) / period_time)'], {}), '((timetable_o[1][i] + travel_time_o[i][k]) / period_time)\n', (7556, 7613), True, 'import numpy as np\n')] |
from x2df.examples.__example__ import AbstractExample
from scipy import signal
from math import pi
from numpy import linspace
from pandas import DataFrame
class Example(AbstractExample):
    def createDF(self):
        """Build a DataFrame of unit-step responses of a 2nd-order system.

        One column per damping ratio D in [0.1, 2], indexed by time in
        seconds over 10 periods of the 10 Hz natural frequency.

        Returns
        -------
        pandas.DataFrame
            Step responses, indexed by "time/s".
        """
        f = 10
        omega = 2 * pi * f
        Ds = linspace(0.1, 2, 10)
        df = DataFrame()
        t = linspace(0, 10 * 1 / f, 500)
        df["time/s"] = t
        for D in Ds:
            # Standard 2nd-order transfer function w^2 / (s^2 + 2*D*w*s + w^2).
            lti = signal.lti([omega ** 2], [1.0, 2 * D * omega, omega ** 2])
            t, y = signal.step(lti, T=t)
            df[f"D={D:.2f}"] = y
        # BUG FIX: set_index returns a new frame; the original discarded it,
        # so the returned frame was never actually indexed by time.
        df = df.set_index("time/s")
        return df
| [
"pandas.DataFrame",
"scipy.signal.step",
"numpy.linspace",
"scipy.signal.lti"
] | [((268, 288), 'numpy.linspace', 'linspace', (['(0.1)', '(2)', '(10)'], {}), '(0.1, 2, 10)\n', (276, 288), False, 'from numpy import linspace\n'), ((302, 313), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (311, 313), False, 'from pandas import DataFrame\n'), ((326, 354), 'numpy.linspace', 'linspace', (['(0)', '(10 * 1 / f)', '(500)'], {}), '(0, 10 * 1 / f, 500)\n', (334, 354), False, 'from numpy import linspace\n'), ((419, 477), 'scipy.signal.lti', 'signal.lti', (['[omega ** 2]', '[1.0, 2 * D * omega, omega ** 2]'], {}), '([omega ** 2], [1.0, 2 * D * omega, omega ** 2])\n', (429, 477), False, 'from scipy import signal\n'), ((497, 518), 'scipy.signal.step', 'signal.step', (['lti'], {'T': 't'}), '(lti, T=t)\n', (508, 518), False, 'from scipy import signal\n')] |
import pandas as pd
import numpy as np
import scipy.optimize
import numdifftools as nd
from pyswarm import pso
from matplotlib import pyplot
import pickle
import time
# Problem dimensions: 7 counties observed over 8 years; the first
# 7 years are used for fitting and the last for hold-out testing.
size = 7
train_time = 7
max_time = 8
state_map_dict = {0:'KY', 1:'OH', 2:'PA', 3:'VA', 4:'WV'}
# NOTE(review): indices 0 and 6 both map to 'NELSON' -- possibly an
# intentional duplicated endpoint; confirm against the data.
county_map_dict = {0:'NELSON', 1:'AUGUSTA', 2:'ROCKBRIDGE', 3:'AMHERST', 4:'APPOMATTOX', 5:'BUCKINGHAM', 6:'NELSON'}
#county_map_dict = {0:'NELSON', 1:'AUGUSTA', 2:'AMHERST', 3:'BUCKINGHAM', 4:'NELSON'}
time_map_dict = {0:2010, 1:2011, 2:2012, 3:2013, 4:2014, 5:2015, 6:2016, 7:2017}
time_list = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]
full2abbrev_dict = {'Kentucky':'KY', 'Ohio':'OH', 'Pennsylvania':'PA', 'Virginia':'VA', 'West Virginia':'WV'}
'''
df = pd.read_csv('MCM_NFLIS_Data.csv')
df = df[df['State']=='VA']
df = df.groupby(['COUNTY', 'YYYY'])['TotalDrugReportsCounty'].mean()
total_set = set(df.index.map(lambda x:x[0]))
for year in time_list:
    d = df[df.index.map(lambda x:x[1])==year]
    counties = d.index.map(lambda x:x[0])
    total_set = total_set.intersection(set(counties))
print(total_set)
'''
# Observed drug-report counts per (county, year) for Virginia.
I_df = pd.read_csv('MCM_NFLIS_Data.csv')
I_df = I_df[I_df['State']=='VA']
#I_df = I_df.groupby(['COUNTY', 'YYYY'])['TotalDrugReportsCounty'].mean()
I_df = I_df.groupby(['COUNTY', 'YYYY'])['TotalDrugReportsCounty'].mean()
# Flat lookup keyed by (county index, time index) for fast access.
I_dict = {}
for i in range(0, size):
    for t in range(0, max_time):
        I_dict[(i, t)] = I_df[county_map_dict[i], time_map_dict[t]]
# Household counts (HC01_VC128) from the ACS table, one row per VA county.
population_df = pd.read_csv('ACS_10_5YR_DP02_with_ann.csv')
population_df = population_df.iloc[1:]
population_df['HC01_VC128'] = population_df['HC01_VC128'].apply(lambda x:int(x))
population_df['State'] = population_df['GEO.display-label'].apply(lambda x:full2abbrev_dict[x.split(', ')[1]])
population_df = population_df[population_df['State']=='VA']
population_df['COUNTY'] = population_df['GEO.display-label'].apply(lambda x:x.split(' County, ')[0].upper())
population_df = population_df.set_index('COUNTY')
population_df = population_df[['HC01_VC128']]
population_list = []
for i in range(0, size):
    population_list.append(int(population_df.loc[county_map_dict[i]].values))
# Initial infected counts I(i, 0): observed 2010 values, indexed by county name.
initial_state = I_df[I_df.index.map(lambda x:x[1])==2010]
initial_state.index = initial_state.index.map(lambda x:x[0])
'''
gamma = np.random.rand(size)
beta = np.random.rand()
A = np.random.rand(size, size)
'''
# Flat parameter vector: first size*size entries are the coupling matrix A
# (row-major), then size entries for the per-county recovery rates gamma.
arg_sizes = [size*size, size]
total_size = sum(arg_sizes)
args = np.random.rand(total_size)
# NOTE(review): lb/ub (pso-style bounds) and `bounds` (scipy-style) disagree
# on the A range (-1..1 vs -0.1..0.1); only `bounds` is used below -- verify.
bounds = []
lb = []
ub = []
bias = 0
for i in range(0, arg_sizes[0]):
    lb.append(-1)
    ub.append(1)
    bounds.append((-0.1, 0.1))
bias += arg_sizes[0]
for i in range(bias, bias+arg_sizes[1]):
    lb.append(0)
    ub.append(0.25)
    bounds.append((0, 0.25))
bias += arg_sizes[1]
def get_gamma(args):
    """Slice the per-county recovery rates gamma out of the flat parameter vector."""
    offset = arg_sizes[0]
    return args[offset:offset + size]
# A[i][j] is stored row-major in the first size*size entries of the flat vector.
get_A = lambda args, i, j: args[size*i+j]
# Memoization caches for the mutually recursive I/S/summed evaluations;
# cleared by dict_clear() after each full objective evaluation.
I_results = {}
S_results = {}
summed_results = {}
def I(i, t, args):
    """Infected count for county i at time step t (memoized recursion).

    t == 0 uses the observed 2010 value; otherwise the change in the
    susceptible pool is added back to the previous infected count.
    """
    key = (i, t)
    if key in I_results:
        return I_results[key]
    if t == 0:
        value = initial_state[county_map_dict[i]]
    else:
        value = I(i, t - 1, args) + S(i, t - 1, args) - S(i, t, args)
    I_results[key] = value
    return value
def S(i, t, args):
    """Susceptible count for county i at time step t (memoized recursion).

    t == 0 is population minus the initial infected; later steps apply the
    discrete SIR update with the coupled infection term and recovery gamma.
    """
    key = (i, t)
    if key in S_results:
        return S_results[key]
    if t == 0:
        value = fastN(i) - I(i, t, args)
    else:
        prev = t - 1
        value = (-summed(i, prev, args) * S(i, prev, args)
                 + get_gamma(args)[i] * I(i, prev, args)
                 + S(i, prev, args))
    S_results[key] = value
    return value
def summed(i, t, args):
    """Force-of-infection term sum_j A[i][j] * I_j(t) / N_j (memoized)."""
    key = (i, t)
    if key in summed_results:
        return summed_results[key]
    total = sum(get_A(args, i, j) * I(j, t, args) / fastN(j) for j in range(size))
    summed_results[key] = total
    return total
# Fast O(1) population lookup by county index.
fastN = lambda i:population_list[i]
def N(i):
    # NOTE(review): population_df has county names in its *index*, not its
    # columns, so this column lookup would KeyError -- appears to be a stale
    # slow-path variant superseded by fastN; verify before removing.
    county_name = county_map_dict[i]
    return population_df[county_name]
# Fast O(1) observed-report lookup from the precomputed (county, time) dict.
fastI_bar = lambda i, t:I_dict[(i, t)]
def I_bar(i, t):
    # Slow-path equivalent of fastI_bar, reading straight from the grouped frame.
    return I_df[county_map_dict[i], time_map_dict[t]]
def dict_clear():
    """Reset every memoization cache between objective evaluations."""
    for cache in (I_results, S_results, summed_results):
        cache.clear()
def f(args):
    """Training objective: mean absolute percentage error over the fit window."""
    total = 0
    for county in range(size):
        for step in range(train_time):
            observed = fastI_bar(county, step)
            total += abs((I(county, step, args) - observed) / observed)
    total = total / (size * train_time)
    dict_clear()
    return total
def f_test(args):
    """Hold-out objective: mean absolute percentage error on the test years."""
    total = 0
    for county in range(size):
        for step in range(train_time, max_time):
            observed = fastI_bar(county, step)
            total += abs((I(county, step, args) - observed) / observed)
    total = total / (size * (max_time - train_time))
    dict_clear()
    return total
def inspect():
    """Print predicted vs. observed counts for every county/year (uses global args)."""
    for county in range(size):
        for step in range(max_time):
            print('predict:' + str(I(county, step, args)))
            print('real:' + str(fastI_bar(county, step)))
            print('')
def plot(opt_args):
    """Plot predicted vs. observed yearly report counts, one figure per county."""
    for county in range(size):
        predicted = [I(county, step, opt_args) for step in range(max_time)]
        observed = [fastI_bar(county, step) for step in range(max_time)]
        pyplot.plot(time_list, predicted)
        pyplot.plot(time_list, observed)
        pyplot.xlabel('Year')
        pyplot.ylabel('TotalDrugReportsCounty')
        pyplot.title(county_map_dict[county])
        pyplot.legend(['predict', 'real'])
        pyplot.show()
'''
while True:
    start = time.time()
    print(f(args))
    args = np.random.rand(total_size)
    print(time.time()-start)
'''
# Fit the model parameters by differential evolution over the box bounds.
result = scipy.optimize.differential_evolution(f, bounds, recombination=1, disp=True, maxiter=100)
# BUG FIX: the original passed an open() handle straight into pickle.dump and
# never closed it; use a context manager so the file is flushed and closed.
with open('result_county', 'wb') as result_file:
    pickle.dump(result, result_file)
print('!') | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((1100, 1133), 'pandas.read_csv', 'pd.read_csv', (['"""MCM_NFLIS_Data.csv"""'], {}), "('MCM_NFLIS_Data.csv')\n", (1111, 1133), True, 'import pandas as pd\n'), ((1469, 1512), 'pandas.read_csv', 'pd.read_csv', (['"""ACS_10_5YR_DP02_with_ann.csv"""'], {}), "('ACS_10_5YR_DP02_with_ann.csv')\n", (1480, 1512), True, 'import pandas as pd\n'), ((2412, 2438), 'numpy.random.rand', 'np.random.rand', (['total_size'], {}), '(total_size)\n', (2426, 2438), True, 'import numpy as np\n'), ((5026, 5057), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_list', 'predict'], {}), '(time_list, predict)\n', (5037, 5057), False, 'from matplotlib import pyplot\n'), ((5066, 5094), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_list', 'real'], {}), '(time_list, real)\n', (5077, 5094), False, 'from matplotlib import pyplot\n'), ((5103, 5124), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Year"""'], {}), "('Year')\n", (5116, 5124), False, 'from matplotlib import pyplot\n'), ((5133, 5172), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""TotalDrugReportsCounty"""'], {}), "('TotalDrugReportsCounty')\n", (5146, 5172), False, 'from matplotlib import pyplot\n'), ((5181, 5213), 'matplotlib.pyplot.title', 'pyplot.title', (['county_map_dict[i]'], {}), '(county_map_dict[i])\n', (5193, 5213), False, 'from matplotlib import pyplot\n'), ((5222, 5256), 'matplotlib.pyplot.legend', 'pyplot.legend', (["['predict', 'real']"], {}), "(['predict', 'real'])\n", (5235, 5256), False, 'from matplotlib import pyplot\n'), ((5265, 5278), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (5276, 5278), False, 'from matplotlib import pyplot\n')] |
"""SIFT Detector-Descriptor implementation.
The detector was proposed in 'Distinctive Image Features from Scale-Invariant Keypoints' and is implemented by wrapping
over OpenCV's API.
References:
- https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf
- https://docs.opencv.org/3.4.2/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html
Authors: <NAME>
"""
from typing import Tuple
import cv2 as cv
import numpy as np
import gtsfm.utils.features as feature_utils
import gtsfm.utils.images as image_utils
from gtsfm.common.image import Image
from gtsfm.common.keypoints import Keypoints
from gtsfm.frontend.detector_descriptor.detector_descriptor_base import (
DetectorDescriptorBase,
)
class SIFTDetectorDescriptor(DetectorDescriptorBase):
    """SIFT detector-descriptor backed by OpenCV's implementation."""

    def detect_and_describe(self, image: Image) -> Tuple[Keypoints, np.ndarray]:
        """Detect SIFT keypoints in the image and compute their descriptors.

        Refer to detect() in DetectorBase and describe() in DescriptorBase for
        details about the output format.

        Args:
            image: the input image.

        Returns:
            Detected keypoints, with length N <= max_keypoints.
            Corr. descriptors, of shape (N, D) where D is the dimension of each descriptor.
        """
        # SIFT works on intensity values, so convert to grayscale first.
        gray = image_utils.rgb_to_gray_cv(image)

        sift = cv.SIFT_create()
        cv_kps, raw_descriptors = sift.detectAndCompute(gray.value_array, None)

        gtsfm_kps = feature_utils.cast_to_gtsfm_keypoints(cv_kps)

        # Keep only the strongest max_keypoints features; the descriptor rows
        # must be permuted identically to the keypoints.
        order = np.argsort(-gtsfm_kps.responses)[: self.max_keypoints]
        selected = Keypoints(
            coordinates=gtsfm_kps.coordinates[order],
            scales=gtsfm_kps.scales[order],
            responses=gtsfm_kps.responses[order],
        )
        return selected, raw_descriptors[order]
| [
"gtsfm.utils.features.cast_to_gtsfm_keypoints",
"numpy.argsort",
"cv2.SIFT_create",
"gtsfm.utils.images.rgb_to_gray_cv",
"gtsfm.common.keypoints.Keypoints"
] | [((1349, 1382), 'gtsfm.utils.images.rgb_to_gray_cv', 'image_utils.rgb_to_gray_cv', (['image'], {}), '(image)\n', (1375, 1382), True, 'import gtsfm.utils.images as image_utils\n'), ((1438, 1454), 'cv2.SIFT_create', 'cv.SIFT_create', ([], {}), '()\n', (1452, 1454), True, 'import cv2 as cv\n'), ((1640, 1691), 'gtsfm.utils.features.cast_to_gtsfm_keypoints', 'feature_utils.cast_to_gtsfm_keypoints', (['cv_keypoints'], {}), '(cv_keypoints)\n', (1677, 1691), True, 'import gtsfm.utils.features as feature_utils\n'), ((1920, 2055), 'gtsfm.common.keypoints.Keypoints', 'Keypoints', ([], {'coordinates': 'keypoints.coordinates[sort_idx]', 'scales': 'keypoints.scales[sort_idx]', 'responses': 'keypoints.responses[sort_idx]'}), '(coordinates=keypoints.coordinates[sort_idx], scales=keypoints.\n scales[sort_idx], responses=keypoints.responses[sort_idx])\n', (1929, 2055), False, 'from gtsfm.common.keypoints import Keypoints\n'), ((1844, 1876), 'numpy.argsort', 'np.argsort', (['(-keypoints.responses)'], {}), '(-keypoints.responses)\n', (1854, 1876), True, 'import numpy as np\n')] |
import numpy as np
import time
import subprocess
import json
# Target jobstat service endpoint and cluster/partition identifiers.
HOST = "http://localhost:3000"
CLUSTER = "test"
PART = 'main-part'
# Candidate users for synthetic jobs.
USERS = np.array(['user1_1', 'user2_1', 'user3_1'])
# Final job states with their sampling probabilities (must align index-wise).
STATES = np.array(['COMPLETED', 'CANCELLED', 'FAILED', 'TIMEOUT'])
STATES_P = np.array([0.47, 0.07, 0.24, 0.22])
# Time window for generated jobs, in epoch seconds.
TIME_START = 1646082000
TIME_END = 1648760400
NUM_JOBS = 1
def post_info(info):
    """POST a job-info JSON payload to the jobstat service via curl.

    NOTE(review): shell=True with an interpolated payload is shell-injection
    prone; kept as-is since the payload is generated locally.
    """
    cmd = f'curl --request POST -H "Content-Type: application/json" "{HOST}/jobstat/job/info" --data "{info}"'
    subprocess.Popen(cmd, shell=True).wait()
def post_perf(perf):
    """POST a job-performance JSON payload to the jobstat service via curl.

    NOTE(review): shell=True with an interpolated payload is shell-injection
    prone; kept as-is since the payload is generated locally.
    """
    cmd = f'curl --request POST -H "Content-Type: application/json" "{HOST}/jobstat/job/performance" --data "{perf}"'
    subprocess.Popen(cmd, shell=True).wait()
def post_tags(tags):
    """POST a job-tags JSON payload to the jobstat service via curl.

    NOTE(review): shell=True with an interpolated payload is shell-injection
    prone; kept as-is since the payload is generated locally.
    """
    cmd = f'curl --request POST -H "Content-Type: application/json" "{HOST}/jobstat/job/tags" --data "{tags}"'
    subprocess.Popen(cmd, shell=True).wait()
def post_mon_data(job_id, mon_data):
    """POST the same digest payload to the jobstat service once per metric.

    Replaces eight copy-pasted curl invocations with one loop; the metric
    names and their order match the original calls exactly, and requests are
    still issued sequentially (each curl is waited on before the next).
    """
    metrics = (
        'cpu_user', 'loadavg', 'gpu_load', 'ipc',
        'ib_rcv_data_mpi', 'ib_xmit_data_mpi',
        'ib_rcv_data_fs', 'ib_xmit_data_fs',
    )
    for metric in metrics:
        cmd = ('curl --request POST -H "Content-Type: application/json" "' + HOST
               + '/jobstat/job/digest?cluster=' + CLUSTER + '&job_id=' + str(job_id)
               + '&name=' + metric + '" --data "{\"data\": ' + mon_data + '}"')
        p = subprocess.Popen(cmd, shell=True)
        p.wait()
def generate_info(user, job_id, state, time_end, time_start, time_submit):
    """Build the job-info payload for one synthetic job as a JSON string.

    The only caller (generate_job) immediately parses the result with
    json.loads, so only the JSON content matters, not its exact formatting.
    Using json.dumps instead of hand concatenation guarantees valid JSON and
    proper escaping of the interpolated values.

    Parameters
    ----------
    user : str
        Account name to attribute the job to.
    job_id, time_end, time_start, time_submit : int
        Job id and epoch-second timestamps.
    state : str
        Final job state (e.g. 'COMPLETED').

    Returns
    -------
    str
        JSON-encoded job-info record.
    """
    info = {
        "account": user,
        "command": "fake_command",
        "job_id": job_id,
        "cluster": CLUSTER,
        "nodelist": "n48418",
        "num_cores": 14,
        "num_nodes": 1,
        "partition": PART,
        "priority": 4294729060,
        "state": state,
        "t_end": time_end,
        "t_start": time_start,
        "t_submit": time_submit,
        "task_id": 0,
        "timelimit": 259200,
        "workdir": "fake_workdir",
    }
    return json.dumps(info)
def generate_tags(job_id):
    """Build the job-tags payload for one synthetic job as a JSON string.

    Content is identical to the previous hand-built string (same keys, same
    tag list, same order); json.dumps just guarantees valid JSON and proper
    escaping. JSON whitespace differences are insignificant to any JSON
    parser on the receiving end.
    """
    tags = [
        "rule_normal_serial",
        "rule_bad_locality",
        "rule_not_effective",
        "class_bad_locality",
        "class_single",
        "class_serial",
        "class_less_suspicious",
        "thr_low_l1_cache_miss",
        "thr_low_mem_store",
        "thr_low_mem_load",
        "thr_low_ib_mpi",
        "thr_low_gpu_load",
        "thr_low_cpu_iowait",
        "thr_low_cpu_nice",
        "thr_low_cpu_system",
        "thr_low_cpu_user",
        "thr_low_loadavg",
        "rule_mem_disbalance",
        "rule_one_active_process",
        "rule_wrong_partition_gpu",
        "thr_low_l2_cache_miss",
    ]
    return json.dumps({
        "job_id": job_id,
        "cluster": CLUSTER,
        "tags": tags,
        "task_id": 0,
    })
def generate_perf(job_id):
    """Return a hand-built performance-stats payload string for the given job id."""
    # NOTE(review): the "job_id"/"cluster" values are wrapped in extra quote
    # characters (e.g. '"1"', "'test'"), which is not strict JSON -- presumably
    # the receiving endpoint expects this form; confirm before normalizing.
    return '{ \
        "job_id": \'"' + str(job_id) + '"\', \
        "cluster": "\'' + CLUSTER + '\'", \
        "avg": { \
            "cpu_iowait": 0.00252202132591562, \
            "cpu_nice": 0.0, \
            "cpu_system": 0.452331942512755, \
            "cpu_user": 3.34764487714418, \
            "fixed_counter1": 164619049.992754, \
            "fixed_counter2": 65706.6251253482, \
            "fixed_counter3": 98664632.1906222, \
            "gpu_load": 0.0, \
            "gpu_mem_load": 0.0, \
            "gpu_mem_usage": 0.0, \
            "ib_rcv_data_fs": 5640.88743625407, \
            "ib_rcv_data_mpi": 0.935808993973111, \
            "ib_rcv_pckts_fs": 120.896161335188, \
            "ib_rcv_pckts_mpi": 0.00571627260083449, \
            "ib_xmit_data_fs": 6627867.5321187, \
            "ib_xmit_data_mpi": 0.3350672229949, \
            "ib_xmit_pckts_fs": 121.013463143255, \
            "ib_xmit_pckts_mpi": 0.0011682892906815, \
            "ipc": 1.64777811846452, \
            "loadavg": 0.99086694483078, \
            "memory_free": 59182738.2905331, \
            "perf_counter1": 1054279.18002321, \
            "perf_counter2": 1079096.7151532, \
            "perf_counter3": 27417870.2084216, \
            "perf_counter4": 12044159.2276369 \
        }, \
        "max": { \
            "cpu_idle": 100.0, \
            "cpu_iowait": 45.0, \
            "cpu_nice": 0.0, \
            "cpu_system": 24.0, \
            "cpu_user": 100.0, \
            "fixed_counter1": 6716118117.0, \
            "fixed_counter2": 13592296.0, \
            "fixed_counter3": 2264389423.63, \
            "gpu_load": 0.0, \
            "gpu_mem_load": 0.0, \
            "gpu_mem_usage": 0.0, \
            "ib_rcv_data_fs": 218274.93, \
            "ib_rcv_data_mpi": 1342.0, \
            "ib_rcv_pckts_fs": 5149.27, \
            "ib_rcv_pckts_mpi": 10.17, \
            "ib_xmit_data_fs": 224559432.0, \
            "ib_xmit_data_mpi": 326.4, \
            "ib_xmit_pckts_fs": 5151.47, \
            "ib_xmit_pckts_mpi": 1.13, \
            "ipc": 3.20930056165718, \
            "loadavg": 1.62, \
            "memory_free": 61121384.0, \
            "perf_counter1": 188266425.0, \
            "perf_counter2": 111703868.33, \
            "perf_counter3": 1676351094.33, \
            "perf_counter4": 535411580.0 \
        }, \
        "min": { \
            "cpu_idle": 0.0, \
            "cpu_iowait": 0.0, \
            "cpu_nice": 0.0, \
            "cpu_system": 0.0, \
            "cpu_user": 0.0, \
            "fixed_counter1": 2233045.93, \
            "fixed_counter2": 0.0, \
            "fixed_counter3": 10103163.27, \
            "gpu_load": 0.0, \
            "gpu_mem_load": 0.0, \
            "gpu_mem_usage": 0.0, \
            "ib_rcv_data_fs": 545.07, \
            "ib_rcv_data_mpi": 0.0, \
            "ib_rcv_pckts_fs": 3.2, \
            "ib_rcv_pckts_mpi": 0.0, \
            "ib_xmit_data_fs": 460.4, \
            "ib_xmit_data_mpi": 0.0, \
            "ib_xmit_pckts_fs": 1.17, \
            "ib_xmit_pckts_mpi": 0.0, \
            "ipc": 0.145227933620309, \
            "loadavg": 0.0, \
            "memory_free": 56340236.0, \
            "perf_counter1": 60320.13, \
            "perf_counter2": 12989.17, \
            "perf_counter3": 561744.53, \
            "perf_counter4": 289109.93 \
        } }'
def generate_mon_data():
    """Return a fixed JSON-array string of 12 monitoring digest samples.

    Each entry carries avg/avg_max/avg_min/max/min plus an epoch-second
    timestamp; timestamps step by 8040 s. The exact bytes are preserved
    because the string is embedded verbatim in the curl command.
    """
    return '[ \
    { \
        "avg": 3.13, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542015480 \
    }, \
    { \
        "avg": 1.41, \
        "avg_max": 45.4, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542023520 \
    }, \
    { \
        "avg": 3.09, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542031560 \
    }, \
    { \
        "avg": 3.23, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542039600 \
    }, \
    { \
        "avg": 3.10, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542047640 \
    }, \
    { \
        "avg": 3.36, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542055680 \
    }, \
    { \
        "avg": 3.49, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542063720 \
    }, \
    { \
        "avg": 3.49, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542071760 \
    }, \
    { \
        "avg": 3.51, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542079800 \
    }, \
    { \
        "avg": 3.45, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542087840 \
    }, \
    { \
        "avg": 3.43, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542095880 \
    }, \
    { \
        "avg": 3.57, \
        "avg_max": 100.0, \
        "avg_min": 0.0, \
        "max": 100.0, \
        "min": 0.0, \
        "time": 1542103920 \
    } ]'
def get_last_job_id():
    """Query the jobstat database for the highest drms_job_id recorded so far."""
    proc = subprocess.Popen(
        "psql -h localhost -U octo -t new_octoshell -c ' select max(drms_job_id) from jobstat_jobs;'",
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    raw = proc.communicate()[0]
    print(raw)
    # int() tolerates the surrounding whitespace psql emits in tuples-only mode.
    return int(raw.decode())
def generate_job(job_num, job_id, user):
    """Synthesize one random job record and POST its info payload."""
    rand = np.random.default_rng()
    # Draw the final state first so the RNG stream order matches the original.
    state = str(rand.choice(STATES, 1, p=STATES_P)[0])
    segment = (TIME_END - TIME_START) // NUM_JOBS
    # Place the job in its own time slot with some gaussian jitter.
    t_start = TIME_START + segment * job_num + int(rand.normal(0.0, 43200, 1))
    t_end = t_start + int(rand.normal(100, 50, 1))
    t_submit = t_start - 600 - int(rand.normal(0.0, 600, 1))
    payload = generate_info(user, job_id, state, t_end, t_start, t_submit)
    info = json.loads(payload)
    # perf = generate_perf(job_id)
    # tags = generate_tags(job_id)
    # mon_data = generate_mon_data()
    print(info)
    post_info(json.dumps(info))
    # post_perf(perf)
    # post_tags(tags)
    # post_mon_data(job_id, mon_data)
if __name__ == '__main__':
    # Generate NUM_JOBS synthetic jobs with ids continuing after the current
    # maximum in the database, each attributed to a randomly chosen user.
    user_rng = np.random.default_rng()
    last_id = get_last_job_id()
    for job_num in range(NUM_JOBS):
        job_id = last_id + job_num
        user = str(user_rng.choice(USERS, 1, replace=False)[0])
        generate_job(job_num, job_id, user)
| [
"numpy.random.default_rng",
"subprocess.Popen",
"numpy.array",
"json.dumps"
] | [((138, 181), 'numpy.array', 'np.array', (["['user1_1', 'user2_1', 'user3_1']"], {}), "(['user1_1', 'user2_1', 'user3_1'])\n", (146, 181), True, 'import numpy as np\n'), ((191, 248), 'numpy.array', 'np.array', (["['COMPLETED', 'CANCELLED', 'FAILED', 'TIMEOUT']"], {}), "(['COMPLETED', 'CANCELLED', 'FAILED', 'TIMEOUT'])\n", (199, 248), True, 'import numpy as np\n'), ((260, 294), 'numpy.array', 'np.array', (['[0.47, 0.07, 0.24, 0.22]'], {}), '([0.47, 0.07, 0.24, 0.22])\n', (268, 294), True, 'import numpy as np\n'), ((386, 536), 'subprocess.Popen', 'subprocess.Popen', (['(\'curl --request POST -H "Content-Type: application/json" "\' + HOST +\n \'/jobstat/job/info" --data "\' + info + \'"\')'], {'shell': '(True)'}), '(\n \'curl --request POST -H "Content-Type: application/json" "\' + HOST +\n \'/jobstat/job/info" --data "\' + info + \'"\', shell=True)\n', (402, 536), False, 'import subprocess\n'), ((571, 728), 'subprocess.Popen', 'subprocess.Popen', (['(\'curl --request POST -H "Content-Type: application/json" "\' + HOST +\n \'/jobstat/job/performance" --data "\' + perf + \'"\')'], {'shell': '(True)'}), '(\n \'curl --request POST -H "Content-Type: application/json" "\' + HOST +\n \'/jobstat/job/performance" --data "\' + perf + \'"\', shell=True)\n', (587, 728), False, 'import subprocess\n'), ((763, 913), 'subprocess.Popen', 'subprocess.Popen', (['(\'curl --request POST -H "Content-Type: application/json" "\' + HOST +\n \'/jobstat/job/tags" --data "\' + tags + \'"\')'], {'shell': '(True)'}), '(\n \'curl --request POST -H "Content-Type: application/json" "\' + HOST +\n \'/jobstat/job/tags" --data "\' + tags + \'"\', shell=True)\n', (779, 913), False, 'import subprocess\n'), ((9403, 9583), 'subprocess.Popen', 'subprocess.Popen', (['"""psql -h localhost -U octo -t new_octoshell -c \' select max(drms_job_id) from jobstat_jobs;\'"""'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(\n "psql -h localhost -U octo -t new_octoshell -c \' 
select max(drms_job_id) from jobstat_jobs;\'"\n , stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n', (9419, 9583), False, 'import subprocess\n'), ((9703, 9726), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (9724, 9726), True, 'import numpy as np\n'), ((10418, 10441), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (10439, 10441), True, 'import numpy as np\n'), ((10275, 10291), 'json.dumps', 'json.dumps', (['info'], {}), '(info)\n', (10285, 10291), False, 'import json\n')] |
"""Useful functions."""
import os
import numpy as np
import pandas as pd
import pkgutil
# TODO: this is the only dependency that requires a compiler. It does not ship a
# pre-compiled wheel. Perhaps we can write a python/numpy implementation?
from ushuffle import shuffle
def make_directory(dirpath, verbose=1):
    """Create a directory if it does not already exist.

    Parameters
    ----------
    dirpath : string
        String of path to directory.
    verbose : int, optional
        If truthy, print a message when the directory is created.

    Returns
    -------
    None

    Examples
    --------
    >>> dirpath = './results'
    >>> make_directory(dirpath)
    """
    if not os.path.isdir(dirpath):
        os.mkdir(dirpath)
        # BUG FIX: `verbose` was previously ignored and the message always printed.
        if verbose:
            print("making directory: " + dirpath)
print("making directory: " + dirpath)
def import_model(model_name):
    """Import a model module from cipher.model_zoo by name.

    Parameters
    ----------
    model_name : string
        Name of model in model_zoo to import.

    Returns
    -------
    imported model module

    Examples
    --------
    >>> model = import_model('deepbind')
    """
    # Runtime equivalent of `from cipher.model_zoo import <model_name>`.
    return __import__(
        "cipher.model_zoo." + model_name, globals(), locals(), [model_name], 0
    )
def list_model_zoo():
    """Print the names of the models available in cipher.model_zoo.

    Parameters
    ----------
    None

    Returns
    -------
    None

    Examples
    --------
    >>> list_model_zoo()
    """
    # BUG FIX: the original referenced an undefined name `model_zoo` (never
    # imported in this module) and called `loader.contents()`, which is not a
    # loader API -- the function raised NameError as written. Import the
    # package locally (so importing utils does not pull in the whole zoo) and
    # enumerate its modules with the supported pkgutil API.
    from cipher import model_zoo

    names = [
        name
        for _, name, _ in pkgutil.iter_modules(model_zoo.__path__)
        if not name.startswith("__")
    ]
    # print to std out
    print('Model list: ')
    for name in names:
        print(" %s"%(name))
def convert_one_hot(sequences, alphabet="ACGT") -> np.ndarray:
    """One-hot encode a flat array of equal-length sequences.

    **Important**: every character of every sequence *must* appear in
    `alphabet`, and all sequences must share the same length.

    Parameters
    ----------
    sequences : numpy.ndarray of strings
        One-dimensional array of sequence strings.
    alphabet : str
        Ordered alphabet defining the one-hot channels.

    Returns
    -------
    Numpy array of one-hot encoded sequences with shape
    `(len(sequences), len(sequences[0]), len(alphabet))`.

    Examples
    --------
    >>> convert_one_hot(["TGCA"], alphabet="ACGT")
    array([[[0., 0., 0., 1.],
            [0., 0., 1., 0.],
            [0., 1., 0., 0.],
            [1., 0., 0., 0.]]])
    """
    seq_arr = np.asanyarray(sequences)
    if seq_arr.ndim != 1:
        raise ValueError("array of sequences must be one-dimensional.")
    num_seqs = seq_arr.shape[0]
    seq_len = len(seq_arr[0])

    # Expand each string into a row of single characters.
    chars = np.zeros((num_seqs, seq_len), dtype="U1")
    for row in range(num_seqs):
        chars[row] = list(seq_arr[row])

    # Map characters to their alphabet indices (index 0 is the default,
    # so only indices 1.. need explicit assignment).
    indices = np.zeros(chars.shape, dtype=np.uint8)
    for code, symbol in enumerate(alphabet):
        if code:
            indices[chars == symbol] = code

    # Row `code` of the identity matrix is the one-hot vector for `code`.
    return np.eye(len(alphabet))[indices]
def convert_onehot_to_sequence(one_hot, alphabet="ACGT"):
    """Convert DNA/RNA sequences from one-hot to string representation.
    Parameters
    ----------
    one_hot : <numpy.ndarray>
        One-hot encoded sequences with shape (N, L, A).
    alphabet : <str>
        DNA = 'ACGT', RNA = 'ACGU'.
    Returns
    -------
    sequences : <numpy.ndarray>
        Numpy array of per-position letters, one row per sequence.
    Example
    -------
    >>> one_hot = np.array(
            [[[1., 0., 0., 0.],
            [0., 0., 1., 0.],
            [0., 1., 0., 0.]]]
        )
    >>> convert_onehot_to_sequence(one_hot)
    array([['A', 'G', 'C']], dtype=object)
    """
    assert alphabet in ["ACGT", "ACGU"], "Enter a valid alphabet"
    # Map each alphabet index to its letter, e.g. {0: 'A', 1: 'C', ...}.
    index_to_letter = dict(enumerate(alphabet))
    # Position of the hot entry along the alphabet axis, shape (N, L).
    hot_indices = np.argmax(one_hot, axis=2)
    # Decode each row of indices into letters via a pandas Series map.
    decoded = [pd.Series(row).map(index_to_letter) for row in hot_indices]
    return np.asarray(decoded)
def shuffle_onehot(one_hot, k=1):
    """Shuffle one-hot represented sequences while preserving k-let frequencies.
    Parameters
    ----------
    one_hot : np.ndarray
        One-hot encoded sequences with shape (N, L, A).
    k : int, optional
        k of the k-let frequencies to preserve. With k = 2 a dinucleotide
        shuffle is performed. Default is k = 1 (single-nucleotide shuffle).
    Returns
    -------
    np.ndarray
        One-hot represented shuffled sequences, same shape as `one_hot`.
    Raises
    ------
    ValueError
        If k < 1.
    Examples
    --------
    >>> seqs = ["ACGT", "GTCA"]
    >>> one_hot = convert_one_hot(seqs)
    >>> one_hot_shuffled = shuffle_onehot(one_hot)
    """
    if k == 1:
        # Single-nucleotide shuffle: permute positions of each sequence.
        seq_len = one_hot.shape[1]  # one_hot has shape (N, L, A)
        rng = np.random.default_rng()
        shuffled = [seq[rng.permutation(seq_len), :] for seq in one_hot]
        return np.array(shuffled)
    if k >= 2:
        # k-let shuffle: decode to strings, shuffle at byte level, re-encode.
        # (convert_onehot_to_sequence yields pandas Series; str.cat joins them.)
        strings = [s.str.cat() for s in convert_onehot_to_sequence(one_hot)]
        shuffled_strings = []
        for text in strings:
            text = text.upper()
            # k-let preserving shuffle (e.g. dinucleotide shuffle for k=2).
            shuffled_strings.append(shuffle(bytes(text, "utf-8"), k).decode("utf-8"))
        return convert_one_hot(shuffled_strings)
    raise ValueError("k must be an integer greater than or equal to 1")
def shuffle_sequences(sequences, k=1):
    """Shuffle string sequences while preserving k-let frequencies.
    Parameters
    ----------
    sequences : list of str
        Sequences in string representation.
    k : int, optional
        k of the k-let frequencies to preserve. With k = 2 a dinucleotide
        shuffle is performed. Default is k = 1 (single-nucleotide shuffle).
    Returns
    -------
    list of str
        Shuffled sequences, in the same order as the input.
    Examples
    --------
    >>> seqs = ["AGCGTTCAA", "TACGAATCG"]
    >>> seqs_shuffled = shuffle_sequences(seqs, k=2)  # dinucleotide shuffle
    >>> seqs_shuffled
    ['AAGTTCGCA', 'TCGATAACG']
    """
    shuffled = []
    for seq in sequences:
        upper = seq.upper()
        # k-let preserving byte-level shuffle, decoded back to str.
        shuffled.append(shuffle(bytes(upper, "utf-8"), k).decode("utf-8"))
    return shuffled
| [
"os.mkdir",
"pkgutil.get_loader",
"numpy.argmax",
"os.path.isdir",
"numpy.asarray",
"numpy.asanyarray",
"numpy.zeros",
"numpy.random.default_rng",
"numpy.array",
"pandas.Series",
"numpy.eye"
] | [((1576, 1605), 'pkgutil.get_loader', 'pkgutil.get_loader', (['model_zoo'], {}), '(model_zoo)\n', (1594, 1605), False, 'import pkgutil\n'), ((2693, 2717), 'numpy.asanyarray', 'np.asanyarray', (['sequences'], {}), '(sequences)\n', (2706, 2717), True, 'import numpy as np\n'), ((2973, 3022), 'numpy.zeros', 'np.zeros', (['(n_sequences, sequence_len)'], {'dtype': '"""U1"""'}), "((n_sequences, sequence_len), dtype='U1')\n", (2981, 3022), True, 'import numpy as np\n'), ((3159, 3192), 'numpy.zeros', 'np.zeros', (['s.shape'], {'dtype': 'np.uint8'}), '(s.shape, dtype=np.uint8)\n', (3167, 3192), True, 'import numpy as np\n'), ((4650, 4676), 'numpy.argmax', 'np.argmax', (['one_hot'], {'axis': '(2)'}), '(one_hot, axis=2)\n', (4659, 4676), True, 'import numpy as np\n'), ((4868, 4889), 'numpy.asarray', 'np.asarray', (['sequences'], {}), '(sequences)\n', (4878, 4889), True, 'import numpy as np\n'), ((564, 586), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (577, 586), False, 'import os\n'), ((596, 613), 'os.mkdir', 'os.mkdir', (['dirpath'], {}), '(dirpath)\n', (604, 613), False, 'import os\n'), ((3434, 3451), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (3440, 3451), True, 'import numpy as np\n'), ((6203, 6226), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (6224, 6226), True, 'import numpy as np\n'), ((6434, 6460), 'numpy.array', 'np.array', (['one_hot_shuffled'], {}), '(one_hot_shuffled)\n', (6442, 6460), True, 'import numpy as np\n'), ((4787, 4807), 'pandas.Series', 'pd.Series', (['seq_index'], {}), '(seq_index)\n', (4796, 4807), True, 'import pandas as pd\n')] |
"""
cv2.erode() method :
cv2.erode() method is used to perform erosion on the image. The
basic idea of erosion is just like soil erosion only, it erodes away the boundaries
of foreground object (Always try to keep foreground in white). It is normally
performed on binary images. It needs two inputs, one is our original image,
second one is called structuring element or kernel which decides the nature of
operation. A PIXEL IN THE ORIGINAL IMAGE (EITHER 1 OR 0) WILL BE considered 1 only if
all the pixels under the kernel is 1, otherwise it is eroded (made to zero).
"""
# Python program to explain cv2.erode() method
# importing cv2
import cv2
# importing numpy
import numpy as np
# path
path = r'../images/3.jpeg'
# Reading an image in default mode
image = cv2.imread(path)
# Window name in which image is displayed
window_name = 'Image'
# Creating kernel
kernel = np.ones((6, 6), np.uint8)
# Using cv2.erode() method
image1 = cv2.erode(image, kernel, cv2.BORDER_WRAP)
# stacking image side-by-side
res = np.hstack((image, image1))
# Displaying the image
cv2.imshow(window_name, res)
# de-allocate any associated memory usage
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.ones",
"numpy.hstack",
"cv2.imread",
"cv2.erode",
"cv2.imshow"
] | [((799, 815), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (809, 815), False, 'import cv2\n'), ((909, 934), 'numpy.ones', 'np.ones', (['(6, 6)', 'np.uint8'], {}), '((6, 6), np.uint8)\n', (916, 934), True, 'import numpy as np\n'), ((972, 1013), 'cv2.erode', 'cv2.erode', (['image', 'kernel', 'cv2.BORDER_WRAP'], {}), '(image, kernel, cv2.BORDER_WRAP)\n', (981, 1013), False, 'import cv2\n'), ((1051, 1077), 'numpy.hstack', 'np.hstack', (['(image, image1)'], {}), '((image, image1))\n', (1060, 1077), True, 'import numpy as np\n'), ((1102, 1130), 'cv2.imshow', 'cv2.imshow', (['window_name', 'res'], {}), '(window_name, res)\n', (1112, 1130), False, 'import cv2\n'), ((1210, 1233), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1231, 1233), False, 'import cv2\n'), ((1177, 1191), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1188, 1191), False, 'import cv2\n')] |
from dxtorch.dxtensor import Tensor
from .module import Module
import numpy as np
class Flatten(Module):
    """Flatten a contiguous range of dimensions of the input tensor.

    Mirrors the behaviour of torch.nn.Flatten: dimensions from
    `start_dim` through `end_dim` (inclusive) are collapsed into one.
    """
    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super().__init__()
        # First dimension to flatten (default 1 preserves the batch dim).
        self.start_dim = start_dim
        # Last dimension to flatten; -1 means the final dimension.
        self.end_dim = end_dim
    def forward(self, x: Tensor) -> Tensor:
        start_dim = self.start_dim
        end_dim = self.end_dim
        if end_dim == -1:
            end_dim = x.ndim - 1
        # Bug fix: slice with the normalized local `end_dim`. The original
        # read `self.end_dim` here, so with the default end_dim=-1 the slice
        # x.shape[start_dim:self.end_dim + 1] was empty (np.prod == 1) and
        # the trailing slice re-appended the whole shape, yielding a wrong
        # output shape.
        flattened = int(np.prod(x.shape[start_dim:end_dim + 1]))
        shape = x.shape[:start_dim] + (flattened,) + x.shape[end_dim + 1:]
        return x.reshape(shape)
| [
"numpy.prod"
] | [((461, 510), 'numpy.prod', 'np.prod', (['x.shape[self.start_dim:self.end_dim + 1]'], {}), '(x.shape[self.start_dim:self.end_dim + 1])\n', (468, 510), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
flows.py
This data library represents network flows
It stores cummulative information (not individual packets)
about flows in a MongoDB collection
"""
# For Python 2.x compatibility:
from __future__ import division
# General imports:
import sys
# For CSV operations:
import csv
# For packet methods:
import socket
# For flows dictionary:
from collections import OrderedDict
# For math operations:
import numpy as np
# Import dpkt for packet parsing:
import dpkt
# For logging configuration:
from baseclass import BaseClass
# For flow hashing:
import nethash
class Flows(BaseClass):
    """
    The Flows class represents cummulative information about flows
    (not individual packets)

    Maintains two OrderedDicts: flow_cache (currently active flows) and
    flow_archive (expired flows moved aside by the Flow helper object).
    """
    def __init__(self, config, mode):
        """
        Initialise the Flows Class
        Args:
           config: Config class object
           mode: the mode in which the packets should be organised
             into flow records. 'u' is for unidirectional, 'b' is for
             bidirectional.
        """
        # Required for BaseClass:
        self.config = config
        # Set up Logging with inherited base class method:
        self.configure_logging(__name__, "flows_logging_level_s",
                                       "flows_logging_level_c")
        # Mode is u for unidirectional or b for bidirectional:
        self.mode = mode
        # Python dictionaries to hold current and archived flow records:
        self.flow_cache = OrderedDict()
        self.flow_archive = OrderedDict()
        # Create a Flow object for flow operations:
        self.flow = Flow(config, self.logger, self.flow_cache, self.flow_archive, mode)
        # Counter for packets that we ignored for various reasons:
        self.packets_ignored = 0
        # Counter for all the processed packets:
        self.packets_processed = 0
    def ingest_pcap(self, dpkt_reader):
        """
        ingest packet data from dpkt reader of pcap file
        into flows.
        Args:
           dpkt_reader: dpkt pcap reader object (dpkt.pcap.Reader)
        """
        # How often (in packets) to emit a progress log message:
        infoFrequency = self.config.get_value("infoFrequency")
        # Process each packet in the pcap:
        for timestamp, packet in dpkt_reader:
            # Instantiate an instance of Packet class with packet info:
            packet = Packet(self.logger, timestamp, packet, self.mode)
            if packet.ingested:
                # Update the flow with packet info:
                self.flow.update(packet)
                self.packets_processed += 1
                if self.packets_processed % infoFrequency == 0:
                    self.logger.info("Already processed %d packets", self.packets_processed)
            else:
                # Packet failed to parse (non-IP, malformed etc):
                self.packets_ignored += 1
    def ingest_packet(self, hdr, packet):
        """
        ingest a packet from pcapy (live capture) into flows.

        Args:
           hdr: pcapy packet header (provides the capture timestamp)
           packet: raw packet bytes
        """
        # Get timestamp from header:
        # hdr.getts() returns (seconds, microseconds); combine into a
        # single float of seconds (hence the division by 1000000):
        sec, ms = hdr.getts()
        timestamp = sec + ms / 1000000
        # Instantiate an instance of Packet class with packet info:
        packet = Packet(self.logger, timestamp, packet, self.mode)
        infoFrequency = self.config.get_value("infoFrequency")
        if packet.ingested:
            # Update the flow with packet info:
            self.flow.update(packet)
            self.packets_processed += 1
            if self.packets_processed % infoFrequency == 0:
                self.logger.info("Already processed %d packets", self.packets_processed)
        else:
            # Packet failed to parse (non-IP, malformed etc):
            self.packets_ignored += 1
    def write(self, file_name):
        """
        Write all flow records out to CSV file

        Args:
           file_name: path of the CSV file to (over)write

        Archived flows are written first, then currently active flows.
        """
        with open(file_name, mode='w') as csv_file:
            if self.mode == 'u':
                # Unidirectional fields:
                fieldnames = ['src_ip', 'src_port', 'dst_ip', 'dst_port',
                            'proto', 'pktTotalCount', 'octetTotalCount',
                            'min_ps', 'max_ps', 'avg_ps', 'std_dev_ps',
                            'flowStart', 'flowEnd', 'flowDuration',
                            'min_piat', 'max_piat', 'avg_piat', 'std_dev_piat']
            else:
                # Bidirectional fields:
                fieldnames = ['src_ip', 'src_port', 'dst_ip', 'dst_port',
                            'proto', 'pktTotalCount', 'octetTotalCount',
                            'min_ps', 'max_ps', 'avg_ps', 'std_dev_ps',
                            'flowStart', 'flowEnd', 'flowDuration',
                            'min_piat', 'max_piat', 'avg_piat', 'std_dev_piat',
                            'f_pktTotalCount', 'f_octetTotalCount',
                            'f_min_ps', 'f_max_ps', 'f_avg_ps', 'f_std_dev_ps',
                            'f_flowStart', 'f_flowEnd', 'f_flowDuration',
                            'f_min_piat', 'f_max_piat', 'f_avg_piat',
                            'f_std_dev_piat',
                            'b_pktTotalCount', 'b_octetTotalCount',
                            'b_min_ps', 'b_max_ps', 'b_avg_ps', 'b_std_dev_ps',
                            'b_flowStart', 'b_flowEnd', 'b_flowDuration',
                            'b_min_piat', 'b_max_piat', 'b_avg_piat',
                            'b_std_dev_piat'
                            ]
            # extrasaction='ignore' drops per-flow working keys (length,
            # times, iats etc.) that are not listed in fieldnames:
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames, extrasaction='ignore')
            # Write header:
            writer.writeheader()
            # Write archive flows as rows:
            for flow_dict in self.flow_archive.items():
                writer.writerow(flow_dict[1])
            # Write current flows as rows:
            for flow_dict in self.flow_cache.items():
                writer.writerow(flow_dict[1])
    def stats(self):
        """
        Log the stats for flows

        Emits counts of current flow records, archived flow records,
        and ignored/processed packets via the class logger.
        """
        self.logger.info("Result statistics")
        self.logger.info("-----------------")
        self.logger.info("Flow Records: %s", len(self.flow_cache))
        self.logger.info("Additional Archived Flow Records: %s", len(self.flow_archive))
        self.logger.info("Ignored Packets: %s", self.packets_ignored)
        self.logger.info("Processed Packets: %s", self.packets_processed)
class Flow(object):
    """
    An object that represents summary for an individual flow
    Designed to be instantiated once by the Flows class
    and set to different flow context by packet object
    """
    def __init__(self, config, logger, flow_cache, flow_archive, mode):
        """
        Initialise with references to logger and flow_cache dictionary
        and mode of operation.
        Parameters:
            config: Config class object (supplies flow_expiration)
            logger: logger object
            flow_cache: reference to dictionary of current flows
            flow_archive: reference to dictionary of archived (expired) flows
            mode: b (bidirectional) or u (unidirectional).
        """
        self.logger = logger
        self.flow_cache = flow_cache
        self.flow_archive = flow_archive
        self.mode = mode
        # Get value from config:
        self.flow_expiration = config.get_value("flow_expiration")
        self.logger.info("Flows will expire after %s seconds of inactivity", self.flow_expiration)
        self.logger.debug("Flow object instantiated in mode=%s", mode)
    def update(self, packet):
        """
        Add or update flow in the flow_cache dictionary

        If the packet's flow exists and has not expired, update it in
        place; if it has expired, archive the old record and start a new
        one; otherwise create a new flow record.
        """
        if packet.flow_hash in self.flow_cache:
            # Found existing flow in dict, update it:
            if self._is_current_flow(packet, self.flow_cache[packet.flow_hash]):
                # Update standard flow parameters:
                self._update_found(packet)
                if self.mode == 'b':
                    # Also update bidirectional flow parameters:
                    self._update_found_bidir(packet)
            else:
                # Expired flow so archive it:
                self._archive_flow(packet)
                # Delete from dict:
                self.flow_cache.pop(packet.flow_hash, None)
                # Now create as a new flow based on current packet:
                self._create_new(packet)
                if self.mode == 'b':
                    self._create_new_bidir(packet)
        else:
            # Flow doesn't exist yet, create it:
            self._create_new(packet)
            if self.mode == 'b':
                self._create_new_bidir(packet)
    def _update_found(self, packet):
        """
        Update existing flow in flow_cache dictionary with standard
        (non-bidirectional) parameters

        Assumes the flow already holds at least one packet, so a
        packet-inter-arrival-time can always be appended.
        """
        flow_hash = packet.flow_hash
        flow_dict = self.flow_cache[flow_hash]
        # Store size of this packet:
        flow_dict['length'].append(packet.length)
        # Update the count of packets and octets:
        flow_dict['pktTotalCount'] += 1
        flow_dict['octetTotalCount'] += packet.length
        # Update the min/max/avg/std_dev of the packet sizes:
        flow_dict['min_ps'] = min(flow_dict['length'])
        flow_dict['max_ps'] = max(flow_dict['length'])
        flow_dict['avg_ps'] = flow_dict['octetTotalCount'] / flow_dict['pktTotalCount']
        flow_dict['std_dev_ps'] = np.std(flow_dict['length'])
        # Store the timestamps of the newly captured packet:
        flow_dict['times'].append(packet.timestamp)
        # As we have now at least 2 packets in the flow, we can calculate the packet-inter-arrival-time.
        # We decrement the packet counter every single time, otherwise it would start from 2
        # The first piat will be the current timestamp minus the timestamp of the previous packet:
        flow_dict['iats'].append(flow_dict['times'][-1] \
                                      - flow_dict['times'][-2])
        # Update the flow end/duration (the start does not change)
        flow_dict['flowEnd'] = packet.timestamp
        flow_dict['flowDuration'] = (packet.timestamp - flow_dict['flowStart'])
        # at last update the min/max/avg/std_dev of packet-inter-arrival-times
        flow_dict['min_piat'] = min(flow_dict['iats'])
        flow_dict['max_piat'] = max(flow_dict['iats'])
        flow_dict['avg_piat'] = sum(flow_dict['iats']) / (flow_dict['pktTotalCount'] - 1)
        flow_dict['std_dev_piat'] = np.std(flow_dict['iats'])
    def _update_found_bidir(self, packet):
        """
        Update existing flow in flow_cache dictionary with
        bidirectional parameters (separately to standard parameters)
        """
        flow_hash = packet.flow_hash
        flow_dict = self.flow_cache[flow_hash]
        # Determine packet direction (f=forward, b=backward):
        direction = self.packet_dir(packet, flow_dict)
        # Update keys dependant on the direction (f or b):
        if direction == 'f':
            # Forward (f) direction
            # Store size of this packet:
            flow_dict['f_length'].append(packet.length)
            # Update the count of packets and octets:
            flow_dict['f_pktTotalCount'] += 1
            flow_dict['f_octetTotalCount'] += packet.length
            # Update the min/max/avg/std_dev of the packet sizes:
            flow_dict['f_min_ps'] = min(flow_dict['f_length'])
            flow_dict['f_max_ps'] = max(flow_dict['f_length'])
            flow_dict['f_avg_ps'] = flow_dict['f_octetTotalCount'] / flow_dict['f_pktTotalCount']
            flow_dict['f_std_dev_ps'] = np.std(flow_dict['f_length'])
            # Store the timestamps of the newly captured packets:
            flow_dict['f_times'].append(packet.timestamp)
            # Do inter-packet arrival time if have at least 2 packets:
            if (flow_dict['f_pktTotalCount'] > 1):
                flow_dict['f_iats'].append(flow_dict['f_times'][-1] \
                                      - flow_dict['f_times'][-2])
            # Update the flow end/duration (the start does not change)
            flow_dict['f_flowEnd'] = packet.timestamp
            flow_dict['f_flowDuration'] = (packet.timestamp - flow_dict['f_flowStart'])
            # at last update the min/max/avg/std_dev of packet-inter-arrival-times
            flow_dict['f_min_piat'] = min(flow_dict['f_iats'])
            flow_dict['f_max_piat'] = max(flow_dict['f_iats'])
            flow_dict['f_avg_piat'] = sum(flow_dict['f_iats']) / (flow_dict['f_pktTotalCount'] - 1)
            flow_dict['f_std_dev_piat'] = np.std(flow_dict['f_iats'])
        else:
            # Backward (b) direction
            # Note: this may be the first time we've see backwards dir packet.
            # Store size of this packet:
            flow_dict['b_length'].append(packet.length)
            # Update the count of packets and octets:
            flow_dict['b_pktTotalCount'] += 1
            flow_dict['b_octetTotalCount'] += packet.length
            # Update the min/max/avg/std_dev of the packet sizes:
            flow_dict['b_min_ps'] = min(flow_dict['b_length'])
            flow_dict['b_max_ps'] = max(flow_dict['b_length'])
            flow_dict['b_avg_ps'] = flow_dict['b_octetTotalCount'] / flow_dict['b_pktTotalCount']
            flow_dict['b_std_dev_ps'] = np.std(flow_dict['b_length'])
            # Store the timestamps of the newly captured packets:
            flow_dict['b_times'].append(packet.timestamp)
            # Do inter-packet arrival time if have at least 2 packets:
            if (flow_dict['b_pktTotalCount'] < 2):
                # First time, so set some stuff:
                flow_dict['b_flowStart'] = packet.timestamp
            else:
                # Not first time:
                flow_dict['b_iats'].append(flow_dict['b_times'][-1] \
                                      - flow_dict['b_times'][-2])
                flow_dict['b_flowDuration'] = (packet.timestamp - flow_dict['b_flowStart'])
                # Update the min/max/avg/std_dev of packet-inter-arrival-times:
                flow_dict['b_min_piat'] = min(flow_dict['b_iats'])
                flow_dict['b_max_piat'] = max(flow_dict['b_iats'])
                flow_dict['b_avg_piat'] = sum(flow_dict['b_iats']) / (flow_dict['b_pktTotalCount'] - 1)
                flow_dict['b_std_dev_piat'] = np.std(flow_dict['b_iats'])
            # Update the flow end/duration (the start does not change):
            flow_dict['b_flowEnd'] = packet.timestamp
    def _create_new(self, packet):
        """
        Create new flow in flow_cache dictionary with standard
        (non-bidirectional) parameters

        Seeds all statistics from this single first packet; piat
        statistics start at 0 until a second packet arrives.
        """
        flow_hash = packet.flow_hash
        # Create new key etc in flow dict for this flow:
        # Initialise the new flow key:
        self.flow_cache[flow_hash] = {}
        flow_dict = self.flow_cache[flow_hash]
        # Store the flow parameters for packet header values:
        flow_dict['src_ip'] = packet.ip_src
        flow_dict['dst_ip'] = packet.ip_dst
        flow_dict['proto'] = packet.proto
        flow_dict['src_port'] = packet.tp_src
        flow_dict['dst_port'] = packet.tp_dst
        # Store the size of the first packet:
        flow_dict['length'] = []
        flow_dict['length'].append(packet.length)
        # Store the packet size and number of octets:
        flow_dict['pktTotalCount'] = 1
        flow_dict['octetTotalCount'] = packet.length
        # Set the min/max/avg/std_dev of packet sizes
        # (in case there will be no more packets belonging to the flow):
        flow_dict['min_ps'] = packet.length
        flow_dict['max_ps'] = packet.length
        flow_dict['avg_ps'] = packet.length
        flow_dict['std_dev_ps'] = np.std(flow_dict['length'])
        # Store the timestamps of the packets:
        flow_dict['times'] = []
        flow_dict['times'].append(packet.timestamp)
        flow_dict['iats'] = []
        # store the flow start/end/duration
        flow_dict['flowStart'] = packet.timestamp
        flow_dict['flowEnd'] = packet.timestamp
        flow_dict['flowDuration'] = 0
        # Set the min/max/avg/std_dev of packet-inter arrival times
        # (in case there will be no more packets belonging to the flow):
        flow_dict['min_piat'] = 0
        flow_dict['max_piat'] = 0
        flow_dict['avg_piat'] = 0
        flow_dict['std_dev_piat'] = 0
    def _create_new_bidir(self, packet):
        """
        Add bidir parameters to new flow in flow_cache dictionary

        Note: since _create_new records this packet's source IP as the
        flow's src_ip, the creating packet is classified as forward.
        """
        flow_hash = packet.flow_hash
        flow_dict = self.flow_cache[flow_hash]
        # Set up keys in preparation:
        flow_dict['f_length'] = []
        flow_dict['f_times'] = []
        flow_dict['f_iats'] = []
        flow_dict['b_length'] = []
        flow_dict['b_times'] = []
        flow_dict['b_iats'] = []
        flow_dict['b_pktTotalCount'] = 0
        flow_dict['b_octetTotalCount'] = 0
        flow_dict['b_min_ps'] = 0
        flow_dict['b_max_ps'] = 0
        flow_dict['b_avg_ps'] = 0
        flow_dict['b_std_dev_ps'] = 0
        flow_dict['b_flowStart'] = 0
        flow_dict['b_flowEnd'] = 0
        flow_dict['b_flowDuration'] = 0
        flow_dict['b_min_piat'] = 0
        flow_dict['b_max_piat'] = 0
        flow_dict['b_avg_piat'] = 0
        flow_dict['b_std_dev_piat'] = 0
        # Determine packet direction (f=forward, b=backward):
        direction = self.packet_dir(packet, flow_dict)
        # Update keys dependant on the direction (f or b):
        if direction == 'f':
            # Forward (f) direction
            # Store the size of the first packet:
            flow_dict['f_length'].append(packet.length)
            # Store the packet size and number of octets:
            flow_dict['f_pktTotalCount'] = 1
            flow_dict['f_octetTotalCount'] = packet.length
            # Set the min/max/avg/std_dev of packet sizes
            # (in case there will be no more packets belonging to the flow):
            flow_dict['f_min_ps'] = packet.length
            flow_dict['f_max_ps'] = packet.length
            flow_dict['f_avg_ps'] = packet.length
            flow_dict['f_std_dev_ps'] = np.std(flow_dict['f_length'])
            # Store the timestamps of the packets:
            flow_dict['f_times'].append(packet.timestamp)
            # store the flow start/end/duration
            flow_dict['f_flowStart'] = packet.timestamp
            flow_dict['f_flowEnd'] = packet.timestamp
            flow_dict['f_flowDuration'] = 0
            # Set the min/max/avg/std_dev of packet-inter arrival times
            # (in case there will be no more packets belonging to the flow):
            flow_dict['f_min_piat'] = 0
            flow_dict['f_max_piat'] = 0
            flow_dict['f_avg_piat'] = 0
            flow_dict['f_std_dev_piat'] = 0
        else:
            # Backward (b) direction
            # Store the size of the first packet:
            flow_dict['b_length'].append(packet.length)
            # Store the packet size and number of octets:
            flow_dict['b_pktTotalCount'] = 1
            flow_dict['b_octetTotalCount'] = packet.length
            # Set the min/max/avg/std_dev of packet sizes
            # (in case there will be no more packets belonging to the flow):
            flow_dict['b_min_ps'] = packet.length
            flow_dict['b_max_ps'] = packet.length
            flow_dict['b_avg_ps'] = packet.length
            flow_dict['b_std_dev_ps'] = np.std(flow_dict['b_length'])
            # Store the timestamps of the packets:
            flow_dict['b_times'].append(packet.timestamp)
            # store the flow start/end/duration
            flow_dict['b_flowStart'] = packet.timestamp
            flow_dict['b_flowEnd'] = packet.timestamp
            flow_dict['b_flowDuration'] = 0
            # Set the min/max/avg/std_dev of packet-inter arrival times
            # (in case there will be no more packets belonging to the flow):
            flow_dict['b_min_piat'] = 0
            flow_dict['b_max_piat'] = 0
            flow_dict['b_avg_piat'] = 0
            flow_dict['b_std_dev_piat'] = 0
    def _is_current_flow(self, packet, flow_dict):
        """
        Check if flow is current or has expired.
        Only check if the flow hash is already known
        True = flow has not expired
        False = flow has expired, i.e. PIAT from previous packet
        in flow is greater than flow expiration threshold
        """
        if flow_dict['iats']:
            # Flow has 2+ packets; compare against the last packet time:
            if (packet.timestamp - flow_dict['times'][-1]) > self.flow_expiration:
                # Flow has expired:
                return False
            else:
                # Flow has not expired:
                return True
        elif flow_dict['pktTotalCount'] == 1:
            # Was only 1 packet so no PIAT so use packet timestamp
            if (packet.timestamp - flow_dict['flowStart']) > self.flow_expiration:
                # Flow has expired:
                return False
            else:
                # Flow has not expired:
                return True
        else:
            # No packets???
            self.logger.warning("Strange condition...")
            return True
    def _archive_flow(self, packet):
        """
        Move a flow record to archive dictionary, indexed by a
        longer more unique key

        The archive key additionally includes the flow start timestamp
        so that successive expired flows with the same 5-tuple do not
        collide.
        """
        flow_hash = packet.flow_hash
        flow_dict = self.flow_cache[flow_hash]
        start_timestamp = flow_dict['flowStart']
        ip_src = flow_dict['src_ip']
        ip_dst = flow_dict['dst_ip']
        proto = flow_dict['proto']
        tp_src = flow_dict['src_port']
        tp_dst = flow_dict['dst_port']
        # Create new more-specific hash key for archiving:
        if self.mode == 'b':
            if proto == 6 or proto == 17:
                # Generate a directional 6-tuple flow_hash:
                new_hash = nethash.hash_b6((ip_src,
                                ip_dst, proto, tp_src,
                                tp_dst, start_timestamp))
            else:
                # Generate a directional 4-tuple flow_hash:
                new_hash = nethash.hash_b4((ip_src,
                                ip_dst, proto,
                                start_timestamp))
        elif self.mode == 'u':
            if proto == 6 or proto == 17:
                # Generate a directional 6-tuple flow_hash:
                new_hash = nethash.hash_u6((ip_src,
                                ip_dst, proto, tp_src,
                                tp_dst, start_timestamp))
            else:
                # Generate a directional 4-tuple flow_hash:
                new_hash = nethash.hash_u4((ip_src,
                                ip_dst, proto,
                                start_timestamp))
        # Check key isn't already used in archive:
        if new_hash in self.flow_archive:
            self.logger.warning("archive duplicate flow key=%s", new_hash)
            return
        # Copy to flow archive:
        self.flow_archive[new_hash] = flow_dict
        # Note: removal from the current flow_cache is done by the
        # caller (update()), not here.
    def packet_dir(self, packet, flow_dict):
        """
        Determine packet direction (f=forward, b=backward)
        relative to the flow's recorded src_ip.
        """
        if packet.ip_src == flow_dict['src_ip']:
            return 'f'
        elif packet.ip_src == flow_dict['dst_ip']:
            return 'b'
        else:
            self.logger.critical("Uh oh, something went wrong. Exiting")
            sys.exit()
class Packet(object):
    """
    An object that represents a packet

    Parses Ethernet/IP/transport headers with dpkt and computes a
    directional flow hash. The `ingested` attribute is True only when
    parsing succeeded and the packet was IP.
    """
    def __init__(self, logger, timestamp, packet, mode):
        """
        Parameters:
            logger: logger object
            timestamp: when packet was recorded
            packet: raw packet bytes (parsed here with dpkt)
            mode: b (bidirectional) or u (unidirectional). Used for
              hash calculation
        """
        self.logger = logger
        #*** Initialise packet variables:
        self.flow_hash = 0
        self.timestamp = timestamp
        # self.length = len(packet)
        self.ip_src = 0
        self.ip_dst = 0
        self.proto = 0
        self.tp_src = 0
        self.tp_dst = 0
        self.tp_flags = 0
        self.tp_seq_src = 0
        self.tp_seq_dst = 0
        # Set to True only at the end of successful parsing:
        self.ingested = False
        try:
            # Read packet into dpkt to parse headers:
            eth = dpkt.ethernet.Ethernet(packet)
        except:
            # Skip Packet if unable to parse:
            self.logger.error("failed to unpack packet, skipping...")
            return
        # Get the IP packet
        ip = eth.data
        # Get the length of IPv4 packet:
        # NOTE(review): IPv4 uses the header's total length field (ip.len,
        # which includes the IP header) while IPv6 below uses the payload
        # length only — confirm this asymmetry is intended.
        if isinstance(eth.data, dpkt.ip.IP):
            self.length = ip.len
        # Get the length of IPv6 packet:
        elif isinstance(eth.data, dpkt.ip6.IP6):
            self.length = len(ip.data)
        # Ignore if non-IP packet:
        else:
            return
        # Handle IPv4 and IPv6:
        # (inet_ntop raises ValueError for a 16-byte address with AF_INET,
        # so the except branch handles IPv6 addresses)
        try:
            self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)
            self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)
        except ValueError:
            self.ip_src = socket.inet_ntop(socket.AF_INET6, ip.src)
            self.ip_dst = socket.inet_ntop(socket.AF_INET6, ip.dst)
        # Transport layer:
        self.proto = ip.p
        if ip.p == 6:
            # TCP
            tcp = ip.data
            self.tp_src = tcp.sport
            self.tp_dst = tcp.dport
            self.tp_flags = tcp.flags
            self.tp_seq_src = tcp.seq
            self.tp_seq_dst = tcp.ack
        elif ip.p == 17:
            # UDP
            udp = ip.data
            self.tp_src = udp.sport
            self.tp_dst = udp.dport
            self.tp_flags = ""
            self.tp_seq_src = 0
            self.tp_seq_dst = 0
        else:
            # Not a transport layer that we understand, keep going:
            pass
        if mode == 'b':
            if self.proto == 6 or self.proto == 17:
                # Generate a directional 5-tuple flow_hash:
                self.flow_hash = nethash.hash_b5((self.ip_src,
                                        self.ip_dst, self.proto, self.tp_src,
                                        self.tp_dst))
            else:
                # Generate a directional 3-tuple flow_hash:
                self.flow_hash = nethash.hash_b3((self.ip_src,
                                        self.ip_dst, self.proto))
        elif mode == 'u':
            if self.proto == 6 or self.proto == 17:
                # Generate a directional 5-tuple flow_hash:
                self.flow_hash = nethash.hash_u5((self.ip_src,
                                        self.ip_dst, self.proto, self.tp_src,
                                        self.tp_dst))
            else:
                # Generate a directional 3-tuple flow_hash:
                self.flow_hash = nethash.hash_u3((self.ip_src,
                                        self.ip_dst, self.proto))
        else:
            logger.critical("unsupported mode=%s", mode)
            sys.exit()
        # Yay, packet has been ingested:
        self.ingested = True
    def tcp_fin(self):
        """
        Does the current packet have the TCP FIN flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_FIN != 0
    def tcp_syn(self):
        """
        Does the current packet have the TCP SYN flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_SYN != 0
    def tcp_rst(self):
        """
        Does the current packet have the TCP RST flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_RST != 0
    def tcp_psh(self):
        """
        Does the current packet have the TCP PSH flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_PUSH != 0
    def tcp_ack(self):
        """
        Does the current packet have the TCP ACK flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_ACK != 0
    def tcp_urg(self):
        """
        Does the current packet have the TCP URG flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_URG != 0
    def tcp_ece(self):
        """
        Does the current packet have the TCP ECE flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_ECE != 0
    def tcp_cwr(self):
        """
        Does the current packet have the TCP CWR flag set?
        """
        return self.tp_flags & dpkt.tcp.TH_CWR != 0
| [
"nethash.hash_b6",
"nethash.hash_b5",
"nethash.hash_b3",
"nethash.hash_u4",
"numpy.std",
"nethash.hash_u6",
"dpkt.ethernet.Ethernet",
"socket.inet_ntop",
"nethash.hash_u3",
"nethash.hash_u5",
"collections.OrderedDict",
"nethash.hash_b4",
"sys.exit",
"csv.DictWriter"
] | [((2009, 2022), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2020, 2022), False, 'from collections import OrderedDict\n'), ((2051, 2064), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2062, 2064), False, 'from collections import OrderedDict\n'), ((9605, 9632), 'numpy.std', 'np.std', (["flow_dict['length']"], {}), "(flow_dict['length'])\n", (9611, 9632), True, 'import numpy as np\n'), ((10649, 10674), 'numpy.std', 'np.std', (["flow_dict['iats']"], {}), "(flow_dict['iats'])\n", (10655, 10674), True, 'import numpy as np\n'), ((15888, 15915), 'numpy.std', 'np.std', (["flow_dict['length']"], {}), "(flow_dict['length'])\n", (15894, 15915), True, 'import numpy as np\n'), ((5836, 5906), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'fieldnames', 'extrasaction': '"""ignore"""'}), "(csv_file, fieldnames=fieldnames, extrasaction='ignore')\n", (5850, 5906), False, 'import csv\n'), ((11782, 11811), 'numpy.std', 'np.std', (["flow_dict['f_length']"], {}), "(flow_dict['f_length'])\n", (11788, 11811), True, 'import numpy as np\n'), ((12744, 12771), 'numpy.std', 'np.std', (["flow_dict['f_iats']"], {}), "(flow_dict['f_iats'])\n", (12750, 12771), True, 'import numpy as np\n'), ((13489, 13518), 'numpy.std', 'np.std', (["flow_dict['b_length']"], {}), "(flow_dict['b_length'])\n", (13495, 13518), True, 'import numpy as np\n'), ((18314, 18343), 'numpy.std', 'np.std', (["flow_dict['f_length']"], {}), "(flow_dict['f_length'])\n", (18320, 18343), True, 'import numpy as np\n'), ((19612, 19641), 'numpy.std', 'np.std', (["flow_dict['b_length']"], {}), "(flow_dict['b_length'])\n", (19618, 19641), True, 'import numpy as np\n'), ((24531, 24561), 'dpkt.ethernet.Ethernet', 'dpkt.ethernet.Ethernet', (['packet'], {}), '(packet)\n', (24553, 24561), False, 'import dpkt\n'), ((25153, 25193), 'socket.inet_ntop', 'socket.inet_ntop', (['socket.AF_INET', 'ip.src'], {}), '(socket.AF_INET, ip.src)\n', (25169, 25193), False, 'import socket\n'), ((25220, 
25260), 'socket.inet_ntop', 'socket.inet_ntop', (['socket.AF_INET', 'ip.dst'], {}), '(socket.AF_INET, ip.dst)\n', (25236, 25260), False, 'import socket\n'), ((14500, 14527), 'numpy.std', 'np.std', (["flow_dict['b_iats']"], {}), "(flow_dict['b_iats'])\n", (14506, 14527), True, 'import numpy as np\n'), ((22029, 22102), 'nethash.hash_b6', 'nethash.hash_b6', (['(ip_src, ip_dst, proto, tp_src, tp_dst, start_timestamp)'], {}), '((ip_src, ip_dst, proto, tp_src, tp_dst, start_timestamp))\n', (22044, 22102), False, 'import nethash\n'), ((22288, 22345), 'nethash.hash_b4', 'nethash.hash_b4', (['(ip_src, ip_dst, proto, start_timestamp)'], {}), '((ip_src, ip_dst, proto, start_timestamp))\n', (22303, 22345), False, 'import nethash\n'), ((23678, 23688), 'sys.exit', 'sys.exit', ([], {}), '()\n', (23686, 23688), False, 'import sys\n'), ((25314, 25355), 'socket.inet_ntop', 'socket.inet_ntop', (['socket.AF_INET6', 'ip.src'], {}), '(socket.AF_INET6, ip.src)\n', (25330, 25355), False, 'import socket\n'), ((25382, 25423), 'socket.inet_ntop', 'socket.inet_ntop', (['socket.AF_INET6', 'ip.dst'], {}), '(socket.AF_INET6, ip.dst)\n', (25398, 25423), False, 'import socket\n'), ((26234, 26320), 'nethash.hash_b5', 'nethash.hash_b5', (['(self.ip_src, self.ip_dst, self.proto, self.tp_src, self.tp_dst)'], {}), '((self.ip_src, self.ip_dst, self.proto, self.tp_src, self.\n tp_dst))\n', (26249, 26320), False, 'import nethash\n'), ((26507, 26562), 'nethash.hash_b3', 'nethash.hash_b3', (['(self.ip_src, self.ip_dst, self.proto)'], {}), '((self.ip_src, self.ip_dst, self.proto))\n', (26522, 26562), False, 'import nethash\n'), ((27226, 27236), 'sys.exit', 'sys.exit', ([], {}), '()\n', (27234, 27236), False, 'import sys\n'), ((22586, 22659), 'nethash.hash_u6', 'nethash.hash_u6', (['(ip_src, ip_dst, proto, tp_src, tp_dst, start_timestamp)'], {}), '((ip_src, ip_dst, proto, tp_src, tp_dst, start_timestamp))\n', (22601, 22659), False, 'import nethash\n'), ((22845, 22902), 'nethash.hash_u4', 'nethash.hash_u4', 
(['(ip_src, ip_dst, proto, start_timestamp)'], {}), '((ip_src, ip_dst, proto, start_timestamp))\n', (22860, 22902), False, 'import nethash\n'), ((26774, 26860), 'nethash.hash_u5', 'nethash.hash_u5', (['(self.ip_src, self.ip_dst, self.proto, self.tp_src, self.tp_dst)'], {}), '((self.ip_src, self.ip_dst, self.proto, self.tp_src, self.\n tp_dst))\n', (26789, 26860), False, 'import nethash\n'), ((27047, 27102), 'nethash.hash_u3', 'nethash.hash_u3', (['(self.ip_src, self.ip_dst, self.proto)'], {}), '((self.ip_src, self.ip_dst, self.proto))\n', (27062, 27102), False, 'import nethash\n')] |
from scipy.integrate import odeint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import networkx as nx
class SavingsCoreBest:
    """Agent-based Ramsey/RCK-type savings model on a network.

    Households hold capital, earn income from capital rent and wages in a
    Cobb-Douglas economy (integrated with scipy's odeint), and at random
    (Poissonian) times imitate the savings rate of their best-performing
    network neighbor, with imitation noise, optional exploration, optional
    fixed-savings households, and an optional future-consumption term in
    the imitation fitness.
    """
    def __init__(
        self,
        adjacency=None,
        savings_rate=None,
        capital=None,
        tau=3,
        phi=0.01,
        eps=0.01,
        P=1.0,
        b=1.0,
        d=0.1,
        alpha=2.0 / 3,
        r_b=0,
        test=False,
        e_trajectory_output=True,
        macro_trajectory_output=True,
        delta_s=0.0,
        w_future=0.0,
        # for fixed households:
        pfixed=0.0, # expected fraction of households with fixed savings rate
        rfixed=0.0, # their discount rate used for choosing their savings rate
        sfixed=None, # their savings rate, defaults to original RCK optimal
        # savings rate given discount rate rfixed
        # exploration:
        pexplore=0.0,
    ):
        # copying threshold
        self.delta_s = delta_s
        # future thinking parameter
        self.pfuture = w_future
        # General Parameters
        # turn output for debugging on or off
        self.debug = test
        # toggle e_trajectory output
        self.e_trajectory_output = e_trajectory_output
        self.macro_trajectory_output = macro_trajectory_output
        # movie output, default off
        self.movie = False
        # General Variables
        # System Time
        self.t = 0.0
        # Step counter for output
        self.steps = 0
        # list to save e_trajectory of output variables
        self.e_trajectory = []
        # list to save macroscopic quantities to compare with
        # moment closure / pair based proxy approach
        self.macro_trajectory = []
        # dictionary for final state
        self.final_state = {}
        # Household parameters
        # mean waiting time between social updates
        self.tau = tau
        # the std of labor distribution, corresponds to sigma_L
        self.phi = phi
        # modulo of the maximum imitation error, corresponds to gamma
        self.eps = eps
        # number of households
        self.n = adjacency.number_of_nodes()
        # waiting times between savings rate change events for each household
        self.waiting_times = np.random.exponential(scale=self.tau, size=self.n)
        # adjacency matrix between households
        # NOTE(review): nx.adj_matrix is deprecated in recent networkx
        # releases (use nx.adjacency_matrix) — verify the pinned version.
        self.neighbors = nx.adj_matrix(adjacency).toarray()
        self.G = adjacency
        # investment_decisions as indices of possible_opinions
        self.savings_rate = np.array(savings_rate)
        # household capital in clean capital
        if capital is None:
            self.capital = np.ones(self.n)
        else:
            self.capital = capital
        # household income (for social update)
        self.income = np.zeros(self.n)
        # for Cobb Douglas economics:
        # Solow residual
        self.b = b
        # labor elasticity
        self.alpha = alpha
        # capital elasticity
        self.beta = 1.0 - self.alpha
        # capital depreciation rate
        self.d = d
        # population growth rate
        self.r_b = r_b
        # total capital (supply)
        self.K = self.capital.sum()
        # total labor (supply)
        self.P = np.random.normal(P, P * self.phi, self.n)
        # NOTE(review): Psum is taken from the first draw and is NOT
        # refreshed if P is redrawn below — confirm this is intended.
        self.Psum = sum(self.P)
        # redraw labor endowments until all are non-negative
        while any(self.P < 0):
            self.P = np.random.normal(P, P * self.phi, self.n)
        # Production
        self.Y = 0.0
        # wage
        self.w = 0.0
        # capital rent
        self.r = 0.0
        if self.e_trajectory_output:
            self.init_e_trajectory()
        if self.macro_trajectory_output:
            self.init_macro_trajectory()
        self.s_trajectory = pd.DataFrame(columns=range(self.n))
        # fixed households:
        self.pfixed = pfixed
        self.is_fixed = np.random.rand(self.n) < pfixed
        print("using", self.is_fixed.sum(), "fixed households")
        # NOTE(review): self.rfixed is only assigned when sfixed is None;
        # callers passing an explicit sfixed will find no rfixed attribute.
        if sfixed is None:
            self.rfixed = rfixed
            sfixed = self.beta * d / (rfixed + d)
            print(f"fixed households have discount rate {rfixed:.3f}")
            print(f"and use the corresp. equil. savings rate of the orig. RCK model, {sfixed:.3f}")
        self.savings_rate[self.is_fixed] = self.sfixed = sfixed
        # exploration:
        self.pexplore = pexplore
    def c_future(self, sj, Li, Ki=0):
        """simple estimation future consumption
        * estimates how the consumption of the world looks one tau into the future
        * based on the current visible savings rates, r,w, delta and own labor Li and capital Ki"""
        r = self.r
        w = self.w
        delta = self.d
        tau = self.tau
        # per-capita capital growth rate under savings rate sj
        # NOTE(review): eta_j == 0 (r * sj == delta) divides by zero below.
        eta_j = r * sj - delta
        return (1.0 - sj) * (
            r * (Ki + sj * w * Li / eta_j) * np.exp(eta_j * tau)
            + w * Li * (1.0 - r * sj / eta_j)
        )
    def run(self, t_max=200.0):
        """
        run model for t<t_max
        Parameter
        ---------
        t_max : float
            The maximum time the system is integrated [Default: 100]
            before run() exits.
        """
        for t_max_i in tqdm(np.linspace(0, t_max, 1000)):
            while self.t < t_max_i:
                # 1 find update candidate and respective update time
                (candidate, neighbor, _, update_time) = self.find_update_candidates()
                # 2 integrate economic model until t=update_time:
                self.update_economy(update_time)
                # 3 update savings rate
                self.update_savings_rate(candidate, neighbor)
        # save final state to dictionary
        self.final_state = {
            "adjacency": self.neighbors,
            "savings_rate": self.savings_rate,
            "capital": self.capital,
            "tau": self.tau,
            "phi": self.phi,
            "eps": self.eps,
            "P": self.P,
            "b": self.b,
            "d": self.d,
            "test": self.debug,
        }
        return self.final_state
    def economy_dot(self, x0, t):
        """
        economic model assuming Cobb-Douglas production:
        Y = b P^pi K^kappa
        and no profits:
        Y - w P - r K = 0,
        Parameters:
        -----------
        x0 : list[float]
            state vector of the system of length
            N + 1. First N entries are
            household capital [0:n],
            the last entry is total population.
        t : float
            the system time.
        Returns:
        --------
        x1 : list[floats]
            updated state vector of the system of length
            N + 1. First N entries are changes
            household capital [n:2N],
            the last entry is the change in total population
        """
        capital = x0[:self.n]  # np.where(x0[0:self.n] > 0, x0[0:self.n], np.full(self.n, self.epsilon.eps))
        P = self.Psum
        K = capital.sum()
        # marginal products of labor and capital (competitive factor prices)
        self.w = self.b * self.alpha * P ** (self.alpha - 1) * K ** self.beta
        self.r = self.b * self.beta * P ** self.alpha * K ** (self.beta - 1)
        self.K = K
        # NOTE(review): income uses self.capital (state at last event), not
        # the integrator's current `capital` — confirm this is intended.
        self.income = self.r * self.capital + self.w * self.P
        P_dot = self.r_b * P
        # capital accumulation: savings minus depreciation
        capital_dot = self.savings_rate * self.income - self.capital * self.d
        return list(capital_dot) + [P_dot]
    def init_movie(self):
        """Set up the live diagnostic figure (capital vs. savings rate)."""
        self.fig = plt.figure(figsize=(6, 6), dpi=100)
        # savings-rate grid, excluding the singular endpoints 0 and 1
        self.sss = np.linspace(0, 1, 101)[1:-1]
        (self.curve,) = plt.semilogy(self.sss, 1e2 + 0 * self.sss, "b-", alpha=0.1)
        (self.line_K,) = plt.semilogy(
            [0, 1], [self.capital.mean(), self.capital.mean()], "k-", alpha=0.5
        )
        # income-weighted mean savings rate
        s = np.average(self.savings_rate, weights=self.income)
        (self.line_s,) = plt.semilogy([s, s], [1e0, 1e10], "k-", alpha=0.5)
        plt.semilogy([self.sfixed, self.sfixed], [1e0, 1e10], "k--", alpha=0.5)
        (self.scattermax,) = plt.semilogy(
            [self.savings_rate[0]], [self.capital[0]], "b.", alpha=0.1, ms=30
        )
        (self.scatter,) = plt.semilogy(
            self.savings_rate[:-3], self.capital[:-3], "k.", alpha=0.1, ms=5
        )
        # NOTE(review): index slices [-3:] and [0] suggest a specific network
        # layout (hub + leaves) baked into the plot — confirm with caller.
        (self.scatterleaves,) = plt.semilogy(
            self.savings_rate[-3:], self.capital[-3:], "g.", alpha=1, ms=7
        )
        (self.scatterhub,) = plt.semilogy(
            [self.savings_rate[0]], [self.capital[0]], "r.", alpha=1, ms=10
        )
        ax = self.ax = plt.gca()
        ax.set_xlabel("savings rate")
        ax.set_ylabel("capital")
        ax.set_xlim(0, 1)
        ax.set_ylim(1e2, 1e6)
        self.figno = 0
        self.lastframet = self.t
    # explanation: consumption = (1-s) I = (1-s) (r K + w L) --> K(s|C) = (C/(1-s) - w L)/r
    def update_movie(self):
        """Redraw and save a movie frame at most every tau/10 time units."""
        if self.t >= self.lastframet + self.tau / 10:
            s = np.average(self.savings_rate, weights=self.income)
            # iso-consumption curve of the currently best-off household
            self.curve.set_ydata(
                (self.consumption.max() / (1 - self.sss) - self.w * self.P[0]) / self.r
            )
            self.line_s.set_xdata([s, s])
            self.line_K.set_ydata([self.capital.mean(), self.capital.mean()])
            m = np.argmax(self.consumption)
            self.scattermax.set_xdata([self.savings_rate[m]])
            self.scattermax.set_ydata([self.capital[m]])
            self.scatter.set_xdata(self.savings_rate[1:-3])
            self.scatter.set_ydata(self.capital[1:-3])
            self.scatterleaves.set_xdata(self.savings_rate[-3:])
            self.scatterleaves.set_ydata(self.capital[-3:])
            self.scatterhub.set_xdata([self.savings_rate[0]])
            self.scatterhub.set_ydata([self.capital[0]])
            plt.title("t = %.1f" % self.t)
            self.fig.canvas.draw()
            self.fig.canvas.flush_events()
            plt.savefig(str(self.movie) + f"_{self.figno:5d}.png", quality=1)
            self.figno += 1
            self.lastframet = self.t
    def update_economy(self, update_time):
        """
        Integrates the economic equations of the
        model until the system time equals the update time.
        Also keeps track of the capital return rates and estimates
        the time derivatives of capital return rates trough linear
        regression.
        Finally, appends the current system state to the system e_trajectory.
        Parameters:
        -----------
        self : object
            instance of the model class
        update_time : float
            time until which system is integrated
        """
        dt = [self.t, update_time]
        x0 = list(self.capital) + [self.Psum]
        # integrate the system
        x1 = odeint(self.economy_dot, x0, dt, mxhnil=1, mxstep=5000000)[1]
        # clip negative capital to zero
        self.capital = np.where(x1[0 : self.n] > 0, x1[0 : self.n], np.zeros(self.n))
        self.t = update_time
        self.steps += 1
        # calculate economic output:
        self.Y = self.b * self.K ** self.beta * self.Psum ** self.alpha
        self.consumption = self.income * (1 - self.savings_rate)
        # output economic data
        if self.e_trajectory_output:
            self.update_e_trajectory()
        if self.macro_trajectory_output:
            self.update_macro_trajectory()
        if self.movie:
            self.update_movie()
    def find_update_candidates(self):
        """Draw a random household and the time of its next imitation event,
        and pick the neighbor it will (potentially) copy from.

        Returns (candidate, neighbor, neighbors, update_time); for fixed
        households, neighbor == candidate (self-copy, i.e. no change).
        """
        # find household with min waiting time
        candidate = np.random.randint(self.n)
        # exponential inter-event time for a population of n households
        update_time = self.t + np.random.exponential(scale=self.tau / self.n)
        # load neighborhood of household i
        neighbors = list(
            self.G.neighbors(candidate)
        )
        # choose best neighbor of candidate
        # fitness proxy: each neighbor's current consumption
        func_vals = (1.0 - self.savings_rate[neighbors]) * self.income[neighbors]
        # IF COPY based on highest CAPITAL:
        # func_vals = self.capital[neighbors]
        if self.pfuture != 0:
            # blend in estimated future consumption of the candidate under
            # each neighbor's savings rate
            cfut = self.c_future(
                sj=self.savings_rate[neighbors],
                Li=self.P[candidate],
                Ki=self.capital[candidate],
            )
            # debugging #########################################################
            if self.debug:
                a = self.c_future(0.1, Li=self.P[candidate], Ki=self.capital[candidate])
                b = self.c_future(0.5, Li=self.P[candidate], Ki=self.capital[candidate])
                c = self.c_future(0.9, Li=self.P[candidate], Ki=self.capital[candidate])
                abc = [np.round(10 * max(x), 2) for x in [a, b, c]]
                print(np.round(10 * max(func_vals), 2), abc)
            ###################################################################
            func_vals = func_vals + self.pfuture * cfut
        if self.is_fixed[candidate]:
            # copy from yourself:
            neighbor = candidate
        else:
            neighbor = neighbors[np.argmax(func_vals)]
        return candidate, neighbor, neighbors, update_time
    def update_savings_rate(self, candidate, neighbor):
        """Let `candidate` explore a random savings rate (prob. pexplore) or
        imitate `neighbor` with additive noise if the neighbor is fitter
        and differs by at least delta_s. Fixed households never change.
        """
        if self.is_fixed[candidate]:
            return 0
        if np.random.rand() < self.pexplore:
            self.savings_rate[candidate] = np.random.rand()
            return 0
        if self.fitness(neighbor) > self.fitness(candidate):
            if (
                abs(self.savings_rate[candidate] - self.savings_rate[neighbor])
                >= self.delta_s
            ):
                self.savings_rate[candidate] = self.savings_rate[
                    neighbor
                ] + np.random.uniform(-self.eps, self.eps)
                while (self.savings_rate[candidate] > 1) or (
                    self.savings_rate[candidate] < 0
                ):
                    # need savings_rate to stay in [0,1]
                    self.savings_rate[candidate] = (
                        self.savings_rate[neighbor]
                    ) + np.random.uniform(-self.eps, self.eps)
        return 0
    def fitness(self, agent):
        """Return the agent's current consumption (imitation fitness)."""
        return self.income[agent] * (1 - self.savings_rate[agent])
    def init_e_trajectory(self):
        """Write the header row of the per-household trajectory and record
        the initial economic state."""
        element = [
            "time",
            "w",
            "r",
            "Y",
            "indiv_savings_rate",
            "indiv_capital",
            "indiv_consumption",
        ]
        self.e_trajectory.append(element)
        # NOTE(review): these exponents differ from economy_dot, which uses
        # w ~ P**(alpha-1) * K**beta and r ~ P**alpha * K**(beta-1);
        # confirm whether the initial w and r here are intentional.
        self.w = self.b * self.alpha * self.Psum ** self.beta * self.K ** self.beta
        self.r = self.b * self.beta * self.Psum ** self.alpha * self.K ** self.alpha
        self.income = self.r * self.capital + self.w * self.P
        self.update_e_trajectory()
    def update_e_trajectory(self):
        """Append the current per-household state to the e_trajectory."""
        element = [
            self.t,
            self.w,
            self.r,
            self.Y,
            self.savings_rate,
            self.capital,
            self.income * (1 - self.savings_rate),
        ]
        self.e_trajectory.append(element)
    def get_e_trajectory(self):
        # make up DataFrame from micro data
        columns = self.e_trajectory[0]
        trj = pd.DataFrame(self.e_trajectory[1:], columns=columns)
        trj = trj.set_index("time")
        return trj
    def init_macro_trajectory(self):
        """Write the header row of the macro trajectory and record the
        initial aggregate state."""
        element = ["time", "wage", "r", "capital", "consumption", "Y"]
        self.macro_trajectory.append(element)
        # NOTE(review): same exponent inconsistency as init_e_trajectory —
        # compare with economy_dot before relying on these initial values.
        self.w = self.b * self.alpha * self.Psum ** self.beta * self.K ** self.beta
        self.r = self.b * self.beta * self.Psum ** self.alpha * self.K ** self.alpha
        self.income = self.r * self.capital + self.w * self.P
        self.update_macro_trajectory()
    def update_macro_trajectory(self):
        """Append the current aggregate state to the macro trajectory."""
        element = [
            self.t,
            self.w,
            self.r,
            self.capital.sum(),
            (self.income * (1 - self.savings_rate)).sum(),
            self.Y,
        ]
        self.macro_trajectory.append(element)
    def get_macro_trajectory(self):
        # make up DataFrame from micro data
        columns = self.macro_trajectory.pop(0)
        trj = pd.DataFrame(self.macro_trajectory, columns=columns)
        trj = trj.set_index("time")
        return trj
| [
"matplotlib.pyplot.title",
"numpy.argmax",
"numpy.random.exponential",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.exp",
"numpy.random.normal",
"matplotlib.pyplot.gca",
"pandas.DataFrame",
"networkx.adj_matrix",
"scipy.integrate.odeint",
"numpy.linspace",
"matp... | [((2390, 2440), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'self.tau', 'size': 'self.n'}), '(scale=self.tau, size=self.n)\n', (2411, 2440), True, 'import numpy as np\n'), ((2665, 2687), 'numpy.array', 'np.array', (['savings_rate'], {}), '(savings_rate)\n', (2673, 2687), True, 'import numpy as np\n'), ((2924, 2940), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (2932, 2940), True, 'import numpy as np\n'), ((3373, 3414), 'numpy.random.normal', 'np.random.normal', (['P', '(P * self.phi)', 'self.n'], {}), '(P, P * self.phi, self.n)\n', (3389, 3414), True, 'import numpy as np\n'), ((7564, 7599), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(100)'}), '(figsize=(6, 6), dpi=100)\n', (7574, 7599), True, 'import matplotlib.pyplot as plt\n'), ((7672, 7733), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.sss', '(100.0 + 0 * self.sss)', '"""b-"""'], {'alpha': '(0.1)'}), "(self.sss, 100.0 + 0 * self.sss, 'b-', alpha=0.1)\n", (7684, 7733), True, 'import matplotlib.pyplot as plt\n'), ((7873, 7923), 'numpy.average', 'np.average', (['self.savings_rate'], {'weights': 'self.income'}), '(self.savings_rate, weights=self.income)\n', (7883, 7923), True, 'import numpy as np\n'), ((7949, 8008), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[s, s]', '[1.0, 10000000000.0]', '"""k-"""'], {'alpha': '(0.5)'}), "([s, s], [1.0, 10000000000.0], 'k-', alpha=0.5)\n", (7961, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8008, 8093), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.sfixed, self.sfixed]', '[1.0, 10000000000.0]', '"""k--"""'], {'alpha': '(0.5)'}), "([self.sfixed, self.sfixed], [1.0, 10000000000.0], 'k--', alpha=0.5\n )\n", (8020, 8093), True, 'import matplotlib.pyplot as plt\n'), ((8109, 8188), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.savings_rate[0]]', '[self.capital[0]]', '"""b."""'], {'alpha': '(0.1)', 'ms': '(30)'}), "([self.savings_rate[0]], [self.capital[0]], 'b.', 
alpha=0.1, ms=30)\n", (8121, 8188), True, 'import matplotlib.pyplot as plt\n'), ((8237, 8315), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.savings_rate[:-3]', 'self.capital[:-3]', '"""k."""'], {'alpha': '(0.1)', 'ms': '(5)'}), "(self.savings_rate[:-3], self.capital[:-3], 'k.', alpha=0.1, ms=5)\n", (8249, 8315), True, 'import matplotlib.pyplot as plt\n'), ((8370, 8446), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.savings_rate[-3:]', 'self.capital[-3:]', '"""g."""'], {'alpha': '(1)', 'ms': '(7)'}), "(self.savings_rate[-3:], self.capital[-3:], 'g.', alpha=1, ms=7)\n", (8382, 8446), True, 'import matplotlib.pyplot as plt\n'), ((8498, 8575), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.savings_rate[0]]', '[self.capital[0]]', '"""r."""'], {'alpha': '(1)', 'ms': '(10)'}), "([self.savings_rate[0]], [self.capital[0]], 'r.', alpha=1, ms=10)\n", (8510, 8575), True, 'import matplotlib.pyplot as plt\n'), ((8621, 8630), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8628, 8630), True, 'import matplotlib.pyplot as plt\n'), ((11553, 11578), 'numpy.random.randint', 'np.random.randint', (['self.n'], {}), '(self.n)\n', (11570, 11578), True, 'import numpy as np\n'), ((15261, 15313), 'pandas.DataFrame', 'pd.DataFrame', (['self.e_trajectory[1:]'], {'columns': 'columns'}), '(self.e_trajectory[1:], columns=columns)\n', (15273, 15313), True, 'import pandas as pd\n'), ((16226, 16278), 'pandas.DataFrame', 'pd.DataFrame', (['self.macro_trajectory'], {'columns': 'columns'}), '(self.macro_trajectory, columns=columns)\n', (16238, 16278), True, 'import pandas as pd\n'), ((2789, 2804), 'numpy.ones', 'np.ones', (['self.n'], {}), '(self.n)\n', (2796, 2804), True, 'import numpy as np\n'), ((3499, 3540), 'numpy.random.normal', 'np.random.normal', (['P', '(P * self.phi)', 'self.n'], {}), '(P, P * self.phi, self.n)\n', (3515, 3540), True, 'import numpy as np\n'), ((3967, 3989), 'numpy.random.rand', 'np.random.rand', (['self.n'], {}), '(self.n)\n', (3981, 3989), 
True, 'import numpy as np\n'), ((5284, 5311), 'numpy.linspace', 'np.linspace', (['(0)', 't_max', '(1000)'], {}), '(0, t_max, 1000)\n', (5295, 5311), True, 'import numpy as np\n'), ((7619, 7641), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(101)'], {}), '(0, 1, 101)\n', (7630, 7641), True, 'import numpy as np\n'), ((9009, 9059), 'numpy.average', 'np.average', (['self.savings_rate'], {'weights': 'self.income'}), '(self.savings_rate, weights=self.income)\n', (9019, 9059), True, 'import numpy as np\n'), ((9332, 9359), 'numpy.argmax', 'np.argmax', (['self.consumption'], {}), '(self.consumption)\n', (9341, 9359), True, 'import numpy as np\n'), ((9850, 9880), 'matplotlib.pyplot.title', 'plt.title', (["('t = %.1f' % self.t)"], {}), "('t = %.1f' % self.t)\n", (9859, 9880), True, 'import matplotlib.pyplot as plt\n'), ((10822, 10880), 'scipy.integrate.odeint', 'odeint', (['self.economy_dot', 'x0', 'dt'], {'mxhnil': '(1)', 'mxstep': '(5000000)'}), '(self.economy_dot, x0, dt, mxhnil=1, mxstep=5000000)\n', (10828, 10880), False, 'from scipy.integrate import odeint\n'), ((10953, 10969), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (10961, 10969), True, 'import numpy as np\n'), ((11610, 11656), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(self.tau / self.n)'}), '(scale=self.tau / self.n)\n', (11631, 11656), True, 'import numpy as np\n'), ((13205, 13221), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13219, 13221), True, 'import numpy as np\n'), ((13282, 13298), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13296, 13298), True, 'import numpy as np\n'), ((2512, 2536), 'networkx.adj_matrix', 'nx.adj_matrix', (['adjacency'], {}), '(adjacency)\n', (2525, 2536), True, 'import networkx as nx\n'), ((12997, 13017), 'numpy.argmax', 'np.argmax', (['func_vals'], {}), '(func_vals)\n', (13006, 13017), True, 'import numpy as np\n'), ((4929, 4948), 'numpy.exp', 'np.exp', (['(eta_j * tau)'], {}), '(eta_j * tau)\n', (4935, 4948), 
True, 'import numpy as np\n'), ((13710, 13748), 'numpy.random.uniform', 'np.random.uniform', (['(-self.eps)', 'self.eps'], {}), '(-self.eps, self.eps)\n', (13727, 13748), True, 'import numpy as np\n'), ((14135, 14173), 'numpy.random.uniform', 'np.random.uniform', (['(-self.eps)', 'self.eps'], {}), '(-self.eps, self.eps)\n', (14152, 14173), True, 'import numpy as np\n')] |
from Bio import SeqIO
from collections import Counter
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
def sample_records(genome_loc: Path, genome_red_loc: Path, num_records: int):
    """Sample num_records reads (with replacement) from a fasta file and
    save them to a new file.

    :param genome_loc: path to the unreduced file
    :param genome_red_loc: path to the reduced file
    :param num_records: number of reads to sample
    """
    sequence = list(SeqIO.parse(genome_loc.absolute(), 'fasta'))
    genome_red_loc.touch()
    # use a context manager so the output handle is flushed and closed
    # deterministically (the previous version leaked the open handle)
    with genome_red_loc.open('w') as handle:
        for _ in range(num_records):
            index = np.random.randint(len(sequence))
            SeqIO.write(sequence[index], handle, 'fasta')
def get_k_mers(genome_red_loc: Path, k: int) -> List[str]:
    """Extract all overlapping k-mers from a fasta file (preferably the
    reduced one). See also https://en.wikipedia.org/wiki/K-mer

    :param genome_red_loc: path to the fasta file
    :param k: length of the k-mer
    :return: a list of k-mers (empty if k exceeds the sequence length)
    """
    # Read
    records = list(SeqIO.parse(genome_red_loc, 'fasta'))
    # NOTE(review): only the first 10 records are used — this looks like a
    # development shortcut carried over; confirm whether it is intended.
    records = records[:10]
    # Combine all records into one upper-case string
    text = ''.join(str(record.seq) for record in records).upper()
    # A text of length L has L - k + 1 k-mers. The previous loop stopped
    # when i + k >= len(text) and therefore dropped the final k-mer.
    return [text[i:i + k] for i in range(len(text) - k + 1)]
def get_k_mers_24(genome_red_loc: Path, k: int, tandem_repeats=False) -> List[str]:
    """ Samples k-mers from a fasta file (preferably the reduced one), but this time
    only for tandem repeat regions or non tandem repeat regions.
    :param genome_red_loc: path to the fasta file
    :param k: length of the k-mer
    :param tandem_repeats: get only tandem repeats or non-tandem repeats
    :return: a list of k-mers
    """
    # TODO: unimplemented stub — the body is missing, so calling this
    # currently returns None rather than the List[str] the annotation promises.
def k_mer_statistics(genome_red_loc: Path, K: int, delta=1.e-10) -> Tuple:
    """ Calculates relative k-mer frequencies and conditional k-mer probabilities
    on the provided fasta file.
    :param genome_red_loc: path to the fasta file
    :param K: upper bound of the k of k-mers
    :param delta: threshold for probability mass loss, defaults to 1.e-10
        (currently unused; kept for interface compatibility)
    :return: dicts (keyed by k) of relative frequencies and conditional
        probabilities
    """
    Kmer_rel_freq = {}
    Kmer_cond_prob = {}
    for i in range(1, K + 1):
        kmers_list = get_k_mers(genome_red_loc, i)
        # Counter counts every k-mer in one O(n) pass; the previous
        # per-element kmers_list.count(...) made this step O(n^2).
        counts = Counter(kmers_list)
        total = len(kmers_list)
        # relative frequency, keyed in first-occurrence order as before
        rel_freq = {kmer: count / total for kmer, count in counts.items()}
        Kmer_rel_freq[i] = rel_freq  # all rel freq for one k-mer
        if i == 1:  # conditional prob is equal to relative frequency for K=1
            Kmer_cond_prob[i] = rel_freq
        else:
            # conditional probability of each k-mer given its (k-1)-prefix
            Kmer_cond_prob[i] = {
                kmer: rel_freq[kmer] / Kmer_rel_freq[i - 1][kmer[:-1]]
                for kmer in rel_freq
            }
    return Kmer_rel_freq, Kmer_cond_prob
def k_mer_statistics_24(genome_red_loc: Path, K: int, tandem_repeats=False, delta=1.e-10) -> Tuple:
    """ Calculates relative k-mer frequencies and conditional k-mer probabilities
    on the provided fasta file, but this time only for tandem repeat regions
    or non tandem repeat regions.
    :param genome_red_loc: path to the fasta file
    :param K: upper bound of the k of k-mers
    :param tandem_repeats: get only tandem repeats or non-tandem repeats
    :param delta: threshold for probability mass loss, defaults to 1.e-10
    :return: lists of relative frequencies and conditional probabilities
    """
    # TODO: unimplemented stub — the body is missing, so calling this
    # currently returns None rather than the Tuple the annotation promises.
def conditional_entropy(rel_freqs: Dict, cond_probs: Dict) -> float:
    """Compute the conditional entropy of a corpus,
    H = -sum_k p(k) * log2 p(k | prefix),
    from relative k-mer frequencies and conditional k-mer probabilities.

    :param rel_freqs: relative frequency of each k-mer
    :param cond_probs: conditional probability of each k-mer
    :return: the conditional entropy of the corpus
    """
    return sum(-freq * np.log2(cond_probs[kmer])
               for kmer, freq in rel_freqs.items())
def plot_k_mers(rel_freqs, n=10, k=5):
    """Plot the n most frequent k-mers against their frequency,
    one subplot per k-mer length in 1..k.

    :param rel_freqs: dict mapping each k to its {kmer: frequency} dict
    :param n: the number of most frequent k-mers to plot
    :param k: the largest k-mer length to plot
    """
    fig, axes = plt.subplots(k)
    for length in range(1, k + 1):
        # rank this length's k-mers by descending frequency and keep the top n
        ranked = sorted(rel_freqs[length].items(),
                        key=lambda item: item[1], reverse=True)[:n]
        labels = [kmer for kmer, _ in ranked]
        values = [freq for _, freq in ranked]
        axis = axes[length - 1]
        axis.loglog(labels, values)
        axis.set_xlabel("kmers")
        axis.set_ylabel("frequency")
        axis.set_title("K=" + str(length))
    # plt.xticks(rotation=60, ha='right')
    plt.show()
def plot_conditional_entropies(H_ks: List[float]):
    """Plot conditional entropy against k-mer length.

    :param H_ks: conditional entropy scores for k = 1 .. len(H_ks)
    """
    ks = range(1, len(H_ks) + 1)
    plt.plot(ks, H_ks)
    plt.xlabel("k-mer length")
    plt.ylabel("Conditional entropy")
    plt.show()
plt.show() | [
"Bio.SeqIO.parse",
"matplotlib.pyplot.show",
"Bio.SeqIO.write",
"numpy.log2",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((1127, 1163), 'Bio.SeqIO.parse', 'SeqIO.parse', (['genome_red_loc', '"""fasta"""'], {}), "(genome_red_loc, 'fasta')\n", (1138, 1163), False, 'from Bio import SeqIO\n'), ((4647, 4662), 'matplotlib.pyplot.subplots', 'plt.subplots', (['k'], {}), '(k)\n', (4659, 4662), True, 'import matplotlib.pyplot as plt\n'), ((5224, 5234), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5232, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5449, 5475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k-mer length"""'], {}), "('k-mer length')\n", (5459, 5475), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5513), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Conditional entropy"""'], {}), "('Conditional entropy')\n", (5490, 5513), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5526, 5528), True, 'import matplotlib.pyplot as plt\n'), ((744, 789), 'Bio.SeqIO.write', 'SeqIO.write', (['sequence[index]', 'handle', '"""fasta"""'], {}), "(sequence[index], handle, 'fasta')\n", (755, 789), False, 'from Bio import SeqIO\n'), ((4336, 4358), 'numpy.log2', 'np.log2', (['cond_probs[k]'], {}), '(cond_probs[k])\n', (4343, 4358), True, 'import numpy as np\n')] |
import sys
sys.path.insert(0, '..')
from utils import data
import os
import sklearn
import numpy as np
from sklearn.neighbors import (
KNeighborsClassifier,
DistanceMetric
)
import json
from shapely.geometry import Point
import matplotlib.pyplot as plt
import geopandas as gpd
import geoplot as gplt
import mapclassify
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
import pandas as pd
from pandas import read_csv
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from datetime import datetime, date
def import_US_confirmed():
    """Aggregate the JHU CSSE US confirmed-case time series by state.

    :return: (features, targets) — features holds one cumulative case
        series per state, targets the matching state names
    """
    base_path = './COVID-19/csse_covid_19_data/'
    csv_path = os.path.join(
        base_path,
        'csse_covid_19_time_series',
        'time_series_covid19_confirmed_US.csv')
    table = data.load_csv_data(csv_path)
    # sum county rows into one series per state
    per_state = {}
    for row in range(table.shape[0]):
        state = table["Province_State"][row]
        # NOTE(review): the column slice uses shape[0] (row count), not the
        # column count — presumably intentional trimming, but verify.
        series = table.iloc[row, 11:table.shape[0] - 11]
        if state in per_state:
            per_state[state] += series
        else:
            per_state[state] = series
    features = np.asarray(list(per_state.values()))
    targets = np.asarray(list(per_state.keys()))
    return features, targets
def import_US_confirmed_update():
    """Build per-state case series from the newer us-states.csv release.

    Days are indexed relative to 2020-01-21; the set of state labels is
    taken from the JHU time-series file so both importers agree.

    :return: (features, targets) — per-state day-indexed case arrays and
        the matching state names
    """
    base_path = './COVID-19/csse_covid_19_data/'
    daily = data.load_csv_data(os.path.join(
        base_path,
        'csse_covid_19_time_series',
        'us-states.csv'))
    dates = sorted(set(daily["date"]))
    jhu = data.load_csv_data(os.path.join(
        base_path,
        'csse_covid_19_time_series',
        'time_series_covid19_confirmed_US.csv'))
    # one zero-filled day-indexed array per state
    state_feature = {state: np.zeros(len(dates))
                     for state in set(jhu["Province_State"])}
    origin = date(2020, 1, 21)
    for row in range(daily.shape[0]):
        day = (datetime.strptime(daily["date"][row], '%Y-%m-%d').date() - origin).days
        state_feature[daily["state"][row]][day] = daily["cases"][row]
    features = np.asarray(list(state_feature.values()))
    targets = np.asarray(list(state_feature.keys()))
    return features, targets
def create_dataset(dataset, look_back=1):
    """Turn a 1-D series into supervised (window, next value) pairs.

    :param dataset: sequence of observations
    :param look_back: input window length
    :return: (X, y) numpy arrays with X[i] = dataset[i:i+look_back]
        and y[i] = dataset[i+look_back]
    """
    n_pairs = len(dataset) - look_back - 1
    windows = [dataset[i:i + look_back] for i in range(n_pairs)]
    targets = [dataset[i + look_back] for i in range(n_pairs)]
    return np.array(windows), np.array(targets)
def create_dataset_long_term(dataset, num_x, num_y):
    """Build overlapping windows for multi-step-ahead prediction.

    :param dataset: sequence of observations
    :param num_x: input window length
    :param num_y: forecast horizon length
    :return: (X, Y, Xshift) numpy arrays — X[i] is num_x inputs, Y[i]
        the num_y values that follow, and Xshift[i] the input window
        advanced by num_y steps
    """
    xs, ys, shifted = [], [], []
    for start in range(len(dataset) - num_x - num_y + 1):
        xs.append(dataset[start:start + num_x])
        ys.append(dataset[start + num_x:start + num_x + num_y])
        shifted.append(dataset[start + num_y:start + num_x + num_y])
    return np.array(xs), np.array(ys), np.array(shifted)
def LSTM_forecast(state):
    """Fit a one-step-ahead LSTM to a state's cumulative confirmed-case
    series and plot true vs. predicted counts.

    :param state: name of the US state whose series is modelled
    """
    features, states = import_US_confirmed()
    # pick the row belonging to the requested state
    features = features[np.where(states == state)[0][0]]
    # NOTE(review): train and test sets are built from the same data, so
    # the plot shows in-sample fit rather than out-of-sample skill.
    trainX, trainY = create_dataset(features)
    testX, testY = create_dataset(features)
    # reshape to the (samples, timesteps, features) layout the LSTM expects
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
    # create and fit the LSTM network
    model = Sequential()
    model.add(LSTM(32, input_shape=(1, 1), activation="relu"))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
    # make predictions
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)
    # NOTE(review): the 117-day axis starting 2020-01-22 is hard-coded to
    # the data vintage used during development — confirm it still matches.
    timespan = pd.date_range("2020-01-22", periods=117).to_list()
    timespan = [str(date.date()) for date in timespan]
    # from matplotlib.dates import date2num
    # timespan = date2num(timespan)
    fig, ax = plt.subplots()
    ax.set_xticks(np.arange(len(timespan)))
    ax.set_xticklabels(timespan)
    # ax.xaxis.set_major_locator(mdates.YearLocator())
    # ax.xaxis.set_minor_locator(mdates.MonthLocator())
    # ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
    ax.plot(timespan, testY, label="True Cases")
    ax.plot(timespan, testPredict, label="Predicted Cases by LSTM")
    # ax.xaxis.set_major_locator(plt.MaxNLocator(5))
    plt.legend()
    plt.gcf().autofmt_xdate()
    # thin out tick labels: show roughly one date label per month
    for n, label in enumerate(ax.xaxis.get_ticklabels()):
        if n % 30 == 0:
            label.set_visible(True)
        else:
            label.set_visible(False)
    plt.show()
def LSTM_forecast_long_term(state,x_days,y_days):
    """Train an LSTM mapping x_days of history to the next y_days of
    cumulative cases, then plot the multi-step forecast beyond the
    training window against newer observed data.

    :param state: name of the US state whose series is modelled
    :param x_days: length of the input window (days)
    :param y_days: length of the forecast horizon (days)
    """
    features, states = import_US_confirmed()
    features = features[np.where(states == state)[0][0]]
    # newer data release serves as ground truth for the forecast period
    features_update, states_update = import_US_confirmed_update()
    features_update = features_update[np.where(states_update == state)[0][0]]
    trainX, trainY, testX = create_dataset_long_term(features,x_days,y_days)
    # reshape to the (samples, timesteps, features) layout the LSTM expects
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
    # create and fit the LSTM network
    model = Sequential()
    model.add(LSTM(1024, input_shape=(1, x_days), activation="relu"))
    model.add(Dense(y_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
    # make predictions
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)
    # NOTE(review): the date ranges below are hard-coded to the data
    # vintages used during development — confirm they match the CSVs.
    timespan = pd.date_range("2020-01-22", "2020-05-19").to_list()
    timespan = [str(date.date()) for date in timespan]
    timespan_update = pd.date_range("2020-01-21", "2020-06-08").to_list()
    timespan_update = [str(date.date()) for date in timespan_update]
    # print (testPredict)
    forecast_timespan = pd.date_range("2020-05-19", periods = y_days + 1).to_list()
    forecast_timespan = [str(date.date()) for date in forecast_timespan]
    fig, ax = plt.subplots()
    ax.set_xticks(np.arange(len(timespan_update)))
    ax.set_xticklabels(timespan_update)
    ax.plot(timespan_update,features_update, label="True Cases 2020-05-20 ~ 2020-06-08")
    # plt.plot(range(0,x_days), testX[-1][0])
    # prepend the last observed value so the forecast curve connects to
    # the end of the training series
    ax.plot(forecast_timespan, np.insert(testPredict[-1], 0, features[-1]), label="Predicted Cases by LSTM 2020-05-20 ~ 2020-06-19")
    ax.plot(timespan, features, label = "Trained Cases 2020-01-21 ~ 2020-05-19")
    ax.legend()
    # thin out tick labels: show roughly one date label per month
    for n, label in enumerate(ax.xaxis.get_ticklabels()):
        if n % 30 == 0:
            label.set_visible(True)
        else:
            label.set_visible(False)
    plt.show()
def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1-D signal by convolving it with a tapered window.

    The signal is reflected at both ends, so the output has length
    len(x) + window_len - 1 (mode='valid' over the padded signal).

    :param x: 1-D array-like signal
    :param window_len: window length in samples
    :param window: 'flat' for a moving average, otherwise the name of a
        numpy window function ('hanning', 'hamming', 'bartlett', 'blackman')
    :return: the smoothed signal as a numpy array
    :raises ValueError: if the window name is not a numpy attribute
    """
    x = np.asarray(x)
    # reflect-pad both ends to limit boundary transients
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # look the window function up on the numpy module instead of
        # eval()'ing an arbitrary caller-supplied string (code injection risk)
        try:
            w = getattr(np, window)(window_len)
        except AttributeError:
            raise ValueError(f"unknown window type: {window!r}")
    # normalise the window so the smoothing preserves the signal's scale
    return np.convolve(w / w.sum(), s, mode='valid')
if __name__ == "__main__":
    # Earlier exploratory experiments (single-step LSTM forecasting and
    # long-term forecasts on California data) were dead, commented-out code
    # and have been removed; recover them from version control if needed.
    LSTM_forecast_long_term("Illinois", 75, 30)
| [
"numpy.ones",
"os.path.join",
"utils.data.load_csv_data",
"numpy.insert",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"datetime.date.date",
"matplotlib.pyplot.show",
"pandas.date_range",
"matplotlib.pyplot.legend",
"datetime.date",
"datetime.datetime.strptime",
"matplotlib.pyplot.gcf",
... | [((12, 36), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (27, 36), False, 'import sys\n'), ((887, 983), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""csse_covid_19_time_series"""', '"""time_series_covid19_confirmed_US.csv"""'], {}), "(BASE_PATH, 'csse_covid_19_time_series',\n 'time_series_covid19_confirmed_US.csv')\n", (899, 983), False, 'import os\n'), ((1026, 1055), 'utils.data.load_csv_data', 'data.load_csv_data', (['confirmed'], {}), '(confirmed)\n', (1044, 1055), False, 'from utils import data\n'), ((1819, 1888), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""csse_covid_19_time_series"""', '"""us-states.csv"""'], {}), "(BASE_PATH, 'csse_covid_19_time_series', 'us-states.csv')\n", (1831, 1888), False, 'import os\n'), ((1935, 1964), 'utils.data.load_csv_data', 'data.load_csv_data', (['confirmed'], {}), '(confirmed)\n', (1953, 1964), False, 'from utils import data\n'), ((2033, 2129), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""csse_covid_19_time_series"""', '"""time_series_covid19_confirmed_US.csv"""'], {}), "(BASE_PATH, 'csse_covid_19_time_series',\n 'time_series_covid19_confirmed_US.csv')\n", (2045, 2129), False, 'import os\n'), ((2173, 2203), 'utils.data.load_csv_data', 'data.load_csv_data', (['_confirmed'], {}), '(_confirmed)\n', (2191, 2203), False, 'from utils import data\n'), ((3702, 3759), 'numpy.reshape', 'np.reshape', (['trainX', '(trainX.shape[0], 1, trainX.shape[1])'], {}), '(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n', (3712, 3759), True, 'import numpy as np\n'), ((3773, 3827), 'numpy.reshape', 'np.reshape', (['testX', '(testX.shape[0], 1, testX.shape[1])'], {}), '(testX, (testX.shape[0], 1, testX.shape[1]))\n', (3783, 3827), True, 'import numpy as np\n'), ((3886, 3898), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3896, 3898), False, 'from keras.models import Sequential\n'), ((4447, 4461), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4459, 4461), True, 
'import matplotlib.pyplot as plt\n'), ((4897, 4909), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4907, 4909), True, 'import matplotlib.pyplot as plt\n'), ((5122, 5132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5130, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5534, 5591), 'numpy.reshape', 'np.reshape', (['trainX', '(trainX.shape[0], 1, trainX.shape[1])'], {}), '(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n', (5544, 5591), True, 'import numpy as np\n'), ((5605, 5659), 'numpy.reshape', 'np.reshape', (['testX', '(testX.shape[0], 1, testX.shape[1])'], {}), '(testX, (testX.shape[0], 1, testX.shape[1]))\n', (5615, 5659), True, 'import numpy as np\n'), ((5718, 5730), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5728, 5730), False, 'from keras.models import Sequential\n'), ((6540, 6554), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6552, 6554), True, 'import matplotlib.pyplot as plt\n'), ((7223, 7233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7231, 7233), True, 'import matplotlib.pyplot as plt\n'), ((3053, 3068), 'numpy.array', 'np.array', (['dataX'], {}), '(dataX)\n', (3061, 3068), True, 'import numpy as np\n'), ((3070, 3085), 'numpy.array', 'np.array', (['dataY'], {}), '(dataY)\n', (3078, 3085), True, 'import numpy as np\n'), ((3413, 3428), 'numpy.array', 'np.array', (['dataX'], {}), '(dataX)\n', (3421, 3428), True, 'import numpy as np\n'), ((3430, 3445), 'numpy.array', 'np.array', (['dataY'], {}), '(dataY)\n', (3438, 3445), True, 'import numpy as np\n'), ((3447, 3462), 'numpy.array', 'np.array', (['testX'], {}), '(testX)\n', (3455, 3462), True, 'import numpy as np\n'), ((3914, 3961), 'keras.layers.LSTM', 'LSTM', (['(32)'], {'input_shape': '(1, 1)', 'activation': '"""relu"""'}), "(32, input_shape=(1, 1), activation='relu')\n", (3918, 3961), False, 'from keras.layers import LSTM\n'), ((3978, 3986), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3983, 3986), False, 
'from keras.layers import Dense\n'), ((5746, 5800), 'keras.layers.LSTM', 'LSTM', (['(1024)'], {'input_shape': '(1, x_days)', 'activation': '"""relu"""'}), "(1024, input_shape=(1, x_days), activation='relu')\n", (5750, 5800), False, 'from keras.layers import LSTM\n'), ((5817, 5830), 'keras.layers.Dense', 'Dense', (['y_days'], {}), '(y_days)\n', (5822, 5830), False, 'from keras.layers import Dense\n'), ((6839, 6882), 'numpy.insert', 'np.insert', (['testPredict[-1]', '(0)', 'features[-1]'], {}), '(testPredict[-1], 0, features[-1])\n', (6848, 6882), True, 'import numpy as np\n'), ((7427, 7451), 'numpy.ones', 'np.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (7434, 7451), True, 'import numpy as np\n'), ((4242, 4282), 'pandas.date_range', 'pd.date_range', (['"""2020-01-22"""'], {'periods': '(117)'}), "('2020-01-22', periods=117)\n", (4255, 4282), True, 'import pandas as pd\n'), ((4314, 4325), 'datetime.date.date', 'date.date', ([], {}), '()\n', (4323, 4325), False, 'from datetime import datetime, date\n'), ((4915, 4924), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4922, 4924), True, 'import matplotlib.pyplot as plt\n'), ((6086, 6127), 'pandas.date_range', 'pd.date_range', (['"""2020-01-22"""', '"""2020-05-19"""'], {}), "('2020-01-22', '2020-05-19')\n", (6099, 6127), True, 'import pandas as pd\n'), ((6159, 6170), 'datetime.date.date', 'date.date', ([], {}), '()\n', (6168, 6170), False, 'from datetime import datetime, date\n'), ((6217, 6258), 'pandas.date_range', 'pd.date_range', (['"""2020-01-21"""', '"""2020-06-08"""'], {}), "('2020-01-21', '2020-06-08')\n", (6230, 6258), True, 'import pandas as pd\n'), ((6297, 6308), 'datetime.date.date', 'date.date', ([], {}), '()\n', (6306, 6308), False, 'from datetime import datetime, date\n'), ((6391, 6438), 'pandas.date_range', 'pd.date_range', (['"""2020-05-19"""'], {'periods': '(y_days + 1)'}), "('2020-05-19', periods=y_days + 1)\n", (6404, 6438), True, 'import pandas as pd\n'), ((6481, 6492), 
'datetime.date.date', 'date.date', ([], {}), '()\n', (6490, 6492), False, 'from datetime import datetime, date\n'), ((3563, 3588), 'numpy.where', 'np.where', (['(states == state)'], {}), '(states == state)\n', (3571, 3588), True, 'import numpy as np\n'), ((5257, 5282), 'numpy.where', 'np.where', (['(states == state)'], {}), '(states == state)\n', (5265, 5282), True, 'import numpy as np\n'), ((5396, 5428), 'numpy.where', 'np.where', (['(states_update == state)'], {}), '(states_update == state)\n', (5404, 5428), True, 'import numpy as np\n'), ((2618, 2635), 'datetime.date', 'date', (['(2020)', '(1)', '(21)'], {}), '(2020, 1, 21)\n', (2622, 2635), False, 'from datetime import datetime, date\n'), ((2555, 2608), 'datetime.datetime.strptime', 'datetime.strptime', (["confirmed['date'][idx]", '"""%Y-%m-%d"""'], {}), "(confirmed['date'][idx], '%Y-%m-%d')\n", (2572, 2608), False, 'from datetime import datetime, date\n')] |
""" Unittest module for proximal operator. """
import pytest
import numpy as np
import torch
from carpet.checks import check_random_state
from carpet.proximity import (pseudo_soft_th_tensor,
pseudo_soft_th_numpy)
@pytest.mark.parametrize('seed', [None])
@pytest.mark.parametrize('lbda', [0.1, 0.5, 1.0])
@pytest.mark.parametrize('shape', [(1, 10), (10, 10), (100, 10)])
def test_soft_thresholding(seed, shape, lbda):
    """Check that the tensor and numpy soft-thresholding operators agree."""
    rng = check_random_state(seed)
    z = rng.randn(*shape)
    expected = pseudo_soft_th_tensor(torch.Tensor(z), lbda, step_size=1.0).numpy()
    obtained = pseudo_soft_th_numpy(z, lbda, step_size=1.0)
    np.testing.assert_allclose(expected, obtained, rtol=1e-2)
| [
"numpy.testing.assert_allclose",
"carpet.proximity.pseudo_soft_th_numpy",
"carpet.checks.check_random_state",
"torch.Tensor",
"pytest.mark.parametrize"
] | [((246, 285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', '[None]'], {}), "('seed', [None])\n", (269, 285), False, 'import pytest\n'), ((287, 335), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lbda"""', '[0.1, 0.5, 1.0]'], {}), "('lbda', [0.1, 0.5, 1.0])\n", (310, 335), False, 'import pytest\n'), ((337, 401), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(1, 10), (10, 10), (100, 10)]'], {}), "('shape', [(1, 10), (10, 10), (100, 10)])\n", (360, 401), False, 'import pytest\n'), ((659, 703), 'carpet.proximity.pseudo_soft_th_numpy', 'pseudo_soft_th_numpy', (['z', 'lbda'], {'step_size': '(1.0)'}), '(z, lbda, step_size=1.0)\n', (679, 703), False, 'from carpet.proximity import pseudo_soft_th_tensor, pseudo_soft_th_numpy\n'), ((709, 766), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['prox_z_ref', 'prox_z'], {'rtol': '(0.01)'}), '(prox_z_ref, prox_z, rtol=0.01)\n', (735, 766), True, 'import numpy as np\n'), ((572, 587), 'torch.Tensor', 'torch.Tensor', (['z'], {}), '(z)\n', (584, 587), False, 'import torch\n'), ((493, 517), 'carpet.checks.check_random_state', 'check_random_state', (['seed'], {}), '(seed)\n', (511, 517), False, 'from carpet.checks import check_random_state\n')] |
# created by <NAME>, with minor adjustments by <NAME>
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import fsolve
from scipy.spatial.distance import pdist
from scipy.linalg import eig
from scipy.integrate import odeint
from sklearn.neighbors import NearestNeighbors
from .scVectorField import vectorfield
from .utils import (
update_dict,
form_triu_matrix,
index_condensed_matrix,
inverse_norm,
)
from .utils_vecCalc import vector_field_function, vecfld_from_adata
from ..external.ddhodge import ddhoge
from ..tools.vector_calculus import curl, divergence
def remove_redundant_points(X, tol=1e-4, output_discard=False):
    """Drop points that duplicate (within distance `tol`) an earlier point.

    Parameters
    ----------
    X: array-like of points, coerced to 2D.
    tol: distance below which two points count as identical.
    output_discard: when True, also return the boolean mask of dropped rows.

    Returns
    -------
    The deduplicated points, and optionally the discard mask.
    """
    X = np.atleast_2d(X)
    n_points = len(X)
    discard = np.zeros(n_points, dtype=bool)
    if n_points > 1:
        # condensed pairwise distances; keep the first of each close pair
        pairwise = pdist(X)
        for i in range(n_points):
            for j in range(i + 1, n_points):
                if pairwise[index_condensed_matrix(n_points, i, j)] < tol:
                    discard[j] = True
        X = X[~discard]
    return (X, discard) if output_discard else X
def find_fixed_points(X0, func_vf, tol_redundant=1e-4, full_output=False):
    """Find roots of `func_vf` starting from each initial guess in X0.

    Parameters
    ----------
    X0: array of initial guesses, one per row.
    func_vf: vector field function whose zeros are sought.
    tol_redundant: when not None, collapse roots closer than this.
    full_output: when True, also return Jacobians and residuals.

    Returns
    -------
    X, or (X, J, fval) when full_output: roots, Jacobians at the roots,
    and the residual vectors.
    """
    roots, jacobians, residuals = [], [], []
    for guess in X0:
        if full_output:
            sol, info, _, _ = fsolve(func_vf, guess, full_output=True)
            residuals.append(info["fvec"])
            # reconstruct the Jacobian from the QR factors MINPACK returns
            Q = info["fjac"]
            R = form_triu_matrix(info["r"])
            jacobians.append(Q.T @ R)
        else:
            sol = fsolve(func_vf, guess)
        roots.append(sol)

    X = np.array(roots)
    if full_output:
        J = np.array(jacobians)
        fval = np.array(residuals)
    if tol_redundant is not None:
        if full_output:
            X, discard = remove_redundant_points(X, tol_redundant, output_discard=True)
            J = J[~discard]
            fval = fval[~discard]
        else:
            X = remove_redundant_points(X, tol_redundant)
    return (X, J, fval) if full_output else X
def pac_onestep(x0, func, v0, ds=0.01):
    """One predictor-corrector step of pseudo-arclength continuation.

    Predicts a point `ds` along the tangent `v0` from `x0`, then corrects
    it onto the solution manifold subject to the arclength constraint
    ``(x - x0) . v0 = ds``.
    """
    # predictor: step along the tangent direction
    guess = x0 + v0 * ds

    # corrector: solve func(x) = 0 together with the arclength constraint
    def _augmented(x):
        return np.array([func(x), np.dot(x - x0, v0) - ds])

    return fsolve(_augmented, guess)
def continuation(x0, func, s_max, ds=0.01, v0=None, param_axis=0, param_direction=1):
    """Trace a solution curve of ``func(x) = 0`` by pseudo-arclength
    continuation, starting from `x0` and marching a total arclength `s_max`
    in steps of `ds`.

    Returns
    -------
    Array of points along the traced curve, starting with `x0`.
    """
    points = [x0]
    if v0 is None:
        # default tangent predictor: unit step along the chosen axis
        tangent = np.zeros_like(x0)
        tangent[param_axis] = param_direction
    else:
        tangent = v0
    arclen = 0
    while arclen <= s_max:
        prev = points[-1]
        nxt = pac_onestep(prev, func, tangent, ds)
        points.append(nxt)
        arclen += ds
        # refresh the tangent predictor from the last secant
        tangent = nxt - prev
        tangent = tangent / np.linalg.norm(tangent)
    return np.array(points)
def clip_curves(curves, domain, tol_discont=None):
    """Clip curves to a rectangular domain, splitting each curve into the
    contiguous segments that stay inside.

    Parameters
    ----------
    curves: list of curves, each a sequence of points.
    domain: per-dimension (min, max) bounds.
    tol_discont: when set, a jump larger than this between consecutive
        points also splits the curve (treated as a discontinuity).

    Returns
    -------
    List of in-domain curve segments. NOTE: a segment that runs to the end
    of a curve loses its final point (acknowledged below as "a tiny bit of
    the end could be chopped off").
    """
    ret = []
    for cur in curves:
        # mark points that fall outside the domain or jump discontinuously
        clip_away = np.zeros(len(cur), dtype=bool)
        for i, p in enumerate(cur):
            for j in range(len(domain)):
                if p[j] < domain[j][0] or p[j] > domain[j][1]:
                    clip_away[i] = True
                    break
            if tol_discont is not None and i > 0:
                d = np.linalg.norm(p - cur[i - 1])
                if d > tol_discont:
                    clip_away[i] = True
        # clip curve and assemble
        i_start = 0
        while i_start < len(cur) - 1:
            if not clip_away[i_start]:
                # scan forward to the next clipped point (or the curve end)
                for i_end in range(i_start, len(cur)):
                    if clip_away[i_end]:
                        break
                ret.append(
                    cur[i_start:i_end]
                )  # a tiny bit of the end could be chopped off
                i_start = i_end
            else:
                i_start += 1
    return ret
def compute_nullclines_2d(X0, fdx, fdy, x_range, y_range, s_max=None, ds=None):
    """Trace the x- and y-nullclines of a 2D vector field by continuation
    from each seed point in `X0`, in both directions along a random initial
    tangent.

    Returns
    -------
    (NCx, NCy): lists of nullcline segments clipped to the domain.
    """
    if s_max is None:
        s_max = 5 * ((x_range[1] - x_range[0]) + (y_range[1] - y_range[0]))
    if ds is None:
        ds = s_max / 1e3

    NCx, NCy = [], []
    for seed in X0:
        # random unit tangent to seed the continuation in both directions
        angle = np.random.rand() * 2 * np.pi
        tangent = np.array([np.cos(angle), np.sin(angle)])
        tangent /= np.linalg.norm(tangent)
        for direction in (tangent, -tangent):
            NCx.append(continuation(seed, fdx, s_max, ds, v0=direction))
        for direction in (tangent, -tangent):
            NCy.append(continuation(seed, fdy, s_max, ds, v0=direction))

    NCx = clip_curves(NCx, [x_range, y_range], ds * 10)
    NCy = clip_curves(NCy, [x_range, y_range], ds * 10)
    return NCx, NCy
def compute_separatrices(Xss, Js, func, x_range, y_range, t=50, n_sample=500, eps=1e-6):
    """Compute separatrices of a 2D vector field by integrating backward in
    time from small perturbations of each fixed point along its stable
    eigendirections.

    Parameters
    ----------
    Xss: array of fixed points.
    Js: Jacobian of the vector field at each fixed point.
    func: the vector field function.
    x_range, y_range: domain to which the resulting curves are clipped.
    t: total integration time for each branch.
    n_sample: number of time samples along each branch.
    eps: size of the initial perturbation off the fixed point.

    Returns
    -------
    List of separatrix curves (point arrays) clipped to the domain.
    """
    ret = []
    for i, x in enumerate(Xss):
        J = Js[i]
        w, v = eig(J)
        # eigendirections with negative real part span the stable manifold
        I_stable = np.where(np.real(w) < 0)[0]
        for j in I_stable:
            # BUG FIX: scipy.linalg.eig returns eigenvectors as COLUMNS
            # (v[:, j]); the previous code indexed rows (v[j]).
            u = np.real(v[:, j])
            u = u / np.linalg.norm(u)
            # time samples for building the separatrix
            T = np.linspace(0, t, n_sample)
            # upper branch: integrate -func (backward time) from x + eps*u
            ab_upper = odeint(lambda x, _: -func(x), x + eps * u, T)
            # lower branch: same, from the opposite perturbation
            ab_lower = odeint(lambda x, _: -func(x), x - eps * u, T)
            # join both branches into one curve through the fixed point
            sep = np.vstack((ab_lower[::-1], ab_upper))
            ret.append(sep)
    return clip_curves(ret, [x_range, y_range])
def set_test_points_on_curve(curve, interval):
    """Subsample a curve so that kept points are at least `interval` apart
    in cumulative arc length. The first point is always kept."""
    sampled = [curve[0]]
    accumulated = 0
    for k in range(1, len(curve)):
        accumulated += np.linalg.norm(curve[k] - curve[k - 1])
        if accumulated >= interval:
            sampled.append(curve[k])
            accumulated = 0
    return np.array(sampled)
def find_intersection_2d(curve1, curve2, tol_redundant=1e-4):
    """Find intersection points between two piecewise-linear 2D curves.

    Every segment of `curve1` is tested against every segment of `curve2`.

    Parameters
    ----------
    curve1, curve2: arrays of 2D points defining the polylines.
    tol_redundant: when not None, intersections closer than this are
        collapsed into a single point.

    Returns
    -------
    Array of intersection points (possibly empty).
    """
    P = []
    for i in range(len(curve1) - 1):
        for j in range(len(curve2) - 1):
            p1, p2 = curve1[i], curve1[i + 1]
            p3, p4 = curve2[j], curve2[j + 1]
            denom = np.linalg.det([p1 - p2, p3 - p4])
            if denom != 0:
                # segment-segment intersection parameters via Cramer's rule
                t = np.linalg.det([p1 - p3, p3 - p4]) / denom
                u = -np.linalg.det([p1 - p2, p1 - p3]) / denom
                if 0 <= t <= 1 and 0 <= u <= 1:
                    P.append(p1 + t * (p2 - p1))
    if tol_redundant is not None and len(P) > 0:
        # BUG FIX: the deduplicated result used to be discarded (the return
        # value of remove_redundant_points was ignored).
        P = remove_redundant_points(P, tol=tol_redundant)
    return np.array(P)
def find_fixed_points_nullcline(
    func, NCx, NCy, sample_interval=0.5, tol_redundant=1e-4, full_output=False
):
    """Locate fixed points of `func` near intersections of the x- and
    y-nullclines.

    Test points are subsampled along every nullcline at `sample_interval`,
    pairwise intersections are collected, and each intersection is refined
    into a root of `func`.

    Returns
    -------
    P, or (P, J) when full_output: the fixed points and their Jacobians.
    """
    sampled_x = [set_test_points_on_curve(c, sample_interval) for c in NCx]
    sampled_y = [set_test_points_on_curve(c, sample_interval) for c in NCy]

    candidates = []
    for cx in sampled_x:
        for cy in sampled_y:
            for point in find_intersection_2d(cx, cy, tol_redundant):
                candidates.append(point)
    candidates = np.array(candidates)

    if full_output:
        P, J, _ = find_fixed_points(candidates, func, tol_redundant, full_output=True)
        return P, J
    return find_fixed_points(candidates, func, tol_redundant)
def is_outside(X, domain):
    """Boolean mask marking rows of X that lie outside the rectangular
    `domain` (one (min, max) pair per dimension)."""
    outside = np.zeros(X.shape[0], dtype=bool)
    for dim in range(X.shape[1]):
        lo, hi = domain[dim][0], domain[dim][1]
        outside = np.logical_or(outside, np.logical_or(X[:, dim] < lo, X[:, dim] > hi))
    return outside
def calc_fft(x):
    """Single-sided amplitude spectrum of a real 1-D signal.

    Returns
    -------
    (amplitudes, frequencies): the first len(x)//2 amplitude bins (scaled
    by 2/n) and their normalized frequencies.
    """
    n = len(x)
    spectrum = np.fft.rfft(x)
    amplitude = abs(spectrum) / n * 2
    half = int(n / 2)
    freq = np.arange(half) / n
    return amplitude[:half], freq
def dup_osc_idx(x, n_dom=3, tol=0.05):
    """Detect a duplicated oscillation by comparing the amplitude spectra
    of the last two of `n_dom` equal-length segments of the signal.

    Returns
    -------
    idx: start index of the last segment when the two spectra agree within
        `tol`, otherwise None.
    diff: normalized spectral difference between the two segments.
    """
    seg_len = int(np.floor(len(x) / n_dom))
    seg_a = x[(n_dom - 2) * seg_len:(n_dom - 1) * seg_len]
    seg_b = x[(n_dom - 1) * seg_len:n_dom * seg_len]

    def _stacked_spectrum(arr):
        # amplitude spectrum of each column (DC bin dropped), concatenated
        spectra = []
        for col in range(arr.shape[1]):
            amp, _ = calc_fft(arr[:, col])
            spectra.append(amp[1:])
        return np.hstack(spectra)

    spec_a = _stacked_spectrum(seg_a)
    spec_b = _stacked_spectrum(seg_b)
    diff = np.linalg.norm(spec_a - spec_b) / len(spec_a)
    idx = (n_dom - 1) * seg_len if diff <= tol else None
    return idx, diff
def dup_osc_idx_iter(x, max_iter=5, **kwargs):
    """Iteratively trim duplicated oscillations from the end of `x` using
    `dup_osc_idx`, for at most `max_iter` rounds.

    Returns
    -------
    idx: final trim index (len(x) if no duplication was ever detected).
    D: array of spectral differences, one per round.
    """
    idx = len(x)
    diffs = []
    rounds = 0
    # do-while: always run at least one round before checking max_iter
    while True:
        new_idx, d = dup_osc_idx(x[:idx], **kwargs)
        diffs.append(d)
        if new_idx is None:
            break
        idx = new_idx
        rounds += 1
        if rounds >= max_iter:
            break
    return idx, np.array(diffs)
class FixedPoints:
    """Container for fixed points of a vector field together with the
    Jacobian of the field at each point, supporting stability analysis."""

    def __init__(self, X=None, J=None):
        """Optionally seed with lists of points `X` and Jacobians `J`."""
        # BUG FIX: use an explicit `is None` test instead of `X or []` —
        # the truthiness form raises ValueError on non-empty numpy arrays
        # and silently replaces an explicitly-passed empty list.
        self.X = [] if X is None else X
        self.J = [] if J is None else J
        self.eigvals = []  # cached eigenvalues of each Jacobian

    def get_X(self):
        """Stored fixed points as an array."""
        return np.array(self.X)

    def get_J(self):
        """Stored Jacobians as an array."""
        return np.array(self.J)

    def add_fixed_points(self, X, J, tol_redundant=1e-4):
        """Append fixed points and their Jacobians, skipping points within
        `tol_redundant` of an already-stored point."""
        for i, x in enumerate(X):
            redundant = False
            if tol_redundant is not None and len(self.X) > 0:
                for y in self.X:
                    if np.linalg.norm(x - y) <= tol_redundant:
                        redundant = True
                        break
            if not redundant:
                self.X.append(x)
                self.J.append(J[i])

    def compute_eigvals(self):
        """(Re)compute and cache the eigenvalues of every stored Jacobian."""
        self.eigvals = []
        for i in range(len(self.J)):
            w, _ = eig(self.J[i])
            self.eigvals.append(w)

    def is_stable(self):
        """Boolean array: True where every eigenvalue has negative real part."""
        if len(self.eigvals) != len(self.X):
            self.compute_eigvals()
        stable = np.ones(len(self.eigvals), dtype=bool)
        for i, w in enumerate(self.eigvals):
            if np.any(np.real(w) >= 0):
                stable[i] = False
        return stable

    def is_saddle(self):
        """Return (saddle, is_stable): a saddle is an unstable point with at
        least one eigenvalue of negative real part."""
        is_stable = self.is_stable()
        saddle = np.zeros(len(self.eigvals), dtype=bool)
        for i, w in enumerate(self.eigvals):
            if not is_stable[i] and np.any(np.real(w) < 0):
                saddle[i] = True
        return saddle, is_stable
class VectorField2D:
    """Topological analysis of a 2D vector field: fixed points, their
    stability (via Jacobians), and nullclines.

    Parameters
    ----------
    func: callable mapping a 2D point (or an array of points) to velocities.
    func_vx, func_vy: optional callables for the individual velocity
        components; derived from `func` when not provided.
    X_data: data points used to score fixed-point confidence.
    k: number of nearest neighbors used by the confidence score.
    """

    def __init__(self, func, func_vx=None, func_vy=None, X_data=None, k=50):
        self.func = func

        def func_dim(x, func, dim):
            # extract one velocity component for 1D (single point) or 2D input
            y = func(x)
            if y.ndim == 1:
                y = y[dim]
            else:
                y = y[:, dim].flatten()
            return y

        if func_vx is None:
            self.fx = lambda x: func_dim(x, self.func, 0)
        else:
            self.fx = func_vx
        if func_vy is None:
            self.fy = lambda x: func_dim(x, self.func, 1)
        else:
            self.fy = func_vy
        self.Xss = FixedPoints()
        self.X_data = X_data
        self.k = k
        self.NCx = None
        self.NCy = None

    def get_num_fixed_points(self):
        """Number of fixed points found so far."""
        return len(self.Xss.get_X())

    def get_fixed_points(self, get_types=True):
        """Return fixed points, optionally with a stability type per point:
        -1 -- stable, 0 -- saddle, 1 -- unstable."""
        X = self.Xss.get_X()
        if not get_types:
            return X
        is_saddle, is_stable = self.Xss.is_saddle()
        ftype = np.ones(len(X))  # default: unstable (1)
        for i in range(len(ftype)):
            if is_saddle[i]:
                ftype[i] = 0
            elif is_stable[i]:
                ftype[i] = -1
        return X, ftype

    def get_Xss_confidence(self):
        """Score each fixed point by proximity to the data: 1 for the point
        closest to data, 0 for the farthest, based on the mean distance to
        its k nearest data points."""
        X = self.X_data
        X = X.A if sp.issparse(X) else X
        Xss = self.Xss.get_X()
        # (a dead duplicate assignment of `alg` based on Xss.shape was removed;
        # the neighbor algorithm only depends on the data dimensionality)
        if X.shape[0] > 200000 and X.shape[1] > 2:
            # approximate nearest neighbors for very large datasets
            from pynndescent import NNDescent

            nbrs = NNDescent(X, metric='euclidean', n_neighbors=min(self.k, X.shape[0] - 1), n_jobs=-1, random_state=19491001)
            _, dist = nbrs.query(Xss, k=min(self.k, X.shape[0] - 1))
        else:
            alg = 'ball_tree' if X.shape[1] > 10 else 'kd_tree'
            nbrs = NearestNeighbors(n_neighbors=min(self.k, X.shape[0] - 1), algorithm=alg, n_jobs=-1).fit(X)
            dist, _ = nbrs.kneighbors(Xss)
        dist_m = dist.mean(1)
        confidence = 1 - dist_m / dist_m.max()
        return confidence

    def find_fixed_points_by_sampling(
        self, n, x_range, y_range, lhs=True, tol_redundant=1e-4
    ):
        """Sample `n` seed points in the box (Latin hypercube or uniform),
        solve for fixed points, and keep those inside the domain."""
        if lhs:
            from .sampling import lhsclassic
            X0 = lhsclassic(n, 2)
        else:
            X0 = np.random.rand(n, 2)
        # rescale unit samples into the requested box
        X0[:, 0] = X0[:, 0] * (x_range[1] - x_range[0]) + x_range[0]
        X0[:, 1] = X0[:, 1] * (y_range[1] - y_range[0]) + y_range[0]
        X, J, _ = find_fixed_points(
            X0, self.func, tol_redundant=tol_redundant, full_output=True
        )
        # remove points that are outside the domain
        outside = is_outside(X, [x_range, y_range])
        self.Xss.add_fixed_points(X[~outside], J[~outside], tol_redundant)

    def find_nearest_fixed_point(self, x, x_range, y_range, tol_redundant=1e-4):
        """Refine the seed `x` into a fixed point and store it if it lies
        inside the domain."""
        X, J, _ = find_fixed_points(
            x, self.func, tol_redundant=tol_redundant, full_output=True
        )
        outside = is_outside(X, [x_range, y_range])[0]
        if not outside:
            self.Xss.add_fixed_points(X, J, tol_redundant)

    def compute_nullclines(
        self, x_range, y_range, find_new_fixed_points=False, tol_redundant=1e-4
    ):
        """Trace x/y nullclines from the known fixed points; optionally
        detect additional fixed points at nullcline intersections."""
        s_max = 5 * ((x_range[1] - x_range[0]) + (y_range[1] - y_range[0]))
        ds = s_max / 1e3
        self.NCx, self.NCy = compute_nullclines_2d(
            self.Xss.get_X(), self.fx, self.fy, x_range, y_range, s_max=s_max, ds=ds
        )
        if find_new_fixed_points:
            sample_interval = ds * 10
            X, J = find_fixed_points_nullcline(
                self.func, self.NCx, self.NCy, sample_interval, tol_redundant, True
            )
            outside = is_outside(X, [x_range, y_range])
            self.Xss.add_fixed_points(X[~outside], J[~outside], tol_redundant)

    def output_to_dict(self, dict_vf):
        """Write nullclines, fixed points, confidences and Jacobians into
        `dict_vf` and return it."""
        dict_vf["NCx"] = self.NCx
        dict_vf["NCy"] = self.NCy
        dict_vf["Xss"] = self.Xss.get_X()
        dict_vf["confidence"] = self.get_Xss_confidence()
        dict_vf["J"] = self.Xss.get_J()
        return dict_vf
def topography(adata, basis="umap", layer=None, X=None, dims=None, n=25, VecFld=None):
    """Map the topography of the single cell vector field in (first) two dimensions.
    Parameters
    ----------
        adata: :class:`~anndata.AnnData`
            an Annodata object.
        basis: `str` (default: `trimap`)
            The reduced dimension embedding of cells to visualize.
        layer: `str` or None (default: None)
            Which layer of the data will be used for vector field function reconstruction. This will be used in conjunction
            with X.
        X: 'np.ndarray' (dimension: n_obs x n_features)
                Original data.
        dims: `list` or `None` (default: `None`)
            The dimensions that will be used for vector field reconstruction.
        n: `int` (default: `10`)
            Number of samples for calculating the fixed points.
        VecFld: `dictionary` or None (default: None)
            The reconstructed vector field function.
    Returns
    -------
        adata: :class:`~anndata.AnnData`
            `AnnData` object that is updated with the `VecFld` or 'VecFld_' + basis dictionary in the `uns` attribute.
            The `VecFld2D` key stores an instance of the VectorField2D class which presumably has fixed points, nullcline,
            separatrix, computed and stored.
    """
    if VecFld is None:
        VecFld, func = vecfld_from_adata(adata, basis)
    else:
        func = lambda x: vector_field_function(x, VecFld)

    if dims is None:
        dims = [0, 1]
    X_basis = adata.obsm["X_" + basis][:, dims] if X is None else X[:, dims]

    # pad the embedding bounding box by 10% per side for the analysis domain
    min_, max_ = X_basis.min(0), X_basis.max(0)
    xlim = [min_[0] - (max_[0] - min_[0]) * 0.1, max_[0] + (max_[0] - min_[0]) * 0.1]
    ylim = [min_[1] - (max_[1] - min_[1]) * 0.1, max_[1] + (max_[1] - min_[1]) * 0.1]

    vecfld = VectorField2D(func, X_data=X_basis)
    vecfld.find_fixed_points_by_sampling(n, xlim, ylim)
    if vecfld.get_num_fixed_points() > 0:
        vecfld.compute_nullclines(xlim, ylim, find_new_fixed_points=True)
        # sep = compute_separatrices(vecfld.Xss.get_X(), vecfld.Xss.get_J(), vecfld.func, xlim, ylim)

    if layer is None:
        vf_key = "VecFld_" + basis
    else:
        vf_key = "VecFld" if layer == "X" else "VecFld_" + layer
    topo_dict = {"VecFld": VecFld, "VecFld2D": vecfld, "xlim": xlim, "ylim": ylim}
    # BUG FIX: the layer branch previously checked the literal key "VecFld"
    # in adata.uns even when vf_key was "VecFld_<layer>", which could raise
    # KeyError on .update() of a missing entry (or clobber an existing one).
    if vf_key in adata.uns_keys():
        adata.uns[vf_key].update(topo_dict)
    else:
        adata.uns[vf_key] = topo_dict

    return adata
def VectorField(
    adata,
    basis=None,
    layer="X",
    dims=None,
    genes=None,
    normalize=False,
    grid_velocity=False,
    grid_num=50,
    velocity_key="velocity_S",
    method="SparseVFC",
    return_vf_object=False,
    map_topography=True,
    pot_curl_div=False,
    cores=1,
    **kwargs,
):
    """Learn a function of high dimensional vector field from sparse single cell samples in the entire space robustly.
    Parameters
    ----------
        adata: :class:`~anndata.AnnData`
            AnnData object that contains embedding and velocity data
        basis: `str` or None (default: `None`)
            The embedding data to use. The vector field function will be learned on the low  dimensional embedding and can be then
            projected back to the high dimensional space.
        layer: `str` or None (default: `X`)
            Which layer of the data will be used for vector field function reconstruction. The layer once provided, will override
            the `basis` argument and then learn the vector field function in high dimensional space.
        dims: `int`, `list` or None (default: None)
            The dimensions that will be used for reconstructing vector field functions. If it is an `int` all dimension from
            the first dimension to `dims` will be used; if it is a list, the dimensions in the list will be used.
        genes: `list` or None (default: None)
            The gene names whose gene expression will be used for vector field reconstruction. By default (when genes is
            set to None), the genes used for velocity embedding (var.use_for_velocity) will be used for vector field reconstruction.
            Note that the genes to be used need to have velocity calculated.
        normalize: 'bool' (default: False)
            Logic flag to determine whether to normalize the data to have zero means and unit covariance. This is often
            required for raw dataset (for example, raw UMI counts and RNA velocity values in high dimension). But it is
            normally not required for low dimensional embeddings by PCA or other non-linear dimension reduction methods.
        grid_velocity: `bool` (default: False)
            Whether to generate grid velocity. Note that by default it is set to be False, but for datasets with embedding
            dimension less than 4, the grid velocity will still be generated. Please note that number of total grids in
            the space increases exponentially as the number of dimensions increases. So it may quickly lead to lack of
            memory, for example, it cannot allocate the array with grid_num set to be 50 and dimension is 6 (50^6 total
            grids) on 32 G memory computer. Although grid velocity may not be generated, the vector field function can still
            be learned for thousands of dimensions and we can still predict the transcriptomic cell states over long time period.
        grid_num: `int` (default: 50)
            The number of grids in each dimension for generating the grid velocity.
        velocity_key: `str` (default: `velocity_S`)
            The key from the adata layer that corresponds to the velocity matrix.
        method: `str` (default: `sparseVFC`)
            Method that is used to reconstruct the vector field functionally. Currently only SparseVFC supported but other
            improved approaches are under development.
        return_vf_object: `bool` (default: `False`)
            Whether or not to include an instance of a vectorfield class in the the `VecFld` dictionary in the `uns`
            attribute.
        map_topography: `bool` (default: `True`)
            Whether to quantify the topography of the 2D vector field.
        pot_curl_div: `bool` (default: `False`)
            Whether to calculate potential, curl or divergence for each cell. Potential can be calculated for any basis
            while curl and divergence is by default only applied to 2D basis. However, divergence is applicable for any
            dimension while curl is generally only defined for 2/3 D systems.
        cores: `int` (default: 1):
            Number of cores to run the ddhodge function. If cores is set to be > 1, multiprocessing will be used to parallel
            the ddhodge calculation.
        kwargs:
            Other additional parameters passed to the vectorfield class.
    Returns
    -------
        adata: :class:`~anndata.AnnData`
            `AnnData` object that is updated with the `VecFld` dictionary in the `uns` attribute.
    """
    # --- assemble the state matrix X and velocity matrix V -------------------
    if basis is not None:
        # low dimensional embedding: take coordinates and projected velocities
        X = adata.obsm["X_" + basis].copy()
        V = adata.obsm["velocity_" + basis].copy()

        # dims as an int selects a leading slice; as a list, explicit columns
        if np.isscalar(dims):
            X, V = X[:, :dims], V[:, :dims]
        elif type(dims) is list:
            X, V = X[:, dims], V[:, dims]

    else:
        # high dimensional (gene expression) space
        valid_genes = (
            list(set(genes).intersection(adata.var.index))
            if genes is not None
            else adata.var_names[adata.var.use_for_velocity]
        )
        if layer == "X":
            X = adata[:, valid_genes].X.copy()
            # presumably X holds log1p-normalized values here — expm1 maps
            # back to counts; TODO confirm against the preprocessing pipeline
            X = np.expm1(X)
        else:
            X = inverse_norm(adata, adata.layers[layer])

        V = adata[:, valid_genes].layers[velocity_key].copy()

        if sp.issparse(X):
            X, V = X.A, V.A

    # --- optional grid for evaluating the field on a lattice ------------------
    Grid = None
    if X.shape[1] < 4 or grid_velocity:
        # smart way for generating high dimensional grids and convert into a row matrix
        min_vec, max_vec = (
            X.min(0),
            X.max(0),
        )
        # pad the bounding box by 1% on each side
        min_vec = min_vec - 0.01 * np.abs(max_vec - min_vec)
        max_vec = max_vec + 0.01 * np.abs(max_vec - min_vec)

        Grid_list = np.meshgrid(
            *[np.linspace(i, j, grid_num) for i, j in zip(min_vec, max_vec)]
        )
        Grid = np.array([i.flatten() for i in Grid_list]).T

    if X is None:
        raise Exception(
            f"X is None. Make sure you passed the correct X or {basis} dimension reduction method."
        )
    elif V is None:
        raise Exception("V is None. Make sure you passed the correct V.")

    # --- fit the vector field function ---------------------------------------
    # default hyperparameters of the (sparse) vector field reconstruction;
    # user kwargs override these via update_dict below
    vf_kwargs = {
        "M": None,
        "a": 5,
        "beta": None,
        "ecr": 1e-5,
        "gamma": 0.9,
        "lambda_": 3,
        "minP": 1e-5,
        "MaxIter": 30,
        "theta": 0.75,
        "div_cur_free_kernels": False,
        "velocity_based_sampling": True,
        "sigma": 0.8,
        "eta": 0.5,
        "seed": 0,
    }
    vf_kwargs = update_dict(vf_kwargs, kwargs)

    VecFld = vectorfield(X, V, Grid, **vf_kwargs)
    vf_dict = VecFld.fit(normalize=normalize, method=method, **kwargs)

    # --- store results back into adata ----------------------------------------
    vf_key = "VecFld" if basis is None else "VecFld_" + basis
    if basis is not None:
        # store reconstructed velocities/coordinates under method-suffixed keys
        key = "velocity_" + basis + '_' + method
        adata.obsm[key] = vf_dict['VecFld']['V']
        adata.obsm['X_' + basis + '_' + method] = vf_dict['VecFld']['X']

        vf_dict['dims'] = dims
        adata.uns[vf_key] = vf_dict
    else:
        key = velocity_key + '_' + method
        # NOTE(review): assigning into a fresh CSR matrix below triggers a
        # SparseEfficiencyWarning; a lil_matrix intermediate would avoid it —
        # confirm before changing.
        adata.layers[key] = sp.csr_matrix((adata.shape))
        adata.layers[key][:, np.where(adata.var.use_for_velocity)[0]] = vf_dict['VecFld']['V']

        vf_dict['layer'] = layer
        vf_dict['genes'] = genes
        vf_dict['velocity_key'] = velocity_key
        adata.uns[vf_key] = vf_dict

    # --- optional 2D topography + potential/curl/divergence --------------------
    if X.shape[1] == 2 and map_topography:
        tp_kwargs = {"n": 25}
        tp_kwargs = update_dict(tp_kwargs, kwargs)

        # topography can emit benign numerical warnings during root finding
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            adata = topography(
                adata, basis=basis, X=X, layer=layer, dims=[0, 1], VecFld=vf_dict['VecFld'], **tp_kwargs
            )
    if pot_curl_div:
        if basis in ["pca", 'umap', 'tsne', 'diffusion_map', 'trimap']:
            # `ddhoge` is presumably the ddhodge potential computation; the
            # name matches the module-level import — TODO confirm spelling
            ddhoge(adata, basis=basis, cores=cores)
            # curl is only defined for 2D fields; divergence for any dimension
            if X.shape[1] == 2: curl(adata, basis=basis)
            divergence(adata, basis=basis)

    if return_vf_object:
        return VecFld
| [
"numpy.fft.rfft",
"numpy.abs",
"scipy.sparse.issparse",
"numpy.sin",
"scipy.spatial.distance.pdist",
"numpy.linalg.norm",
"numpy.atleast_2d",
"numpy.zeros_like",
"warnings.simplefilter",
"scipy.optimize.fsolve",
"numpy.expm1",
"warnings.catch_warnings",
"numpy.real",
"numpy.linspace",
"n... | [((674, 690), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (687, 690), True, 'import numpy as np\n'), ((1554, 1565), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1562, 1565), True, 'import numpy as np\n'), ((2130, 2144), 'scipy.optimize.fsolve', 'fsolve', (['F', 'x01'], {}), '(F, x01)\n', (2136, 2144), False, 'from scipy.optimize import fsolve\n'), ((2638, 2651), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (2646, 2651), True, 'import numpy as np\n'), ((5708, 5719), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (5716, 5719), True, 'import numpy as np\n'), ((6403, 6414), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (6411, 6414), True, 'import numpy as np\n'), ((7022, 7037), 'numpy.array', 'np.array', (['int_P'], {}), '(int_P)\n', (7030, 7037), True, 'import numpy as np\n'), ((7291, 7323), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {'dtype': 'bool'}), '(X.shape[0], dtype=bool)\n', (7299, 7323), True, 'import numpy as np\n'), ((7530, 7544), 'numpy.fft.rfft', 'np.fft.rfft', (['x'], {}), '(x)\n', (7541, 7544), True, 'import numpy as np\n'), ((8540, 8551), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (8548, 8551), True, 'import numpy as np\n'), ((772, 780), 'scipy.spatial.distance.pdist', 'pdist', (['X'], {}), '(X)\n', (777, 780), False, 'from scipy.spatial.distance import pdist\n'), ((1598, 1609), 'numpy.array', 'np.array', (['J'], {}), '(J)\n', (1606, 1609), True, 'import numpy as np\n'), ((1625, 1639), 'numpy.array', 'np.array', (['fval'], {}), '(fval)\n', (1633, 1639), True, 'import numpy as np\n'), ((2325, 2342), 'numpy.zeros_like', 'np.zeros_like', (['x0'], {}), '(x0)\n', (2338, 2342), True, 'import numpy as np\n'), ((2609, 2626), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (2623, 2626), True, 'import numpy as np\n'), ((4068, 4086), 'numpy.linalg.norm', 'np.linalg.norm', (['v0'], {}), '(v0)\n', (4082, 4086), True, 'import numpy as np\n'), ((4680, 4686), 'scipy.linalg.eig', 'eig', (['J'], {}), '(J)\n', 
(4683, 4686), False, 'from scipy.linalg import eig\n'), ((5576, 5615), 'numpy.linalg.norm', 'np.linalg.norm', (['(curve[i] - curve[i - 1])'], {}), '(curve[i] - curve[i - 1])\n', (5590, 5615), True, 'import numpy as np\n'), ((7368, 7429), 'numpy.logical_or', 'np.logical_or', (['(X[:, k] < domain[k][0])', '(X[:, k] > domain[k][1])'], {}), '(X[:, k] < domain[k][0], X[:, k] > domain[k][1])\n', (7381, 7429), True, 'import numpy as np\n'), ((7451, 7479), 'numpy.logical_or', 'np.logical_or', (['is_outside', 'o'], {}), '(is_outside, o)\n', (7464, 7479), True, 'import numpy as np\n'), ((7969, 7983), 'numpy.hstack', 'np.hstack', (['ret'], {}), '(ret)\n', (7978, 7983), True, 'import numpy as np\n'), ((8059, 8088), 'numpy.linalg.norm', 'np.linalg.norm', (['(xFFt1 - xFFt2)'], {}), '(xFFt1 - xFFt2)\n', (8073, 8088), True, 'import numpy as np\n'), ((8743, 8759), 'numpy.array', 'np.array', (['self.X'], {}), '(self.X)\n', (8751, 8759), True, 'import numpy as np\n'), ((8797, 8813), 'numpy.array', 'np.array', (['self.J'], {}), '(self.J)\n', (8805, 8813), True, 'import numpy as np\n'), ((21932, 21949), 'numpy.isscalar', 'np.isscalar', (['dims'], {}), '(dims)\n', (21943, 21949), True, 'import numpy as np\n'), ((22513, 22527), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (22524, 22527), True, 'import scipy.sparse as sp\n'), ((24270, 24296), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['adata.shape'], {}), '(adata.shape)\n', (24283, 24296), True, 'import scipy.sparse as sp\n'), ((1251, 1288), 'scipy.optimize.fsolve', 'fsolve', (['func_vf', 'x0'], {'full_output': '(True)'}), '(func_vf, x0, full_output=True)\n', (1257, 1288), False, 'from scipy.optimize import fsolve\n'), ((1506, 1525), 'scipy.optimize.fsolve', 'fsolve', (['func_vf', 'x0'], {}), '(func_vf, x0)\n', (1512, 1525), False, 'from scipy.optimize import fsolve\n'), ((4024, 4037), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4030, 4037), True, 'import numpy as np\n'), ((4039, 4052), 'numpy.sin', 'np.sin', 
(['theta'], {}), '(theta)\n', (4045, 4052), True, 'import numpy as np\n'), ((4815, 4828), 'numpy.real', 'np.real', (['v[j]'], {}), '(v[j])\n', (4822, 4828), True, 'import numpy as np\n'), ((4978, 5005), 'numpy.linspace', 'np.linspace', (['(0)', 't', 'n_sample'], {}), '(0, t, n_sample)\n', (4989, 5005), True, 'import numpy as np\n'), ((5316, 5353), 'numpy.vstack', 'np.vstack', (['(ab_lower[::-1], ab_upper)'], {}), '((ab_lower[::-1], ab_upper))\n', (5325, 5353), True, 'import numpy as np\n'), ((6009, 6042), 'numpy.linalg.det', 'np.linalg.det', (['[p1 - p2, p3 - p4]'], {}), '([p1 - p2, p3 - p4])\n', (6022, 6042), True, 'import numpy as np\n'), ((9349, 9363), 'scipy.linalg.eig', 'eig', (['self.J[i]'], {}), '(self.J[i])\n', (9352, 9363), False, 'from scipy.linalg import eig\n'), ((11345, 11359), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (11356, 11359), True, 'import scipy.sparse as sp\n'), ((12330, 12350), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (12344, 12350), True, 'import numpy as np\n'), ((22355, 22366), 'numpy.expm1', 'np.expm1', (['X'], {}), '(X)\n', (22363, 22366), True, 'import numpy as np\n'), ((24683, 24708), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (24706, 24708), False, 'import warnings\n'), ((24722, 24753), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (24743, 24753), False, 'import warnings\n'), ((3068, 3098), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - cur[i - 1])'], {}), '(p - cur[i - 1])\n', (3082, 3098), True, 'import numpy as np\n'), ((3981, 3997), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3995, 3997), True, 'import numpy as np\n'), ((4849, 4866), 'numpy.linalg.norm', 'np.linalg.norm', (['u'], {}), '(u)\n', (4863, 4866), True, 'import numpy as np\n'), ((22820, 22845), 'numpy.abs', 'np.abs', (['(max_vec - min_vec)'], {}), '(max_vec - min_vec)\n', (22826, 22845), True, 'import numpy as np\n'), ((22881, 22906), 
'numpy.abs', 'np.abs', (['(max_vec - min_vec)'], {}), '(max_vec - min_vec)\n', (22887, 22906), True, 'import numpy as np\n'), ((4715, 4725), 'numpy.real', 'np.real', (['w'], {}), '(w)\n', (4722, 4725), True, 'import numpy as np\n'), ((6090, 6123), 'numpy.linalg.det', 'np.linalg.det', (['[p1 - p3, p3 - p4]'], {}), '([p1 - p3, p3 - p4])\n', (6103, 6123), True, 'import numpy as np\n'), ((9629, 9639), 'numpy.real', 'np.real', (['w'], {}), '(w)\n', (9636, 9639), True, 'import numpy as np\n'), ((22955, 22982), 'numpy.linspace', 'np.linspace', (['i', 'j', 'grid_num'], {}), '(i, j, grid_num)\n', (22966, 22982), True, 'import numpy as np\n'), ((24328, 24364), 'numpy.where', 'np.where', (['adata.var.use_for_velocity'], {}), '(adata.var.use_for_velocity)\n', (24336, 24364), True, 'import numpy as np\n'), ((6153, 6186), 'numpy.linalg.det', 'np.linalg.det', (['[p1 - p2, p1 - p3]'], {}), '([p1 - p2, p1 - p3])\n', (6166, 6186), True, 'import numpy as np\n'), ((9055, 9076), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (9069, 9076), True, 'import numpy as np\n'), ((9911, 9921), 'numpy.real', 'np.real', (['w'], {}), '(w)\n', (9918, 9921), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
'''
log_names = ['log.txt']
for log_name in log_names:
data = np.loadtxt(log_name, skiprows=1)
losses = []
curr = 0
loss = 0
for ind in data:
if ind[0] == curr:
loss += ind[2] + ind[3] + ind[4]
else:
losses.append(loss)
loss = 0
curr += 1
losses.append(loss)
x = range(len(losses))
plt.scatter(x, losses, s=0.8)
plt.xlabel('epochs')
plt.ylabel('training losses')
plt.title('MLVAE on double univatiate normal data, n = 1500, T = 100')
plt.show()
'''
errors = np.loadtxt('errors.txt')
etas = np.array(range(80)) + 10
plt.scatter(etas, errors, s=0.9)
plt.axvline(x=65)
plt.xlabel('etas')
plt.ylabel('squared errors')
plt.title('Approach 2. Squared errors for a single data X_0 in n=1500 theta=1')
plt.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((627, 651), 'numpy.loadtxt', 'np.loadtxt', (['"""errors.txt"""'], {}), "('errors.txt')\n", (637, 651), True, 'import numpy as np\n'), ((684, 716), 'matplotlib.pyplot.scatter', 'plt.scatter', (['etas', 'errors'], {'s': '(0.9)'}), '(etas, errors, s=0.9)\n', (695, 716), True, 'import matplotlib.pyplot as plt\n'), ((717, 734), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(65)'}), '(x=65)\n', (728, 734), True, 'import matplotlib.pyplot as plt\n'), ((735, 753), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""etas"""'], {}), "('etas')\n", (745, 753), True, 'import matplotlib.pyplot as plt\n'), ((754, 782), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""squared errors"""'], {}), "('squared errors')\n", (764, 782), True, 'import matplotlib.pyplot as plt\n'), ((783, 862), 'matplotlib.pyplot.title', 'plt.title', (['"""Approach 2. Squared errors for a single data X_0 in n=1500 theta=1"""'], {}), "('Approach 2. Squared errors for a single data X_0 in n=1500 theta=1')\n", (792, 862), True, 'import matplotlib.pyplot as plt\n'), ((863, 873), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (871, 873), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import random
import scipy.stats
import math
import tensorflow as tf
from .utils import calc_cross_distances
class DistributionWrapper:
    """Bundle a sampling callable and a density callable behind one interface.

    Both callables are kept as public attributes so callers may also invoke
    or replace them directly.
    """

    def __init__(self, sample_fn, pdf_fn):
        self.sample_fn = sample_fn
        self.pdf_fn = pdf_fn

    def sample(self, *args):
        """Forward to the wrapped sampler."""
        return self.sample_fn(*args)

    def pdf(self, *args):
        """Forward to the wrapped density function."""
        return self.pdf_fn(*args)
class BoundedDistributionAdapter:
    """Restrict ``base_distribution`` to the support of ``supp``.

    Draws that land where ``supp`` has zero density are replaced by draws
    from ``supp`` itself; the density is truncated to the support (it is not
    re-normalised).
    """

    def __init__(self, base_distribution, supp):
        self.base_distribution = base_distribution
        self.supp = supp

    def sample(self, batch_size):
        candidates = self.base_distribution.sample(batch_size)
        fallback = self.supp.sample(batch_size)
        inside = self.supp.pdf(candidates) > 0
        # Broadcast the per-row mask over the point dimension.
        return tf.where(tf.expand_dims(inside, 1), candidates, fallback)

    def pdf(self, ps):
        supp_pdfs = self.supp.pdf(ps)
        base_pdfs = self.base_distribution.pdf(ps)
        return tf.where(supp_pdfs > 0, base_pdfs, 0)
class TranslatedDistributionAdapter:
    """Shift ``base_distribution`` by the constant offset ``delta``."""

    def __init__(self, base_distribution, delta):
        self.base_distribution = base_distribution
        self.delta = delta

    def sample(self, batch_size):
        offset = tf.expand_dims(self.delta, 0)
        return self.base_distribution.sample(batch_size) + offset

    def pdf(self, ps):
        # Evaluate the base density at the un-shifted locations.
        offset = tf.expand_dims(self.delta, 0)
        return self.base_distribution.pdf(ps - offset)
# numpy versions, without batching
def uniform_1d_np(start, end):
    """Uniform distribution on [start, end]; scalar sampling, no batching."""
    width = end - start

    def draw():
        return start + width * np.random.random()

    def density(x):
        return 1 / width if start <= x <= end else 0.0

    return DistributionWrapper(sample_fn=draw, pdf_fn=density)
def gaussian_1d_np(mean, sdv):
    """1-D Gaussian with the given mean and standard deviation (scipy-backed)."""
    normal = scipy.stats.norm(loc=mean, scale=sdv)
    return DistributionWrapper(sample_fn=normal.rvs, pdf_fn=normal.pdf)
def uniform_rectangle_np(x_start, x_end, y_start, y_end):
    """Uniform distribution over an axis-aligned rectangle (one point at a time)."""
    marginal_x = uniform_1d_np(x_start, x_end)
    marginal_y = uniform_1d_np(y_start, y_end)

    def draw():
        # x is drawn before y so the RNG stream matches the marginals' order.
        return np.array([marginal_x.sample(), marginal_y.sample()])

    def density(p):
        # Independent coordinates: the joint density factorises.
        return marginal_x.pdf(p[0]) * marginal_y.pdf(p[1])

    return DistributionWrapper(sample_fn=draw, pdf_fn=density)
def uniform_annulus_np(center, r1, r2):
    """Uniform distribution over the annulus r1 <= |p - center| <= r2."""
    area = math.pi * (r2 ** 2 - r1 ** 2)

    def draw():
        angle = random.uniform(0, 2 * math.pi)
        # Sampling the *squared* radius uniformly yields uniform area density.
        radius = math.sqrt(random.uniform(r1 ** 2, r2 ** 2))
        direction = np.array([math.cos(angle), math.sin(angle)])
        return center + radius * direction

    def density(p):
        sq_dist = np.dot(p - center, p - center)
        return 1 / area if r1 ** 2 <= sq_dist <= r2 ** 2 else 0.0

    return DistributionWrapper(sample_fn=draw, pdf_fn=density)
def uniform_disk_np(center, radius):
    """Uniform distribution over a disk: an annulus with inner radius zero."""
    return uniform_annulus_np(center, 0, radius)
def gaussian_np(mean, cov):
    """Multivariate Gaussian with the given mean vector and covariance matrix."""
    normal = scipy.stats.multivariate_normal(mean, cov)
    return DistributionWrapper(sample_fn=normal.rvs, pdf_fn=normal.pdf)
def ellipse_np(center, a, b, T):
    """Distribution on the boundary of an ellipse, uniform in arc length.

    The ellipse with semi-axes ``a``/``b`` is mapped through matrix ``T`` and
    shifted by ``center``.  Parameter values t are accepted by rejection
    sampling proportional to the local speed |dp/dt|, which makes the draws
    uniform in arc length before the ``T`` transform.  No density is exposed
    (``pdf_fn`` is None).
    """
    center = np.array(center, dtype=float)
    T = np.array(T, dtype=float)

    def draw():
        # sqrt(a^2 + b^2) bounds the speed, so the acceptance test is valid.
        speed_bound = math.sqrt(a ** 2 + b ** 2)
        while True:
            t = random.uniform(0, 2 * math.pi)
            p = np.array([a * math.cos(t), b * math.sin(t)])
            speed = math.sqrt((a * math.sin(t)) ** 2 + (b * math.cos(t)) ** 2)
            if random.uniform(0, speed_bound) < speed:
                return center + np.dot(T, p)

    return DistributionWrapper(sample_fn=draw, pdf_fn=None)
def rectangle_frame_np(extent):
    """Uniform distribution on the boundary (frame) of a rectangle.

    ``extent`` is (x0, x1, y0, y1).  A position along the unrolled perimeter
    is drawn uniformly and mapped back onto one of the four sides.  No
    density is exposed (``pdf_fn`` is None).
    """
    x0, x1, y0, y1 = extent
    width = x1 - x0
    height = y1 - y0
    perimeter = 2 * (width + height)
    origin = np.array([x0, y0])

    def draw():
        t = random.uniform(0, perimeter)
        if t < width:                           # bottom edge (y = y0)
            offset = (t, 0)
        elif t < 2 * width:                     # top edge (y = y1)
            offset = (t - width, height)
        elif t < 2 * width + height:            # left edge (x = x0)
            offset = (0, t - 2 * width)
        else:                                   # right edge (x = x1)
            offset = (width, t - 2 * width - height)
        return origin + np.array(offset)

    return DistributionWrapper(sample_fn=draw, pdf_fn=None)
# tensorflow versions, with batching
def uniform_rectangle_tf(extent, float_dtype=tf.float32):
    """Batched uniform distribution over the rectangle ``extent`` = (x0, x1, y0, y1)."""
    x0, x1, y0, y1 = extent
    density = tf.cast(1 / ((x1 - x0) * (y1 - y0)), float_dtype)

    def sample(batch_size):
        # x coordinates are drawn before y coordinates.
        xs = tf.random.uniform(shape=(batch_size,), minval=x0, maxval=x1, dtype=float_dtype)
        ys = tf.random.uniform(shape=(batch_size,), minval=y0, maxval=y1, dtype=float_dtype)
        return tf.stack([xs, ys], axis=1)

    def pdf(ps):
        ps = tf.cast(ps, float_dtype)
        inside = (x0 <= ps[:, 0]) & (ps[:, 0] <= x1) & (y0 <= ps[:, 1]) & (ps[:, 1] <= y1)
        return tf.where(inside, density, 0)

    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def uniform_annulus_tf(center, inner_radius, outer_radius, float_dtype=tf.float32):
    """Batched uniform distribution over an annulus around ``center``."""
    area = tf.cast(math.pi * (outer_radius ** 2 - inner_radius ** 2), float_dtype)
    density = tf.cast(1 / area, float_dtype)

    def sample(batch_size):
        # Angles are drawn first, then squared radii (uniform in squared
        # radius <=> uniform in area).
        angles = tf.random.uniform(shape=(batch_size,), minval=0, maxval=2*math.pi, dtype=float_dtype)
        radii = tf.sqrt(tf.random.uniform(shape=(batch_size,), minval=inner_radius**2, maxval=outer_radius**2, dtype=float_dtype))
        directions = tf.stack([tf.math.cos(angles), tf.math.sin(angles)], axis=1)
        return tf.expand_dims(radii, 1) * directions + tf.expand_dims(tf.convert_to_tensor(center), axis=0)

    def pdf(ps):
        ps = tf.cast(ps, float_dtype)
        sq_dists = tf.reduce_sum((ps - tf.expand_dims(center, 0)) ** 2, -1)
        inside = (inner_radius ** 2 <= sq_dists) & (sq_dists <= outer_radius ** 2)
        return tf.where(inside, density, 0)

    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def uniform_disk_tf(center, radius, float_dtype=tf.float32):
    """Batched uniform distribution over a disk: an annulus with inner radius zero."""
    return uniform_annulus_tf(center, 0, radius, float_dtype)
def gaussian_tf(point_dim, mean, normal_A, float_dtype=tf.float32):
    """Batched multivariate Gaussian N(mean, A A^T), parameterised by factor A.

    ``normal_A`` times its transpose gives the covariance, so samples are
    mean + A z with z ~ N(0, I).
    """
    mean = tf.cast(mean, float_dtype)
    normal_A = tf.cast(normal_A, float_dtype)
    cov = tf.matmul(normal_A, normal_A, transpose_b=True)
    cov_inv = tf.linalg.inv(cov)
    det_cov = tf.linalg.det(cov)
    # Normalisation constant 1 / ((2 pi)^(d/2) * sqrt(det cov)).
    # NOTE(review): math.sqrt on a tensor relies on eager scalar conversion --
    # confirm this is never traced inside a tf.function.
    coeff = 1 / ((math.sqrt(2 * math.pi)) ** point_dim)
    coeff *= 1 / math.sqrt(det_cov)
    def sample(batch_size):
        # Reparameterisation: z ~ N(0, I), return mean + A z (A tiled per row).
        zs = tf.random.normal(shape=[batch_size, point_dim], dtype=float_dtype)
        zs = tf.linalg.matvec(
            tf.tile(tf.expand_dims(normal_A, 0), [batch_size, 1, 1]),
            zs)
        zs = tf.expand_dims(mean, 0) + zs
        return zs
    def pdf(ps):
        # Batched quadratic form (p - mean)^T cov^{-1} (p - mean).
        ps = tf.cast(ps, float_dtype)
        ps_centered = ps - tf.expand_dims(mean, 0)
        batch_size = tf.shape(ps)[0]
        tmp = tf.linalg.matvec(
            tf.tile(tf.expand_dims(cov_inv, 0), [batch_size, 1, 1]),
            ps_centered)
        tmp = tf.reduce_sum(ps_centered * tmp, -1)
        tmp = -0.5 * tmp
        result = coeff * tf.exp(tmp)
        return result
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def monochromatic_image_tf(img, extent, uniform_noise, float_dtype=tf.float32):
    """Distribution over 2-D positions proportional to image intensity.

    ``img`` is an N x M intensity grid stretched over ``extent`` = (x0, x1,
    y0, y1) (only the cell sizes are used; sampled coordinates start at the
    origin, not at x0/y0).  Sampling picks a pixel with probability
    proportional to its intensity and returns the pixel-centre coordinates,
    optionally jittered uniformly within the pixel when ``uniform_noise`` is
    truthy.  ``pdf`` evaluates the matching piecewise-constant density.
    """
    x0, x1, y0, y1 = extent
    N, M = img.shape  # img is N x M
    hx = tf.cast((x1 - x0) / N, float_dtype)  # pixel width
    hy = tf.cast((y1 - y0) / M, float_dtype)  # pixel height
    # Row-major flattened pixel-centre coordinates: x_flat/y_flat[i * M + j]
    # holds the centre of pixel (i, j), matching tf.reshape(img, [-1]) below.
    x_flat = tf.reshape(tf.tile(tf.expand_dims(hx * (tf.range(N, dtype=float_dtype) + 0.5), 1), [1, M]), [-1])
    y_flat = tf.reshape(tf.tile(tf.expand_dims(hy * (tf.range(M, dtype=float_dtype) + 0.5), 0), [N, 1]), [-1])
    img_flat = tf.cast(tf.reshape(img, [-1]), float_dtype)
    # Cumulative-intensity "bars" for inverse-CDF (dart-throwing) sampling.
    bar_end = tf.cumsum(img_flat)
    bar_start = tf.cumsum(img_flat, exclusive=True)
    total_p = bar_end[-1]
    noise_uniform = uniform_rectangle_tf([-hx/2, hx/2, -hy/2, hy/2], float_dtype)
    def sample(batch_size):
        ds = tf.random.uniform(shape=(batch_size,), minval=0, maxval=total_p, dtype=float_dtype)
        # One-hot row per dart: which cumulative bar the dart falls into.
        mask = tf.expand_dims(ds, 1) < tf.expand_dims(bar_end, 0)
        mask &= tf.expand_dims(ds, 1) >= tf.expand_dims(bar_start, 0)
        # mask - [batch_size, NM]
        xs = tf.linalg.matvec(tf.cast(mask, float_dtype), x_flat)  # [batch_size]
        ys = tf.linalg.matvec(tf.cast(mask, float_dtype), y_flat)  # [batch_size]
        ps = tf.stack([xs, ys], 1)
        if uniform_noise:
            # Jitter uniformly inside the selected pixel.
            ps += noise_uniform.sample(batch_size)
        return ps
    def pdf(ps):
        # Piecewise-constant density: intensity of the containing pixel,
        # normalised so the density integrates to one.
        ps = tf.cast(ps, float_dtype)
        xis = tf.clip_by_value(tf.cast(tf.math.round(ps[:, 0] / hx - 0.5), tf.int32), 0, N - 1)
        yjs = tf.clip_by_value(tf.cast(tf.math.round(ps[:, 1] / hy - 0.5), tf.int32), 0, M - 1)
        # BUG FIX: row-major flattening of an N x M image indexes cell (i, j)
        # as i * M + j; the previous `N * xis + yjs` was only correct for
        # square images (N == M).
        inds = M * xis + yjs
        ws = tf.gather(img_flat, inds, axis=0) / total_p
        result = ws / (hx * hy)
        return result
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def point_set_2d_tf(ps, gaussian_noise=0.01, fast_pdf=True, float_dtype=tf.float32):
    """Empirical 2-D point-set distribution smoothed with Gaussian noise.

    Sampling picks one of the rows of ``ps`` uniformly at random and adds
    isotropic Gaussian jitter of variance ``gaussian_noise``.  The density is
    a kernel estimate: a fast nearest-neighbour approximation when
    ``fast_pdf`` is True, otherwise the exact mean over all per-point kernels.
    """
    ps = tf.cast(ps, float_dtype)
    noise_gaussian = gaussian_tf(2, [0, 0], gaussian_noise * np.identity(2), float_dtype)
    noise_gaussian_1d = gaussian_tf(1, [0], gaussian_noise * np.identity(1), float_dtype)
    def sample(batch_size):
        # Uniformly chosen point indices, then 2-D Gaussian jitter.
        inds = tf.random.uniform(shape=[batch_size], minval=0, maxval=ps.shape[0], dtype=tf.int32)
        qs = tf.stack([tf.gather(ps[:, 0], inds), tf.gather(ps[:, 1], inds)], axis=1)
        qs += noise_gaussian.sample(batch_size)
        return qs
    def pdf(qs):
        qs = tf.cast(qs, float_dtype)
        if fast_pdf:
            # Approximate the kernel density using only the nearest point.
            # The sqrt below suggests calc_cross_distances returns *squared*
            # distances -- TODO confirm against its definition in .utils.
            dist = calc_cross_distances(qs, ps)  # |Q| x |P|
            # unnormalized pdf, as this is only used for visualization
            min_dist = tf.reduce_min(dist, axis=1)  # |Q|
            min_dist = tf.sqrt(tf.maximum(0.0, min_dist))  # numerical issue
            noise_pdfs = noise_gaussian_1d.pdf(tf.expand_dims(min_dist, 1))  # |Q|
            return noise_pdfs
        else:
            # Exact kernel density: average the 2-D kernel over all points.
            q_minus_p = tf.expand_dims(qs, 0) - tf.expand_dims(ps, 1)  # |P| x |Q| x 2
            noise_pdfs = noise_gaussian.pdf(tf.reshape(q_minus_p, [-1, 2]))  # |PQ|
            noise_pdfs = tf.reshape(noise_pdfs, [tf.shape(ps)[0], tf.shape(qs)[0]])  # |P| x |Q|
            return tf.reduce_mean(noise_pdfs, 0)
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
# TODO: remove this, and use mixture_tf instead
def composite_tf(nu1, nu2, w1, w2, float_dtype=tf.float32):
    """Two-component mixture of ``nu1`` and ``nu2`` with weights ``w1``/``w2``.

    Sampling picks nu1 with probability w1 / (w1 + w2).  NOTE(review): ``pdf``
    returns w1 * pdf1 + w2 * pdf2 without dividing by (w1 + w2), so it only
    matches the sampler when the weights sum to one -- confirm with callers.
    """
    def sample(batch_size):
        ps1 = nu1.sample(batch_size)
        ps2 = nu2.sample(batch_size)
        # Darts in [0, w1 + w2): below w1 selects the nu1 sample per row.
        darts = tf.random.uniform(shape=[batch_size], minval=0, maxval=w1+w2, dtype=float_dtype)
        ps = tf.where(tf.expand_dims(darts < w1, 1), ps1, ps2)
        return ps
    def pdf(ps):
        ps = tf.cast(ps, float_dtype)
        return w1 * nu1.pdf(ps) + w2 * nu2.pdf(ps)
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def mixture_tf(dim, nu_list, w_list, float_dtype=tf.float32):
    """Mixture of the ``dim``-dimensional distributions in ``nu_list``.

    Sampling draws a component index per row from a categorical over the
    weights (``tf.random.categorical`` normalises the logits implicitly) and
    gathers that component's sample.  NOTE(review): ``pdf`` combines the
    component densities with the *raw* weights, so it is a proper density
    only when the weights already sum to one -- confirm with callers.
    """
    n_components = len(nu_list)
    w_list = tf.convert_to_tensor(w_list)
    w_list = tf.cast(w_list, float_dtype)
    # Log-weights serve as (unnormalised) logits for tf.random.categorical.
    # (The previously computed `w_sum` was unused and has been removed.)
    w_log = tf.math.log(w_list)
    def sample(batch_size):
        # Draw from every component, then select one per row via gather_nd.
        samples_list = tf.stack([nu_list[i].sample(batch_size) for i in range(n_components)], axis=1)  # BxCxD
        component_id = tf.random.categorical(tf.expand_dims(w_log, 0), batch_size)  # 1xB
        component_id = tf.squeeze(component_id, 0)  # B
        inds_0 = tf.tile(tf.expand_dims(component_id, 1), [1, dim])  # BxD
        inds_1 = tf.tile(tf.expand_dims(tf.range(dim), 0), [batch_size, 1])  # BxD
        inds = tf.stack([tf.cast(inds_0, tf.int32), inds_1], axis=2)  # BxDx2
        ps = tf.gather_nd(samples_list, inds, batch_dims=1)  # BxD
        return ps
    def pdf(ps):
        ps = tf.cast(ps, float_dtype)
        pdf_list = tf.stack([nu_list[i].pdf(ps) for i in range(n_components)], axis=1)  # BxC
        pdfs = tf.reduce_sum(pdf_list * tf.expand_dims(w_list, 0), 1)  # B
        return pdfs
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
# TODO: merge this and point_set_2d_tf
def empirical_tf(ps, gaussian_noise=0.0, float_dtype=tf.float32, max_sub=5000):
    """Empirical distribution over the rows of ``ps`` (any dimension).

    Sampling draws rows without replacement when the batch fits, topping up
    with uniform-with-replacement indices otherwise, and optionally adds
    per-coordinate Gaussian noise of variance ``gaussian_noise``.  The
    density is a nearest-neighbour kernel estimate computed against a random
    subset of at most ``max_sub`` points.  Side effect: prints the point
    count on construction.
    """
    dim = ps.shape[1]
    print('Create empirical distribution with {} points'.format(ps.shape[0]))
    ps = tf.cast(ps, float_dtype)
    num_points = ps.shape[0]
    if gaussian_noise > 0:
        noise_gaussian_1d = gaussian_tf(1, [0], gaussian_noise * np.identity(1), float_dtype)
    else:
        # this is only in computing pdf
        noise_gaussian_1d = gaussian_tf(1, [0], 0.01 * np.identity(1), float_dtype)
    # ps can be very large! so just take a subset for computing pdf
    if num_points > max_sub:
        ps_sub_inds = tf.random.shuffle(tf.range(num_points))
        ps_sub_inds = ps_sub_inds[:max_sub]
        ps_sub = tf.gather(ps, ps_sub_inds, axis=0)
    else:
        ps_sub = ps
    # In autograph, this will retrace for different batch_size
    def sample(batch_size):
        if batch_size <= num_points:
            # Shuffle-and-slice: sampling without replacement.
            inds = tf.random.shuffle(tf.range(num_points))
            inds = inds[:batch_size]
        else:
            # Batch exceeds the point count: take every point once, then fill
            # the remainder with uniform indices (with replacement).
            inds = tf.random.shuffle(tf.range(num_points))
            additional_inds = tf.random.uniform(shape=[batch_size - num_points], minval=0, maxval=ps.shape[0], dtype=tf.int32)
            inds = tf.concat([inds, additional_inds], axis=0)
        qs = tf.gather(ps, inds, axis=0)
        if gaussian_noise > 0:  # ok to trace once
            qs += tf.reshape(noise_gaussian_1d.sample(batch_size * dim), [batch_size, dim])
        return qs
    def pdf(qs):
        # Nearest-neighbour kernel estimate on the subset; the sqrt suggests
        # calc_cross_distances returns squared distances -- TODO confirm.
        dist = calc_cross_distances(ps_sub, qs)  # |P| x |Q|
        min_dist = tf.reduce_min(dist, axis=0)  # |Q|
        min_dist = tf.sqrt(tf.maximum(0.0, min_dist))
        noise_pdfs = noise_gaussian_1d.pdf(tf.reshape(min_dist, [-1, 1]))  # |Q|
        return noise_pdfs
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def uniform_nd_tf(extent, float_dtype=tf.float32):
    """Batched uniform distribution over an axis-aligned n-dimensional box.

    ``extent`` is a sequence of (min, max) pairs, one per dimension.  The
    returned wrapper samples [batch, dim] points; its pdf is 1/volume inside
    the box and 0 outside.
    """
    dim = len(extent)
    volume = 1.0
    for e in extent:
        volume *= e[1] - e[0]
    # Guard against degenerate (near-zero-volume) boxes.
    density = tf.cast(1 / volume, float_dtype) if volume > 1e-6 else 0.0
    def sample(batch_size):
        coords = [tf.random.uniform(shape=[batch_size], minval=e[0], maxval=e[1], dtype=float_dtype) for e in extent]
        coords = tf.stack(coords, axis=1)
        return coords
    def pdf(qs):
        masks = [tf.logical_and(qs[:, i] >= extent[i][0], qs[:, i] <= extent[i][1]) for i in range(dim)]
        masks = tf.stack(masks, axis=1)  # NxD
        masks = tf.reduce_all(masks, 1)  # N
        # BUG FIX: the density inside the box is 1/volume (``density``), not
        # the volume itself; every sibling *_tf distribution returns density.
        return tf.where(masks, density, 0)
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
def uniform_bbox_tf(components, a_min, a_max, float_dtype=tf.float32):
    """Uniform distribution over a box expressed in the ``components`` frame.

    Per-dimension coordinates r are drawn uniformly in [a_min, a_max] and
    mapped to points via r @ components; ``pdf`` tests box membership after
    mapping query points back through ``components``.
    """
    point_dim = components.shape[1]
    components = tf.convert_to_tensor(components, float_dtype)
    a_min = tf.convert_to_tensor(a_min, float_dtype)
    a_max = tf.convert_to_tensor(a_max, float_dtype)
    volume = tf.reduce_prod(a_max - a_min)
    density = 1 / volume
    def sample(batch_size):
        # Uniform in [0, 1)^D, then affinely rescaled to [a_min, a_max].
        rs = tf.random.uniform(shape=[batch_size, point_dim], dtype=float_dtype)
        rs = tf.expand_dims(a_min, 0) + rs * tf.expand_dims(a_max - a_min, 0)
        ps = tf.matmul(rs, components)
        return ps
    def pdf(qs):
        # qs - NxD
        # NOTE(review): inverting the sampling map qs = rs @ components would
        # generally need components^{-1} (the transpose for orthonormal rows);
        # transpose_b=False matches the sampler only for symmetric/involutive
        # ``components`` -- confirm intended usage.
        ws = tf.matmul(qs, components, transpose_b=False)  # NxD
        masks_min = tf.reduce_all(tf.expand_dims(a_min, 0) <= ws, axis=1)  # N
        masks_max = tf.reduce_all(tf.expand_dims(a_max, 0) >= ws, axis=1)  # N
        masks = tf.logical_and(masks_min, masks_max)  # N
        return tf.where(masks, density, 0)
    return DistributionWrapper(sample_fn=sample, pdf_fn=pdf)
| [
"tensorflow.reduce_sum",
"tensorflow.cumsum",
"tensorflow.gather_nd",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.reduce_all",
"tensorflow.matmul",
"tensorflow.linalg.det",
"tensorflow.linalg.inv",
"tensorflow.reduce_prod",
"tensorflow.sqrt",
"tensorflow.math.log",
"tensorflow.ra... | [((2899, 2928), 'numpy.array', 'np.array', (['center'], {'dtype': 'float'}), '(center, dtype=float)\n', (2907, 2928), True, 'import numpy as np\n'), ((2937, 2961), 'numpy.array', 'np.array', (['T'], {'dtype': 'float'}), '(T, dtype=float)\n', (2945, 2961), True, 'import numpy as np\n'), ((3660, 3678), 'numpy.array', 'np.array', (['[x0, y0]'], {}), '([x0, y0])\n', (3668, 3678), True, 'import numpy as np\n'), ((4248, 4297), 'tensorflow.cast', 'tf.cast', (['(1 / ((x1 - x0) * (y1 - y0)))', 'float_dtype'], {}), '(1 / ((x1 - x0) * (y1 - y0)), float_dtype)\n', (4255, 4297), True, 'import tensorflow as tf\n'), ((4946, 5017), 'tensorflow.cast', 'tf.cast', (['(math.pi * (outer_radius ** 2 - inner_radius ** 2))', 'float_dtype'], {}), '(math.pi * (outer_radius ** 2 - inner_radius ** 2), float_dtype)\n', (4953, 5017), True, 'import tensorflow as tf\n'), ((5032, 5062), 'tensorflow.cast', 'tf.cast', (['(1 / area)', 'float_dtype'], {}), '(1 / area, float_dtype)\n', (5039, 5062), True, 'import tensorflow as tf\n'), ((6020, 6046), 'tensorflow.cast', 'tf.cast', (['mean', 'float_dtype'], {}), '(mean, float_dtype)\n', (6027, 6046), True, 'import tensorflow as tf\n'), ((6062, 6092), 'tensorflow.cast', 'tf.cast', (['normal_A', 'float_dtype'], {}), '(normal_A, float_dtype)\n', (6069, 6092), True, 'import tensorflow as tf\n'), ((6103, 6150), 'tensorflow.matmul', 'tf.matmul', (['normal_A', 'normal_A'], {'transpose_b': '(True)'}), '(normal_A, normal_A, transpose_b=True)\n', (6112, 6150), True, 'import tensorflow as tf\n'), ((6165, 6183), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['cov'], {}), '(cov)\n', (6178, 6183), True, 'import tensorflow as tf\n'), ((6198, 6216), 'tensorflow.linalg.det', 'tf.linalg.det', (['cov'], {}), '(cov)\n', (6211, 6216), True, 'import tensorflow as tf\n'), ((7232, 7267), 'tensorflow.cast', 'tf.cast', (['((x1 - x0) / N)', 'float_dtype'], {}), '((x1 - x0) / N, float_dtype)\n', (7239, 7267), True, 'import tensorflow as tf\n'), ((7277, 7312), 
'tensorflow.cast', 'tf.cast', (['((y1 - y0) / M)', 'float_dtype'], {}), '((y1 - y0) / M, float_dtype)\n', (7284, 7312), True, 'import tensorflow as tf\n'), ((7608, 7627), 'tensorflow.cumsum', 'tf.cumsum', (['img_flat'], {}), '(img_flat)\n', (7617, 7627), True, 'import tensorflow as tf\n'), ((7644, 7679), 'tensorflow.cumsum', 'tf.cumsum', (['img_flat'], {'exclusive': '(True)'}), '(img_flat, exclusive=True)\n', (7653, 7679), True, 'import tensorflow as tf\n'), ((8991, 9015), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (8998, 9015), True, 'import tensorflow as tf\n'), ((10984, 11012), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['w_list'], {}), '(w_list)\n', (11004, 11012), True, 'import tensorflow as tf\n'), ((11026, 11054), 'tensorflow.cast', 'tf.cast', (['w_list', 'float_dtype'], {}), '(w_list, float_dtype)\n', (11033, 11054), True, 'import tensorflow as tf\n'), ((11067, 11088), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['w_list'], {}), '(w_list)\n', (11080, 11088), True, 'import tensorflow as tf\n'), ((11101, 11120), 'tensorflow.math.log', 'tf.math.log', (['w_list'], {}), '(w_list)\n', (11112, 11120), True, 'import tensorflow as tf\n'), ((12255, 12279), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (12262, 12279), True, 'import tensorflow as tf\n'), ((14760, 14805), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['components', 'float_dtype'], {}), '(components, float_dtype)\n', (14780, 14805), True, 'import tensorflow as tf\n'), ((14818, 14858), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['a_min', 'float_dtype'], {}), '(a_min, float_dtype)\n', (14838, 14858), True, 'import tensorflow as tf\n'), ((14871, 14911), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['a_max', 'float_dtype'], {}), '(a_max, float_dtype)\n', (14891, 14911), True, 'import tensorflow as tf\n'), ((14925, 14954), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['(a_max - 
a_min)'], {}), '(a_max - a_min)\n', (14939, 14954), True, 'import tensorflow as tf\n'), ((907, 944), 'tensorflow.where', 'tf.where', (['(supp_pdfs > 0)', 'base_pdfs', '(0)'], {}), '(supp_pdfs > 0, base_pdfs, 0)\n', (915, 944), True, 'import tensorflow as tf\n'), ((2227, 2257), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (2241, 2257), False, 'import random\n'), ((2270, 2302), 'random.uniform', 'random.uniform', (['(r1 ** 2)', '(r2 ** 2)'], {}), '(r1 ** 2, r2 ** 2)\n', (2284, 2302), False, 'import random\n'), ((2315, 2327), 'math.sqrt', 'math.sqrt', (['l'], {}), '(l)\n', (2324, 2327), False, 'import math\n'), ((2432, 2462), 'numpy.dot', 'np.dot', (['(p - center)', '(p - center)'], {}), '(p - center, p - center)\n', (2438, 2462), True, 'import numpy as np\n'), ((3026, 3052), 'math.sqrt', 'math.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (3035, 3052), False, 'import math\n'), ((3709, 3741), 'random.uniform', 'random.uniform', (['(0)', 'circumference'], {}), '(0, circumference)\n', (3723, 3741), False, 'import random\n'), ((4339, 4418), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(batch_size,)', 'minval': 'x0', 'maxval': 'x1', 'dtype': 'float_dtype'}), '(shape=(batch_size,), minval=x0, maxval=x1, dtype=float_dtype)\n', (4356, 4418), True, 'import tensorflow as tf\n'), ((4432, 4511), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(batch_size,)', 'minval': 'y0', 'maxval': 'y1', 'dtype': 'float_dtype'}), '(shape=(batch_size,), minval=y0, maxval=y1, dtype=float_dtype)\n', (4449, 4511), True, 'import tensorflow as tf\n'), ((4527, 4553), 'tensorflow.stack', 'tf.stack', (['[xs, ys]'], {'axis': '(1)'}), '([xs, ys], axis=1)\n', (4535, 4553), True, 'import tensorflow as tf\n'), ((4584, 4608), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (4591, 4608), True, 'import tensorflow as tf\n'), ((4761, 4787), 'tensorflow.where', 'tf.where', (['mask', 
'density', '(0)'], {}), '(mask, density, 0)\n', (4769, 4787), True, 'import tensorflow as tf\n'), ((5108, 5200), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(batch_size,)', 'minval': '(0)', 'maxval': '(2 * math.pi)', 'dtype': 'float_dtype'}), '(shape=(batch_size,), minval=0, maxval=2 * math.pi, dtype=\n float_dtype)\n', (5125, 5200), True, 'import tensorflow as tf\n'), ((5207, 5321), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(batch_size,)', 'minval': '(inner_radius ** 2)', 'maxval': '(outer_radius ** 2)', 'dtype': 'float_dtype'}), '(shape=(batch_size,), minval=inner_radius ** 2, maxval=\n outer_radius ** 2, dtype=float_dtype)\n', (5224, 5321), True, 'import tensorflow as tf\n'), ((5326, 5337), 'tensorflow.sqrt', 'tf.sqrt', (['rs'], {}), '(rs)\n', (5333, 5337), True, 'import tensorflow as tf\n'), ((5551, 5575), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (5558, 5575), True, 'import tensorflow as tf\n'), ((5727, 5753), 'tensorflow.where', 'tf.where', (['mask', 'density', '(0)'], {}), '(mask, density, 0)\n', (5735, 5753), True, 'import tensorflow as tf\n'), ((6290, 6308), 'math.sqrt', 'math.sqrt', (['det_cov'], {}), '(det_cov)\n', (6299, 6308), False, 'import math\n'), ((6351, 6417), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[batch_size, point_dim]', 'dtype': 'float_dtype'}), '(shape=[batch_size, point_dim], dtype=float_dtype)\n', (6367, 6417), True, 'import tensorflow as tf\n'), ((6634, 6658), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (6641, 6658), True, 'import tensorflow as tf\n'), ((6895, 6931), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(ps_centered * tmp)', '(-1)'], {}), '(ps_centered * tmp, -1)\n', (6908, 6931), True, 'import tensorflow as tf\n'), ((7558, 7579), 'tensorflow.reshape', 'tf.reshape', (['img', '[-1]'], {}), '(img, [-1])\n', (7568, 7579), True, 'import tensorflow as tf\n'), ((7831, 7919), 
'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '(batch_size,)', 'minval': '(0)', 'maxval': 'total_p', 'dtype': 'float_dtype'}), '(shape=(batch_size,), minval=0, maxval=total_p, dtype=\n float_dtype)\n', (7848, 7919), True, 'import tensorflow as tf\n'), ((8260, 8281), 'tensorflow.stack', 'tf.stack', (['[xs, ys]', '(1)'], {}), '([xs, ys], 1)\n', (8268, 8281), True, 'import tensorflow as tf\n'), ((8477, 8501), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (8484, 8501), True, 'import tensorflow as tf\n'), ((9239, 9327), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[batch_size]', 'minval': '(0)', 'maxval': 'ps.shape[0]', 'dtype': 'tf.int32'}), '(shape=[batch_size], minval=0, maxval=ps.shape[0], dtype=\n tf.int32)\n', (9256, 9327), True, 'import tensorflow as tf\n'), ((9506, 9530), 'tensorflow.cast', 'tf.cast', (['qs', 'float_dtype'], {}), '(qs, float_dtype)\n', (9513, 9530), True, 'import tensorflow as tf\n'), ((10545, 10632), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[batch_size]', 'minval': '(0)', 'maxval': '(w1 + w2)', 'dtype': 'float_dtype'}), '(shape=[batch_size], minval=0, maxval=w1 + w2, dtype=\n float_dtype)\n', (10562, 10632), True, 'import tensorflow as tf\n'), ((10738, 10762), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (10745, 10762), True, 'import tensorflow as tf\n'), ((11371, 11398), 'tensorflow.squeeze', 'tf.squeeze', (['component_id', '(0)'], {}), '(component_id, 0)\n', (11381, 11398), True, 'import tensorflow as tf\n'), ((11650, 11696), 'tensorflow.gather_nd', 'tf.gather_nd', (['samples_list', 'inds'], {'batch_dims': '(1)'}), '(samples_list, inds, batch_dims=1)\n', (11662, 11696), True, 'import tensorflow as tf\n'), ((11752, 11776), 'tensorflow.cast', 'tf.cast', (['ps', 'float_dtype'], {}), '(ps, float_dtype)\n', (11759, 11776), True, 'import tensorflow as tf\n'), ((12785, 12819), 'tensorflow.gather', 'tf.gather', 
(['ps', 'ps_sub_inds'], {'axis': '(0)'}), '(ps, ps_sub_inds, axis=0)\n', (12794, 12819), True, 'import tensorflow as tf\n'), ((13351, 13378), 'tensorflow.gather', 'tf.gather', (['ps', 'inds'], {'axis': '(0)'}), '(ps, inds, axis=0)\n', (13360, 13378), True, 'import tensorflow as tf\n'), ((13636, 13663), 'tensorflow.reduce_min', 'tf.reduce_min', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (13649, 13663), True, 'import tensorflow as tf\n'), ((14049, 14081), 'tensorflow.cast', 'tf.cast', (['(1 / volume)', 'float_dtype'], {}), '(1 / volume, float_dtype)\n', (14056, 14081), True, 'import tensorflow as tf\n'), ((14271, 14295), 'tensorflow.stack', 'tf.stack', (['coords'], {'axis': '(1)'}), '(coords, axis=1)\n', (14279, 14295), True, 'import tensorflow as tf\n'), ((14457, 14480), 'tensorflow.stack', 'tf.stack', (['masks'], {'axis': '(1)'}), '(masks, axis=1)\n', (14465, 14480), True, 'import tensorflow as tf\n'), ((14503, 14526), 'tensorflow.reduce_all', 'tf.reduce_all', (['masks', '(1)'], {}), '(masks, 1)\n', (14516, 14526), True, 'import tensorflow as tf\n'), ((14546, 14572), 'tensorflow.where', 'tf.where', (['masks', 'volume', '(0)'], {}), '(masks, volume, 0)\n', (14554, 14572), True, 'import tensorflow as tf\n'), ((15022, 15089), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[batch_size, point_dim]', 'dtype': 'float_dtype'}), '(shape=[batch_size, point_dim], dtype=float_dtype)\n', (15039, 15089), True, 'import tensorflow as tf\n'), ((15181, 15206), 'tensorflow.matmul', 'tf.matmul', (['rs', 'components'], {}), '(rs, components)\n', (15190, 15206), True, 'import tensorflow as tf\n'), ((15275, 15319), 'tensorflow.matmul', 'tf.matmul', (['qs', 'components'], {'transpose_b': '(False)'}), '(qs, components, transpose_b=False)\n', (15284, 15319), True, 'import tensorflow as tf\n'), ((15498, 15534), 'tensorflow.logical_and', 'tf.logical_and', (['masks_min', 'masks_max'], {}), '(masks_min, masks_max)\n', (15512, 15534), True, 'import tensorflow as tf\n'), 
((15554, 15581), 'tensorflow.where', 'tf.where', (['masks', 'density', '(0)'], {}), '(masks, density, 0)\n', (15562, 15581), True, 'import tensorflow as tf\n'), ((742, 769), 'tensorflow.expand_dims', 'tf.expand_dims', (['(pdfs > 0)', '(1)'], {}), '(pdfs > 0, 1)\n', (756, 769), True, 'import tensorflow as tf\n'), ((1220, 1249), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.delta', '(0)'], {}), '(self.delta, 0)\n', (1234, 1249), True, 'import tensorflow as tf\n'), ((3096, 3126), 'random.uniform', 'random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (3110, 3126), False, 'import random\n'), ((3399, 3411), 'numpy.dot', 'np.dot', (['T', 'p'], {}), '(T, p)\n', (3405, 3411), True, 'import numpy as np\n'), ((4026, 4044), 'numpy.array', 'np.array', (['[ox, oy]'], {}), '([ox, oy])\n', (4034, 4044), True, 'import numpy as np\n'), ((5351, 5372), 'tensorflow.expand_dims', 'tf.expand_dims', (['rs', '(1)'], {}), '(rs, 1)\n', (5365, 5372), True, 'import tensorflow as tf\n'), ((5465, 5493), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['center'], {}), '(center)\n', (5485, 5493), True, 'import tensorflow as tf\n'), ((6235, 6257), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (6244, 6257), False, 'import math\n'), ((6556, 6579), 'tensorflow.expand_dims', 'tf.expand_dims', (['mean', '(0)'], {}), '(mean, 0)\n', (6570, 6579), True, 'import tensorflow as tf\n'), ((6686, 6709), 'tensorflow.expand_dims', 'tf.expand_dims', (['mean', '(0)'], {}), '(mean, 0)\n', (6700, 6709), True, 'import tensorflow as tf\n'), ((6731, 6743), 'tensorflow.shape', 'tf.shape', (['ps'], {}), '(ps)\n', (6739, 6743), True, 'import tensorflow as tf\n'), ((6982, 6993), 'tensorflow.exp', 'tf.exp', (['tmp'], {}), '(tmp)\n', (6988, 6993), True, 'import tensorflow as tf\n'), ((7930, 7951), 'tensorflow.expand_dims', 'tf.expand_dims', (['ds', '(1)'], {}), '(ds, 1)\n', (7944, 7951), True, 'import tensorflow as tf\n'), ((7954, 7980), 'tensorflow.expand_dims', 
'tf.expand_dims', (['bar_end', '(0)'], {}), '(bar_end, 0)\n', (7968, 7980), True, 'import tensorflow as tf\n'), ((7997, 8018), 'tensorflow.expand_dims', 'tf.expand_dims', (['ds', '(1)'], {}), '(ds, 1)\n', (8011, 8018), True, 'import tensorflow as tf\n'), ((8022, 8050), 'tensorflow.expand_dims', 'tf.expand_dims', (['bar_start', '(0)'], {}), '(bar_start, 0)\n', (8036, 8050), True, 'import tensorflow as tf\n'), ((8115, 8141), 'tensorflow.cast', 'tf.cast', (['mask', 'float_dtype'], {}), '(mask, float_dtype)\n', (8122, 8141), True, 'import tensorflow as tf\n'), ((8196, 8222), 'tensorflow.cast', 'tf.cast', (['mask', 'float_dtype'], {}), '(mask, float_dtype)\n', (8203, 8222), True, 'import tensorflow as tf\n'), ((8736, 8769), 'tensorflow.gather', 'tf.gather', (['img_flat', 'inds'], {'axis': '(0)'}), '(img_flat, inds, axis=0)\n', (8745, 8769), True, 'import tensorflow as tf\n'), ((9077, 9091), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (9088, 9091), True, 'import numpy as np\n'), ((9167, 9181), 'numpy.identity', 'np.identity', (['(1)'], {}), '(1)\n', (9178, 9181), True, 'import numpy as np\n'), ((9706, 9733), 'tensorflow.reduce_min', 'tf.reduce_min', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (9719, 9733), True, 'import tensorflow as tf\n'), ((10226, 10255), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['noise_pdfs', '(0)'], {}), '(noise_pdfs, 0)\n', (10240, 10255), True, 'import tensorflow as tf\n'), ((10648, 10677), 'tensorflow.expand_dims', 'tf.expand_dims', (['(darts < w1)', '(1)'], {}), '(darts < w1, 1)\n', (10662, 10677), True, 'import tensorflow as tf\n'), ((11304, 11328), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_log', '(0)'], {}), '(w_log, 0)\n', (11318, 11328), True, 'import tensorflow as tf\n'), ((11429, 11460), 'tensorflow.expand_dims', 'tf.expand_dims', (['component_id', '(1)'], {}), '(component_id, 1)\n', (11443, 11460), True, 'import tensorflow as tf\n'), ((12702, 12722), 'tensorflow.range', 'tf.range', (['num_points'], {}), 
'(num_points)\n', (12710, 12722), True, 'import tensorflow as tf\n'), ((13178, 13279), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[batch_size - num_points]', 'minval': '(0)', 'maxval': 'ps.shape[0]', 'dtype': 'tf.int32'}), '(shape=[batch_size - num_points], minval=0, maxval=ps.\n shape[0], dtype=tf.int32)\n', (13195, 13279), True, 'import tensorflow as tf\n'), ((13294, 13336), 'tensorflow.concat', 'tf.concat', (['[inds, additional_inds]'], {'axis': '(0)'}), '([inds, additional_inds], axis=0)\n', (13303, 13336), True, 'import tensorflow as tf\n'), ((13697, 13722), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', 'min_dist'], {}), '(0.0, min_dist)\n', (13707, 13722), True, 'import tensorflow as tf\n'), ((13767, 13796), 'tensorflow.reshape', 'tf.reshape', (['min_dist', '[-1, 1]'], {}), '(min_dist, [-1, 1])\n', (13777, 13796), True, 'import tensorflow as tf\n'), ((14154, 14241), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[batch_size]', 'minval': 'e[0]', 'maxval': 'e[1]', 'dtype': 'float_dtype'}), '(shape=[batch_size], minval=e[0], maxval=e[1], dtype=\n float_dtype)\n', (14171, 14241), True, 'import tensorflow as tf\n'), ((14353, 14419), 'tensorflow.logical_and', 'tf.logical_and', (['(qs[:, i] >= extent[i][0])', '(qs[:, i] <= extent[i][1])'], {}), '(qs[:, i] >= extent[i][0], qs[:, i] <= extent[i][1])\n', (14367, 14419), True, 'import tensorflow as tf\n'), ((15103, 15127), 'tensorflow.expand_dims', 'tf.expand_dims', (['a_min', '(0)'], {}), '(a_min, 0)\n', (15117, 15127), True, 'import tensorflow as tf\n'), ((1339, 1368), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.delta', '(0)'], {}), '(self.delta, 0)\n', (1353, 1368), True, 'import tensorflow as tf\n'), ((3282, 3312), 'random.uniform', 'random.uniform', (['(0)', 'speed_bound'], {}), '(0, speed_bound)\n', (3296, 3312), False, 'import random\n'), ((6473, 6500), 'tensorflow.expand_dims', 'tf.expand_dims', (['normal_A', '(0)'], {}), '(normal_A, 0)\n', (6487, 6500), True, 
'import tensorflow as tf\n'), ((6803, 6829), 'tensorflow.expand_dims', 'tf.expand_dims', (['cov_inv', '(0)'], {}), '(cov_inv, 0)\n', (6817, 6829), True, 'import tensorflow as tf\n'), ((8541, 8575), 'tensorflow.math.round', 'tf.math.round', (['(ps[:, 0] / hx - 0.5)'], {}), '(ps[:, 0] / hx - 0.5)\n', (8554, 8575), True, 'import tensorflow as tf\n'), ((8637, 8671), 'tensorflow.math.round', 'tf.math.round', (['(ps[:, 1] / hy - 0.5)'], {}), '(ps[:, 1] / hy - 0.5)\n', (8650, 8671), True, 'import tensorflow as tf\n'), ((9346, 9371), 'tensorflow.gather', 'tf.gather', (['ps[:, 0]', 'inds'], {}), '(ps[:, 0], inds)\n', (9355, 9371), True, 'import tensorflow as tf\n'), ((9373, 9398), 'tensorflow.gather', 'tf.gather', (['ps[:, 1]', 'inds'], {}), '(ps[:, 1], inds)\n', (9382, 9398), True, 'import tensorflow as tf\n'), ((9771, 9796), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', 'min_dist'], {}), '(0.0, min_dist)\n', (9781, 9796), True, 'import tensorflow as tf\n'), ((9863, 9890), 'tensorflow.expand_dims', 'tf.expand_dims', (['min_dist', '(1)'], {}), '(min_dist, 1)\n', (9877, 9890), True, 'import tensorflow as tf\n'), ((9966, 9987), 'tensorflow.expand_dims', 'tf.expand_dims', (['qs', '(0)'], {}), '(qs, 0)\n', (9980, 9987), True, 'import tensorflow as tf\n'), ((9990, 10011), 'tensorflow.expand_dims', 'tf.expand_dims', (['ps', '(1)'], {}), '(ps, 1)\n', (10004, 10011), True, 'import tensorflow as tf\n'), ((10072, 10102), 'tensorflow.reshape', 'tf.reshape', (['q_minus_p', '[-1, 2]'], {}), '(q_minus_p, [-1, 2])\n', (10082, 10102), True, 'import tensorflow as tf\n'), ((11518, 11531), 'tensorflow.range', 'tf.range', (['dim'], {}), '(dim)\n', (11526, 11531), True, 'import tensorflow as tf\n'), ((11585, 11610), 'tensorflow.cast', 'tf.cast', (['inds_0', 'tf.int32'], {}), '(inds_0, tf.int32)\n', (11592, 11610), True, 'import tensorflow as tf\n'), ((11910, 11935), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_list', '(0)'], {}), '(w_list, 0)\n', (11924, 11935), True, 'import tensorflow as 
tf\n'), ((12401, 12415), 'numpy.identity', 'np.identity', (['(1)'], {}), '(1)\n', (12412, 12415), True, 'import numpy as np\n'), ((12535, 12549), 'numpy.identity', 'np.identity', (['(1)'], {}), '(1)\n', (12546, 12549), True, 'import numpy as np\n'), ((13016, 13036), 'tensorflow.range', 'tf.range', (['num_points'], {}), '(num_points)\n', (13024, 13036), True, 'import tensorflow as tf\n'), ((13126, 13146), 'tensorflow.range', 'tf.range', (['num_points'], {}), '(num_points)\n', (13134, 13146), True, 'import tensorflow as tf\n'), ((15135, 15167), 'tensorflow.expand_dims', 'tf.expand_dims', (['(a_max - a_min)', '(0)'], {}), '(a_max - a_min, 0)\n', (15149, 15167), True, 'import tensorflow as tf\n'), ((15360, 15384), 'tensorflow.expand_dims', 'tf.expand_dims', (['a_min', '(0)'], {}), '(a_min, 0)\n', (15374, 15384), True, 'import tensorflow as tf\n'), ((15438, 15462), 'tensorflow.expand_dims', 'tf.expand_dims', (['a_max', '(0)'], {}), '(a_max, 0)\n', (15452, 15462), True, 'import tensorflow as tf\n'), ((5385, 5404), 'tensorflow.math.cos', 'tf.math.cos', (['angles'], {}), '(angles)\n', (5396, 5404), True, 'import tensorflow as tf\n'), ((5406, 5425), 'tensorflow.math.sin', 'tf.math.sin', (['angles'], {}), '(angles)\n', (5417, 5425), True, 'import tensorflow as tf\n'), ((5608, 5633), 'tensorflow.expand_dims', 'tf.expand_dims', (['center', '(0)'], {}), '(center, 0)\n', (5622, 5633), True, 'import tensorflow as tf\n'), ((1497, 1515), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1513, 1515), True, 'import numpy as np\n'), ((2366, 2381), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (2374, 2381), False, 'import math\n'), ((2383, 2398), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (2391, 2398), False, 'import math\n'), ((3157, 3168), 'math.cos', 'math.cos', (['t'], {}), '(t)\n', (3165, 3168), False, 'import math\n'), ((3174, 3185), 'math.sin', 'math.sin', (['t'], {}), '(t)\n', (3182, 3185), False, 'import math\n'), ((7366, 7396), 
'tensorflow.range', 'tf.range', (['N'], {'dtype': 'float_dtype'}), '(N, dtype=float_dtype)\n', (7374, 7396), True, 'import tensorflow as tf\n'), ((7477, 7507), 'tensorflow.range', 'tf.range', (['M'], {'dtype': 'float_dtype'}), '(M, dtype=float_dtype)\n', (7485, 7507), True, 'import tensorflow as tf\n'), ((10160, 10172), 'tensorflow.shape', 'tf.shape', (['ps'], {}), '(ps)\n', (10168, 10172), True, 'import tensorflow as tf\n'), ((10177, 10189), 'tensorflow.shape', 'tf.shape', (['qs'], {}), '(qs)\n', (10185, 10189), True, 'import tensorflow as tf\n'), ((3223, 3234), 'math.sin', 'math.sin', (['t'], {}), '(t)\n', (3231, 3234), False, 'import math\n'), ((3248, 3259), 'math.cos', 'math.cos', (['t'], {}), '(t)\n', (3256, 3259), False, 'import math\n')] |
import numpy as np
import torch
import torch.utils.data
from transformer import Constants as c
from tqdm import tqdm
def paired_collate_fn(insts):
    """Collate a batch of (source, target) pairs into padded batches.

    Returns the flattened tuple (src_seq, src_pos, tgt_seq, tgt_pos).
    """
    src_insts, tgt_insts = list(zip(*insts))
    src_batch = collate_fn(src_insts)
    tgt_batch = collate_fn(tgt_insts)
    return (*src_batch, *tgt_batch)
def collate_fn(insts):
    ''' Pad the instance to the max seq length in batch '''
    max_len = max(len(inst) for inst in insts)
    # Right-pad every sequence with PAD up to the batch maximum.
    padded = [inst + [c.PAD] * (max_len - len(inst)) for inst in insts]
    # Position indices are 1-based; padding positions are encoded as 0.
    positions = [[0 if token == c.PAD else pos + 1
                  for pos, token in enumerate(seq)]
                 for seq in padded]
    batch_seq = torch.LongTensor(np.array(padded))
    batch_pos = torch.LongTensor(np.array(positions))
    return batch_seq, batch_pos
class TranslationDataset(torch.utils.data.Dataset):
    """In-memory parallel corpus backed by pre-built word/index vocabularies.

    Stores already-indexed source instances and, optionally, aligned target
    instances. ``__getitem__`` yields a single source sequence when no
    targets were supplied, otherwise a (source, target) pair.
    """

    def __init__(self, src_word2idx, tgt_word2idx, src_insts=None, tgt_insts=None):
        assert src_insts
        assert not tgt_insts or (len(src_insts) == len(tgt_insts))
        self._src_word2idx = src_word2idx
        self._src_idx2word = {index: token for token, index in src_word2idx.items()}
        self._src_insts = src_insts
        self._tgt_word2idx = tgt_word2idx
        self._tgt_idx2word = {index: token for token, index in tgt_word2idx.items()}
        self._tgt_insts = tgt_insts

    @property
    def n_insts(self):
        """Number of instances in the dataset."""
        return len(self._src_insts)

    @property
    def src_vocab_size(self):
        """Size of the source vocabulary."""
        return len(self._src_word2idx)

    @property
    def tgt_vocab_size(self):
        """Size of the target vocabulary."""
        return len(self._tgt_word2idx)

    @property
    def src_word2idx(self):
        """Source word -> index mapping."""
        return self._src_word2idx

    @property
    def tgt_word2idx(self):
        """Target word -> index mapping."""
        return self._tgt_word2idx

    @property
    def src_idx2word(self):
        """Source index -> word mapping."""
        return self._src_idx2word

    @property
    def tgt_idx2word(self):
        """Target index -> word mapping."""
        return self._tgt_idx2word

    def __len__(self):
        return self.n_insts

    def __getitem__(self, idx):
        if self._tgt_insts:
            return self._src_insts[idx], self._tgt_insts[idx]
        return self._src_insts[idx]
class CodeDocstringDataset(torch.utils.data.Dataset):
    """Paired code/docstring corpus built from raw token vocabularies.

    Builds the word<->index maps (special tokens first), converts every raw
    token sequence to indices up front, and on access randomly crops
    over-long sequences before framing them with BOS/EOS.
    """

    def __init__(self, src_vocab, tgt_vocab, src_insts, tgt_insts, src_max_len=512, tgt_max_len=48):
        assert len(src_insts) == len(tgt_insts)
        self._src_max_len = src_max_len
        self._tgt_max_len = tgt_max_len
        # Special tokens keep their canonical indices in both vocabularies.
        specials = [(c.PAD, c.PAD_WORD), (c.UNK, c.UNK_WORD), (c.BOS, c.BOS_WORD), (c.EOS, c.EOS_WORD)]
        self._src_word2idx = {token: index for index, token in specials}
        self._tgt_word2idx = {token: index for index, token in specials}
        # Regular vocabulary entries are appended after the specials.
        src_offset = len(self._src_word2idx)
        tgt_offset = len(self._tgt_word2idx)
        for position, token in enumerate(src_vocab):
            self._src_word2idx[token] = position + src_offset
        for position, token in enumerate(tgt_vocab):
            self._tgt_word2idx[token] = position + tgt_offset
        self._src_idx2word = {index: token for token, index in self._src_word2idx.items()}
        self._tgt_idx2word = {index: token for token, index in self._tgt_word2idx.items()}
        # Pre-convert every instance from raw tokens to vocabulary indices.
        self._src_insts = []
        self._tgt_insts = []
        pairs = tqdm(zip(src_insts, tgt_insts), total=len(src_insts),
                     desc="[ Converting Words to Idx ]")
        for src, tgt in pairs:
            self._src_insts.append([self.src_find_word2idx(token) for token in src])
            self._tgt_insts.append([self.tgt_find_word2idx(token) for token in tgt])

    def src_find_word2idx(self, word):
        """Index of *word* in the source vocabulary, or UNK if absent."""
        return self._src_word2idx.get(word, c.UNK)

    def tgt_find_word2idx(self, word):
        """Index of *word* in the target vocabulary, or UNK if absent."""
        return self._tgt_word2idx.get(word, c.UNK)

    @property
    def n_insts(self):
        """Number of (source, target) pairs."""
        return len(self._src_insts)

    @property
    def src_vocab_size(self):
        """Size of the source vocabulary (specials included)."""
        return len(self._src_word2idx)

    @property
    def tgt_vocab_size(self):
        """Size of the target vocabulary (specials included)."""
        return len(self._tgt_word2idx)

    @property
    def src_word2idx(self):
        """Source word -> index mapping."""
        return self._src_word2idx

    @property
    def tgt_word2idx(self):
        """Target word -> index mapping."""
        return self._tgt_word2idx

    @property
    def src_idx2word(self):
        """Source index -> word mapping."""
        return self._src_idx2word

    @property
    def tgt_idx2word(self):
        """Target index -> word mapping."""
        return self._tgt_idx2word

    def __len__(self):
        return self.n_insts

    @staticmethod
    def _crop_and_frame(seq, max_len):
        """Randomly crop *seq* to max_len - 2 tokens, then add BOS/EOS."""
        budget = max_len - 2
        if len(seq) > budget:
            start = np.random.randint(0, len(seq) - budget)
            seq = seq[start:start + budget]
        return [c.BOS] + seq + [c.EOS]

    def __getitem__(self, idx):
        src = self._crop_and_frame(self._src_insts[idx], self._src_max_len)
        tgt = self._crop_and_frame(self._tgt_insts[idx], self._tgt_max_len)
        return src, tgt
class CodeDocstringDatasetPreprocessed(torch.utils.data.Dataset):
    """Code/docstring pair corpus whose instances are already index-encoded.

    The supplied word2idx maps must place the special tokens (PAD, UNK,
    BOS, EOS) at their canonical indices. Over-long sequences are randomly
    cropped and framed with BOS/EOS on access.
    """

    def __init__(self, src_word2idx, tgt_word2idx, src_insts, tgt_insts, src_max_len=512, tgt_max_len=48):
        assert len(src_insts) == len(tgt_insts)
        self._src_max_len = src_max_len
        self._tgt_max_len = tgt_max_len
        # Sanity-check that both vocabularies keep the canonical special indices.
        for index, token in [(c.PAD, c.PAD_WORD), (c.UNK, c.UNK_WORD), (c.BOS, c.BOS_WORD), (c.EOS, c.EOS_WORD)]:
            assert src_word2idx[token] == index
            assert tgt_word2idx[token] == index
        self._src_word2idx = src_word2idx
        self._tgt_word2idx = tgt_word2idx
        self._src_idx2word = {index: token for token, index in self._src_word2idx.items()}
        self._tgt_idx2word = {index: token for token, index in self._tgt_word2idx.items()}
        self._src_insts = src_insts
        self._tgt_insts = tgt_insts

    def src_find_word2idx(self, word):
        """Index of *word* in the source vocabulary, or UNK if absent."""
        return self._src_word2idx.get(word, c.UNK)

    def tgt_find_word2idx(self, word):
        """Index of *word* in the target vocabulary, or UNK if absent."""
        return self._tgt_word2idx.get(word, c.UNK)

    @property
    def n_insts(self):
        """Number of (source, target) pairs."""
        return len(self._src_insts)

    @property
    def src_vocab_size(self):
        """Size of the source vocabulary."""
        return len(self._src_word2idx)

    @property
    def tgt_vocab_size(self):
        """Size of the target vocabulary."""
        return len(self._tgt_word2idx)

    @property
    def src_word2idx(self):
        """Source word -> index mapping."""
        return self._src_word2idx

    @property
    def tgt_word2idx(self):
        """Target word -> index mapping."""
        return self._tgt_word2idx

    @property
    def src_idx2word(self):
        """Source index -> word mapping."""
        return self._src_idx2word

    @property
    def tgt_idx2word(self):
        """Target index -> word mapping."""
        return self._tgt_idx2word

    def __len__(self):
        return self.n_insts

    @staticmethod
    def _crop_and_frame(seq, max_len):
        """Randomly crop *seq* to max_len - 2 tokens, then add BOS/EOS."""
        budget = max_len - 2
        if len(seq) > budget:
            start = np.random.randint(0, len(seq) - budget)
            seq = seq[start:start + budget]
        return [c.BOS] + seq + [c.EOS]

    def __getitem__(self, idx):
        src = self._crop_and_frame(self._src_insts[idx], self._src_max_len)
        tgt = self._crop_and_frame(self._tgt_insts[idx], self._tgt_max_len)
        return src, tgt
| [
"numpy.random.randint",
"torch.LongTensor"
] | [((696, 723), 'torch.LongTensor', 'torch.LongTensor', (['batch_seq'], {}), '(batch_seq)\n', (712, 723), False, 'import torch\n'), ((740, 767), 'torch.LongTensor', 'torch.LongTensor', (['batch_pos'], {}), '(batch_pos)\n', (756, 767), False, 'import torch\n'), ((5300, 5355), 'numpy.random.randint', 'np.random.randint', (['(0)', '(len_src - (self._src_max_len - 2))'], {}), '(0, len_src - (self._src_max_len - 2))\n', (5317, 5355), True, 'import numpy as np\n'), ((5480, 5535), 'numpy.random.randint', 'np.random.randint', (['(0)', '(len_tgt - (self._tgt_max_len - 2))'], {}), '(0, len_tgt - (self._tgt_max_len - 2))\n', (5497, 5535), True, 'import numpy as np\n'), ((7942, 7997), 'numpy.random.randint', 'np.random.randint', (['(0)', '(len_src - (self._src_max_len - 2))'], {}), '(0, len_src - (self._src_max_len - 2))\n', (7959, 7997), True, 'import numpy as np\n'), ((8122, 8177), 'numpy.random.randint', 'np.random.randint', (['(0)', '(len_tgt - (self._tgt_max_len - 2))'], {}), '(0, len_tgt - (self._tgt_max_len - 2))\n', (8139, 8177), True, 'import numpy as np\n')] |
import numpy as np
import autofit as af
import autolens as al
from autolens.lens.subhalo import SubhaloResult
class TestSubhaloResult:
    def test__result_derived_properties(self):
        """The subhalo array is an Array2D whose rows are vertically flipped
        relative to the values passed in."""
        limits = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]
        priors = [
            af.UniformPrior(lower_limit=-2.0, upper_limit=2.0),
            af.UniformPrior(lower_limit=-3.0, upper_limit=3.0),
        ]
        grid_search_result = af.GridSearchResult(
            results=None,
            grid_priors=priors,
            lower_limits_lists=[[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]],
        )
        result = SubhaloResult(
            grid_search_result=grid_search_result, result_no_subhalo=1
        )
        arr = result._subhalo_array_from(
            values_native=np.array([[1.0, 2.0], [3.0, 4.0]])
        )
        assert isinstance(arr, al.Array2D)
        assert (arr.native == np.array([[3.0, 4.0], [1.0, 2.0]])).all()
| [
"autolens.lens.subhalo.SubhaloResult",
"autofit.UniformPrior",
"numpy.array"
] | [((624, 697), 'autolens.lens.subhalo.SubhaloResult', 'SubhaloResult', ([], {'grid_search_result': 'grid_search_result', 'result_no_subhalo': '(1)'}), '(grid_search_result=grid_search_result, result_no_subhalo=1)\n', (637, 697), False, 'from autolens.lens.subhalo import SubhaloResult\n'), ((812, 846), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0]])\n', (820, 846), True, 'import numpy as np\n'), ((397, 447), 'autofit.UniformPrior', 'af.UniformPrior', ([], {'lower_limit': '(-2.0)', 'upper_limit': '(2.0)'}), '(lower_limit=-2.0, upper_limit=2.0)\n', (412, 447), True, 'import autofit as af\n'), ((466, 516), 'autofit.UniformPrior', 'af.UniformPrior', ([], {'lower_limit': '(-3.0)', 'upper_limit': '(3.0)'}), '(lower_limit=-3.0, upper_limit=3.0)\n', (481, 516), True, 'import autofit as af\n'), ((955, 989), 'numpy.array', 'np.array', (['[[3.0, 4.0], [1.0, 2.0]]'], {}), '([[3.0, 4.0], [1.0, 2.0]])\n', (963, 989), True, 'import numpy as np\n')] |
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import sys
import string
# Benchmark selection and global figure settings.
benchmark = 'class'
mydpi = 600
pltsize = (9, 3.2)
# FEI scores per FabWave class (24 classes), keyed by benchmark variant
# ('fine_grained' vs 'neighborhoods') and by number of slices.
data = {
    'class_fine_grained' : {
        '2_slices' : [0.864, 0.966, 0.973, 0.871, 0.842, 0.881, 0.85, 0.793, 0.945, 0.956, 0.783, 0.84, 0.847, 0.856, 0.863, 0.861, 0.854, 0.793, 0.711, 0.819, 0.865, 0.956, 0.851, 0.842],
        '6_slices' : [0.489, 0.829, 0.914, 0.829, 0.589, 0.776, 0.654, 0.613, 0.802, 0.85, 0.408, 0.913, 0.726, 0.672, 0.758, 0.655, 0.684, 0.627, 0.408, 0.574, 0.651, 0.847, 0.684, 0.615],
    },
    'class_neighborhoods' : {
        '2_slices' : [0.913, 1.0, 1.00, 0.982, 0.891, 0.969, 0.889, 0.824, 0.992, 1.00, 0.798, 0.858, 0.927, 0.93, 0.936, 0.896, 0.934, 0.833, 0.705, 0.92, 0.9, 1.0, 0.891, 0.894],
        '6_slices' : [0.594, 0.964, 0.994, 0.992, 0.828, 0.994, 0.843, 0.887, 0.972, 0.994, 0.577, 0.982, 0.951, 0.93, 0.963, 0.922, 0.941, 0.923, 0.56, 0.898, 0.863, 0.997, 0.918, 0.798],
    }
}
# X-axis tick labels: one per FabWave part class, in the same order as the
# score lists above.
x_axis_labels = [
    'Bearings',
    'Bolts',
    'Brackets',
    'Bushing Liners',
    'Bushing',
    'Collets',
    'Gasket',
    'Grommets',
    'Headless Screws',
    'Hex Screws',
    'Keyway Shaft',
    'Machine Key',
    'Nuts',
    'O Rings',
    'Pipe Fittings',
    'Pipe Joints',
    'Pipes',
    'Rollers',
    'Rotary Shaft',
    'Shaft Collar',
    'Slotted Screws',
    'Socket Screws',
    'Thumb Screws',
    'Washers'
]
# Select the four series to plot for the chosen benchmark.
data8b_fine_grained = data[benchmark+"_fine_grained"]['2_slices']
data16b_fine_grained = data[benchmark+"_fine_grained"]['6_slices']
data8b_neighborhoods = data[benchmark+"_neighborhoods"]['2_slices']
data16b_neighborhoods = data[benchmark+"_neighborhoods"]['6_slices']
N = len(data8b_neighborhoods)  # number of classes on the x axis
index = np.arange(N)  # the x locations for the groups
width = 0.35       # the width of the bars
fig, ax = plt.subplots(figsize=pltsize)
ax.margins(0.01, 0.01)
# Hatched bars (neighborhoods) are drawn first; the solid fine-grained
# bars are drawn afterwards at the same x positions.
rects1_neighborhoods = ax.bar(index-width/2, data8b_neighborhoods, width, color='#69ceff', hatch='xxxx', edgecolor='black', linewidth=1)
rects2_neighborhoods = ax.bar(index+width/2, data16b_neighborhoods, width, color='#bdbdbd', hatch='....', edgecolor='black', linewidth=1)
rects1_fine_grained = ax.bar(index-width/2, data8b_fine_grained, width, color='#69ceff', edgecolor='black', linewidth=1)
rects2_fine_grained = ax.bar(index+width/2, data16b_fine_grained, width, color='#bdbdbd', edgecolor='black', linewidth=1)
# ax.set_yscale('log')
# Axis limits, ticks and labels.
ax.set_ylim([0, 1.05])
ax.set_yticks(np.arange(0, 1.05, step=0.2))
ax.set_ylabel('FEI index', fontsize=11)
ax.set_xticks(index)
ax.set_xlabel('FabWave Class', fontsize=11)
ax.set_xticklabels(x_axis_labels, rotation='90')
ax.tick_params(axis='both', which='major', labelsize=9)
# Four-entry legend placed above the axes.
ax.legend((rects1_fine_grained[0], rects2_fine_grained[0], rects1_neighborhoods[0], rects2_neighborhoods[0]), ['Fine-grained 2 slices', 'Fine-grained 6 slices', 'Neighborhoods 2 slices', 'Neighborhoods 6 slices'], fontsize=9, ncol=4, bbox_to_anchor=(0, 1.02, 1, 0.2), loc='lower center')
# ax.legend((rects1_fine_grained[0], rects2_fine_grained[0], rects1_neighborhoods[0], rects2_neighborhoods[0]), ['fine_grained 2 slices', 'fine_grained 4 slices', 'Neighborhoods 2 slices', 'Neighborhoods 4 slices'], fontsize=9, ncol=4, loc='lower center', framealpha=0.9)
def autolabel(rects):
    """Write each bar's height (one decimal place) just above the bar."""
    for bar in rects:
        h = bar.get_height()
        x = bar.get_x() + bar.get_width()/2.
        ax.text(x, 1.05*h, '%2.1f' % (h), ha='center', va='bottom', fontsize=9)
# autolabel(rects1_neighborhoods)
# autolabel(rects2_neighborhoods)
# plt.show()
# Render and save the final figure to disk at the configured DPI.
plt.tight_layout()
plt.savefig("../images/fei.png", dpi=mydpi, bbox_inches="tight", pad_inches=0.03)
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.subplots"
] | [((1734, 1746), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1743, 1746), True, 'import numpy as np\n'), ((1835, 1864), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pltsize'}), '(figsize=pltsize)\n', (1847, 1864), True, 'import matplotlib.pyplot as plt\n'), ((3562, 3580), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3578, 3580), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3667), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../images/fei.png"""'], {'dpi': 'mydpi', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.03)'}), "('../images/fei.png', dpi=mydpi, bbox_inches='tight', pad_inches\n =0.03)\n", (3592, 3667), True, 'import matplotlib.pyplot as plt\n'), ((2471, 2499), 'numpy.arange', 'np.arange', (['(0)', '(1.05)'], {'step': '(0.2)'}), '(0, 1.05, step=0.2)\n', (2480, 2499), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
from dipy.tracking.streamlinespeed import length
import numpy as np
from scilpy.io.streamlines import load_tractogram_with_reference
from scilpy.io.utils import (add_json_args,
add_reference_arg,
assert_inputs_exist)
def _build_arg_parser():
    """Build the command-line parser for the bundle length-statistics script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Compute streamlines min, mean and max length, as well as '
                    'standard deviation of length in mm.')
    parser.add_argument('in_bundle',
                        help='Fiber bundle file.')
    # Shared scilpy options (reference image, JSON output formatting).
    add_reference_arg(parser)
    add_json_args(parser)
    return parser
def main():
    """Print min/mean/max/std streamline length (mm) of a bundle as JSON."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundle)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    streamlines = sft.streamlines
    # An empty bundle reports zeros rather than failing on empty stats.
    lengths = list(length(streamlines)) if streamlines else [0]

    stats = {'min_length': float(np.min(lengths)),
             'mean_length': float(np.mean(lengths)),
             'max_length': float(np.max(lengths)),
             'std_length': float(np.std(lengths))}
    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"scilpy.io.utils.assert_inputs_exist",
"dipy.tracking.streamlinespeed.length",
"argparse.ArgumentParser",
"numpy.std",
"numpy.min",
"numpy.mean",
"numpy.max",
"scilpy.io.utils.add_json_args",
"scilpy.io.utils.add_reference_arg",
"scilpy.io.streamlines.load_tractogram_with_reference"
] | [((386, 583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute streamlines min, mean and max length, as well as standard deviation of length in mm."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Compute streamlines min, mean and max length, as well as standard deviation of length in mm.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (409, 583), False, 'import argparse\n'), ((698, 718), 'scilpy.io.utils.add_reference_arg', 'add_reference_arg', (['p'], {}), '(p)\n', (715, 718), False, 'from scilpy.io.utils import add_json_args, add_reference_arg, assert_inputs_exist\n'), ((723, 739), 'scilpy.io.utils.add_json_args', 'add_json_args', (['p'], {}), '(p)\n', (736, 739), False, 'from scilpy.io.utils import add_json_args, add_reference_arg, assert_inputs_exist\n'), ((837, 880), 'scilpy.io.utils.assert_inputs_exist', 'assert_inputs_exist', (['parser', 'args.in_bundle'], {}), '(parser, args.in_bundle)\n', (856, 880), False, 'from scilpy.io.utils import add_json_args, add_reference_arg, assert_inputs_exist\n'), ((892, 952), 'scilpy.io.streamlines.load_tractogram_with_reference', 'load_tractogram_with_reference', (['parser', 'args', 'args.in_bundle'], {}), '(parser, args, args.in_bundle)\n', (922, 952), False, 'from scilpy.io.streamlines import load_tractogram_with_reference\n'), ((1048, 1067), 'dipy.tracking.streamlinespeed.length', 'length', (['streamlines'], {}), '(streamlines)\n', (1054, 1067), False, 'from dipy.tracking.streamlinespeed import length\n'), ((1112, 1127), 'numpy.min', 'np.min', (['lengths'], {}), '(lengths)\n', (1118, 1127), True, 'import numpy as np\n'), ((1173, 1189), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (1180, 1189), True, 'import numpy as np\n'), ((1234, 1249), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (1240, 1249), True, 'import numpy as np\n'), ((1294, 1309), 'numpy.std', 'np.std', (['lengths'], {}), '(lengths)\n', 
(1300, 1309), True, 'import numpy as np\n')] |
# Dada uma placa quadrada 1x1 m^2 e as temperaturas de fronteiras
# calcule a temperatura nesta placa e visualize a distribuição
# usando Gauss-Seidel e um grid nxn
# Aluno: <NAME>
# NºUSP: 4182085
import GaussSeidel as gsd
import numpy as np
import matplotlib.pyplot as plt
# Returns the coefficient matrix and the right-hand-side vector b
def createLinearSystem(grid, n):
    """Assemble the 5-point Laplace system A x = b for the (n-2)^2 interior
    cells of an n x n grid.

    A cell still holding the sentinel value 3.14 is an unknown and
    contributes a -1 off-diagonal entry; known boundary values are moved
    into the right-hand side b.
    """
    m = (n - 2) ** 2
    A = 4 * np.identity(m, float)  # each unknown equals the mean of its 4 neighbours
    b = np.zeros(m, float)
    for i in range(1, n - 1):
        for j in range(1, n - 1):
            row = (i - 1) * (n - 2) + (j - 1)
            known_sum = 0.0
            # (neighbour row, neighbour col, unknown-column index in A)
            neighbours = [
                (i, j - 1, (i - 1) * (n - 2) + (j - 2)),  # left
                (i - 1, j, (i - 2) * (n - 2) + (j - 1)),  # above
                (i, j + 1, (i - 1) * (n - 2) + j),        # right
                (i + 1, j, i * (n - 2) + (j - 1)),        # below
            ]
            for ni, nj, col in neighbours:
                if grid[ni, nj] == 3.14:
                    A[row][col] = -1
                else:
                    known_sum = known_sum + grid[ni, nj]
            b[row] = known_sum
    return A, b
# Initialises the grid with the boundary temperatures
def initGrid(dR, n):
    """Create an n-by-n grid seeded with the boundary temperatures.

    dR holds the four side temperatures collected clockwise starting at
    the left edge.  Corners take the average of their two adjacent sides;
    interior cells are filled with the sentinel value 3.14.
    """
    grid = np.full((n, n), float(3.14), float)
    print('>>Inicializando o grid.')
    # Corners: average of the two sides that meet there.
    grid[0, 0] = (dR[0] + dR[1]) / 2
    grid[n - 1, 0] = (dR[1] + dR[2]) / 2
    grid[n - 1, n - 1] = (dR[2] + dR[3]) / 2
    grid[0, n - 1] = (dR[3] + dR[0]) / 2
    # Edges: left, top, right, bottom in that order.
    for k in range(1, n - 1):
        grid[k, 0] = dR[0]
        grid[0, k] = dR[1]
        grid[k, n - 1] = dR[2]
        grid[n - 1, k] = dR[3]
    return grid
# Atualiza o grid com a solução
def updateGrid(grid, xf, n):
    """Copy the solution vector xf back into the interior cells of grid."""
    inner = n - 2
    for row in range(1, n - 1):
        for col in range(1, n - 1):
            grid[row][col] = xf[(row - 1) * inner + (col - 1)]
def main():
    """Interactively solve the steady-state heat equation on a square plate.

    Prompts for the grid dimension and the four boundary temperatures,
    solves the resulting linear system with Gauss-Seidel, and displays the
    temperature distribution as a heat map.
    """
    print('>>>A coleta de fronteira é sentido horário começando pelo lado esquerdo.')
    print('>>>Lembrando que a dimensão do grid é n gera (n-2)^2 variaveis.')
    n = int(input('>>>Digite a dimensão do grid.\n'))
    # Boundary temperatures, collected clockwise starting from the left side.
    dR = np.zeros(4)
    for side in range(4):
        dR[side] = float(input('>>>Digite a temp da fronteira [{}].\n'.format(side + 1)))
    grid = initGrid(dR, n)              # grid seeded with boundary temperatures
    A, b = createLinearSystem(grid, n)  # coefficient matrix and RHS vector
    xi = gsd.initialX(A, b)             # initial guess for the solver
    t = 20                              # number of Gauss-Seidel iterations
    xf = gsd.solve(A, b, xi, t)         # approximate solution
    updateGrid(grid, xf, n)             # write the solution back into the grid
    # Visualisation of the resulting temperature field.
    plt.title('Heat Distribution')
    plt.xlabel('Coluna do grid')
    plt.ylabel('Linha do grid')
    plt.imshow(grid, cmap='plasma', interpolation='nearest')
    plt.colorbar()
    plt.show()
if __name__ == "__main__":
    main()
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.identity",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"GaussSeidel.initialX",
"matplotlib.pyplot.xlabel",
"GaussSeidel.solve"
] | [((480, 498), 'numpy.zeros', 'np.zeros', (['m', 'float'], {}), '(m, float)\n', (488, 498), True, 'import numpy as np\n'), ((2652, 2663), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2660, 2663), True, 'import numpy as np\n'), ((3005, 3023), 'GaussSeidel.initialX', 'gsd.initialX', (['A', 'b'], {}), '(A, b)\n', (3017, 3023), True, 'import GaussSeidel as gsd\n'), ((3107, 3129), 'GaussSeidel.solve', 'gsd.solve', (['A', 'b', 'xi', 't'], {}), '(A, b, xi, t)\n', (3116, 3129), True, 'import GaussSeidel as gsd\n'), ((3262, 3292), 'matplotlib.pyplot.title', 'plt.title', (['"""Heat Distribution"""'], {}), "('Heat Distribution')\n", (3271, 3292), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Coluna do grid"""'], {}), "('Coluna do grid')\n", (3308, 3326), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3359), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Linha do grid"""'], {}), "('Linha do grid')\n", (3342, 3359), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3421), 'matplotlib.pyplot.imshow', 'plt.imshow', (['grid'], {'cmap': '"""plasma"""', 'interpolation': '"""nearest"""'}), "(grid, cmap='plasma', interpolation='nearest')\n", (3375, 3421), True, 'import matplotlib.pyplot as plt\n'), ((3427, 3441), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3439, 3441), True, 'import matplotlib.pyplot as plt\n'), ((3447, 3457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3455, 3457), True, 'import matplotlib.pyplot as plt\n'), ((400, 421), 'numpy.identity', 'np.identity', (['m', 'float'], {}), '(m, float)\n', (411, 421), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Build new Fiber to Fiber
"""
import matplotlib
matplotlib.use('agg')
import glob
import numpy as np
import os.path as op
import splinelab
import fitsio
from astropy.io import fits
from distutils.dir_util import mkpath
from input_utils import setup_parser, set_daterange, setup_logging
from scipy.interpolate import splev, splrep
from utils import biweight_location
from bspline import Bspline
def bspline_matrix(x, nknots, norm=False):
    ''' Make the bspline knot matrix for linalg calculation later '''
    knots = splinelab.augknt(np.linspace(0, 1, nknots), 3)
    basis = Bspline(knots, 3)
    if norm:
        # Rescale x into [0, 1); the +0.1 keeps the maximum strictly below 1.
        x = (x - x.min()) / (x.max() - x.min() + 0.1)
    # One row of basis-function evaluations per sample point.
    coeff = np.array([basis(sample) for sample in x])
    return basis, coeff
def check_if_type(date, obsid, args):
    ''' Test if header has IMAGETYP '''
    pattern = op.join(args.rootdir, date, args.instrument,
                      args.instrument + obsid, 'exp01',
                      args.instrument, 'multi_*_*_*_LL.fits')
    filenames = glob.glob(pattern)
    try:
        kind = fits.open(filenames[0])[0].header['IMAGETYP']
    except:
        args.log.warn('No IMAGETYP in header for %s and observation %s'
                      % (date, obsid))
        return False
    return kind == args.type
def build_filenames(date, obsid, args):
    '''
    Build directory structure and search for all the files in a given
    observation and exposure.
    '''
    # Twilight frames only ever use exposure 01; science frames use all.
    expstr = '01' if args.type == 'twi' else '*'
    pattern = op.join(args.rootdir, date, args.instrument,
                      args.instrument + obsid, 'exp%s' % expstr,
                      args.instrument, 'multi_*_*_*_LL.fits')
    filenames = glob.glob(pattern)
    # IFU slot id is the third underscore-separated token of the file name.
    ifuslot_list = [op.basename(fn).split('_')[2] for fn in filenames]
    ifuslots = np.unique(ifuslot_list)
    # Exposure number comes from the 'expNN' directory two levels up.
    exposure_list = [op.basename(op.dirname(op.dirname(fn)))[3:]
                     for fn in filenames]
    exposures = np.unique(exposure_list)
    return filenames, ifuslots, exposures, ifuslot_list, exposure_list
def grab_attribute(filename, args, attributes=[],
                   amps=['LL', 'LU', 'RU', 'RL']):
    ''' grab specified attributes from multi* file '''
    basename = filename[:-8]
    collected = [[] for _ in attributes]
    for amp in amps:
        name = '%s_%s.fits' % (basename, amp)
        try:
            for idx, attribute in enumerate(attributes):
                collected[idx].append(fitsio.read(name, attribute))
        except IOError:
            # Missing amplifier file: pad every attribute with zeros.
            args.log.warning('%s not found, filling with zeros' % name)
            for idx in range(len(attributes)):
                collected[idx].append(np.zeros((112, 1032)))
        # Replace any unexpectedly shaped read with a zero array.
        for idx in range(len(attributes)):
            if collected[idx][-1].shape != (112, 1032):
                collected[idx][-1] = np.zeros((112, 1032))
    return [np.array(entry) for entry in collected]
def put_attribute(filename, args, data, attributes=[]):
    ''' put specified attributes into multi* file

    Parameters
    ----------
    filename : str
        Path to the multi* FITS file to update.
    args : argparse.Namespace
        Parsed arguments; only ``args.log`` is used here.
    data : list
        Arrays to write, one per entry in ``attributes``.
    attributes : list
        Extension base names; each array is written as ``<name>_1``.
    '''
    try:
        for i, attribute in enumerate(attributes):
            F = fitsio.FITS(filename, 'rw')
            try:
                F.write(data[i], extname=attribute + '_1')
            finally:
                # BUG FIX: the original never closed the FITS handle.
                F.close()
    except IOError:
        for attribute in attributes:
            # BUG FIX: the original used '%s ... %s' % attribute (one value
            # for two placeholders), raising TypeError inside this handler.
            args.log.warning('%s not found to add %s' % (filename, attribute))
def rectify(wave, spec, rectified_dlam=1., minwave=None, maxwave=None):
    ''' Rectify spectra to same "rect_wave" '''
    # Per-pixel wavelength step; the first column copies the second.
    dlam = np.zeros(wave.shape)
    dlam[:, 1:] = np.diff(wave, axis=1)
    dlam[:, 0] = dlam[:, 1]
    if rectified_dlam is None:
        rectified_dlam = np.nanmedian(dlam)
    rect_wave = np.arange(wave.min(), wave.max() + rectified_dlam,
                          rectified_dlam)
    if minwave is None or maxwave is None:
        wnew = rect_wave * 1.
    else:
        wnew = np.arange(minwave, maxwave + rectified_dlam,
                         rectified_dlam)
    rect_spec = np.zeros((spec.shape[0], len(wnew)))
    # Map both grids onto a common [0, 1] parameterisation for the spline.
    xs = np.linspace(0, 1, len(rect_wave))
    xn = np.interp(wnew, rect_wave, xs)
    for fib in np.arange(spec.shape[0]):
        if np.all(spec[fib] == 0):
            rect_spec[fib, :] = 0.0
            continue
        flux = spec[fib] / dlam[fib]
        xp = np.interp(wave[fib], rect_wave, xs)
        tck = splrep(xp, flux)
        rect_spec[fib, :] = splev(xn, tck)
    rect_wave = wnew * 1.
    return rect_wave, rect_spec
def main():
    """Build fiber-to-fiber correction frames for every matching observation.

    For each date/observation/exposure of the requested type, rectify every
    amplifier spectrum, fit a bspline to each fiber's deviation from the
    median spectrum, robustly average the coefficients per IFU+amp, and
    write the resulting fiber-to-fiber frame back into each multi* file.
    """
    parser = setup_parser()
    parser.add_argument("-t", "--type",
                        help='''Observation Type, twi or sci''',
                        type=str, default='twi')
    parser.add_argument("-n", "--nknots",
                        help='''Number of knots for bspline''',
                        type=int, default=7)
    parser.add_argument("-b", "--nbins",
                        help='''Number of bins to collapse data
                        for bspline fit''', type=int, default=40)
    args = parser.parse_args(args=None)
    args.log = setup_logging(logname='build_ftf')
    args = set_daterange(args)
    # Accumulators keyed by IFU slot + amplifier (e.g. '073LL'):
    # spline coefficients per exposure, and the files to write back to.
    ifu_spline_dict = {}
    filename_dict = {}
    # HARDCODED SIZE FOR SPEED BUT MUST MATCH SIZE OF "rw" BELOW.
    B, C = bspline_matrix(np.linspace(0, 1, 2001), nknots=args.nknots)
    for datet in args.daterange:
        date = '%04d%02d%02d' % (datet.year, datet.month, datet.day)
        obsids = glob.glob(op.join(args.rootdir, date, args.instrument,
                                    args.instrument + '*'))
        # Observation id is the last seven characters of the directory name.
        obsids = [obsid[-7:] for obsid in obsids]
        for obsid in obsids:
            if not check_if_type(date, obsid, args):
                continue
            filenames, ifus, exps, i_list, e_list = build_filenames(date,
                                                                    obsid,
                                                                    args)
            for exposure in exps:
                # Restrict the file and IFU lists to this exposure.
                file_list = [fn for fn, e in zip(filenames, e_list)
                             if e == exposure]
                ifuslot_list = [i for i, e in zip(i_list, e_list)
                                if e == exposure]
                ifuslot_amp = ['%s%s' % (ifu, amp) for ifu in ifuslot_list
                               for amp in ['LL', 'LU', 'RU', 'RL']]
                for ifua in ifuslot_amp:
                    if ifua not in ifu_spline_dict:
                        ifu_spline_dict[ifua] = []
                        filename_dict[ifua] = []
                args.log.info('Building Fiber to Fiber for %s, observation %s,'
                              ' exposure %s' % (date, obsid, exposure))
                allspec = []
                for filen, ifu in zip(file_list, ifuslot_list):
                    args.log.info('Reading in %s' % filen)
                    amps = ['LL', 'LU', 'RU', 'RL']
                    wave, spec = grab_attribute(filen, args,
                                                attributes=['wavelength',
                                                            'spectrum'],
                                                amps=amps)
                    for wv, sp, amp in zip(wave, spec, amps):
                        # Resample every amplifier onto a common 3500-5500 A grid.
                        rw, rs = rectify(wv, sp, minwave=3500.,
                                         maxwave=5500.)
                        allspec.append(rs)
                        name = filen[:-8] + '_%s.fits' % amp
                        filename_dict['%s%s' % (ifu, amp)].append(name)
                allspec = np.array(allspec)
                # Median spectrum over all amps and fibers of this exposure.
                avgspec = np.nanmedian(allspec, axis=(0, 1))
                # Bin centers (normalised to [0, 1]) for the bspline fit.
                X = np.arange(len(rw))
                XL = np.array_split(X, args.nbins)
                xloc = np.array([np.median(xl) for xl in XL])
                xloc = (xloc - 0.) / (len(rw) - 1.)
                B, c = bspline_matrix(xloc, nknots=args.nknots)
                for sp, ifua in zip(allspec, ifuslot_amp):
                    args.log.info('Working on ifuslot %s' % ifua)
                    # Fiber-to-fiber ratio relative to the median spectrum.
                    div = sp / avgspec
                    splinecoeff = np.zeros((sp.shape[0], c.shape[1]))
                    div_list = np.array_split(div, args.nbins, axis=1)
                    mdiv = [np.nanmedian(d, axis=1) for d in div_list]
                    mdiv = np.array(mdiv).swapaxes(0, 1)
                    for i, fiber in enumerate(mdiv):
                        # Fit only the finite bins via least squares.
                        sel = np.where(np.isfinite(fiber))[0]
                        splinecoeff[i, :] = np.linalg.lstsq(c[sel, :],
                                                        fiber[sel])[0]
                    ifu_spline_dict[ifua].append(splinecoeff)
    ifu_ftf_dict = {}
    for ifu in ifu_spline_dict:
        # Robust (biweight) average of the coefficients across exposures.
        fibers_spline_coeff = biweight_location(np.array(ifu_spline_dict[ifu]),
                                                axis=(0,))
        ftf = np.zeros(rs.shape)
        for i, fiber in enumerate(fibers_spline_coeff):
            # Evaluate the averaged bspline on the full 2001-point grid (C).
            ftf[i, :] = np.dot(C, fiber)
        ifu_ftf_dict[ifu] = ftf
        for filename in filename_dict[ifu]:
            args.log.info('Writing Fiber to Fiber to %s' % filename)
            put_attribute(filename, args, [ftf], attributes=['fiber_to_fiber'])
if __name__ == '__main__':
    main()
| [
"input_utils.setup_logging",
"numpy.nanmedian",
"fitsio.read",
"numpy.arange",
"numpy.interp",
"os.path.join",
"numpy.array_split",
"numpy.unique",
"os.path.dirname",
"numpy.isfinite",
"numpy.linspace",
"scipy.interpolate.splrep",
"os.path.basename",
"numpy.median",
"input_utils.setup_pa... | [((76, 97), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (90, 97), False, 'import matplotlib\n'), ((547, 572), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nknots'], {}), '(0, 1, nknots)\n', (558, 572), True, 'import numpy as np\n'), ((581, 603), 'splinelab.augknt', 'splinelab.augknt', (['v', '(3)'], {}), '(v, 3)\n', (597, 603), False, 'import splinelab\n'), ((612, 625), 'bspline.Bspline', 'Bspline', (['k', '(3)'], {}), '(k, 3)\n', (619, 625), False, 'from bspline import Bspline\n'), ((1879, 1902), 'numpy.unique', 'np.unique', (['ifuslot_list'], {}), '(ifuslot_list)\n', (1888, 1902), True, 'import numpy as np\n'), ((2026, 2050), 'numpy.unique', 'np.unique', (['exposure_list'], {}), '(exposure_list)\n', (2035, 2050), True, 'import numpy as np\n'), ((3459, 3479), 'numpy.zeros', 'np.zeros', (['wave.shape'], {}), '(wave.shape)\n', (3467, 3479), True, 'import numpy as np\n'), ((3498, 3519), 'numpy.diff', 'np.diff', (['wave'], {'axis': '(1)'}), '(wave, axis=1)\n', (3505, 3519), True, 'import numpy as np\n'), ((4031, 4061), 'numpy.interp', 'np.interp', (['wnew', 'rect_wave', 'xs'], {}), '(wnew, rect_wave, xs)\n', (4040, 4061), True, 'import numpy as np\n'), ((4075, 4099), 'numpy.arange', 'np.arange', (['spec.shape[0]'], {}), '(spec.shape[0])\n', (4084, 4099), True, 'import numpy as np\n'), ((4429, 4443), 'input_utils.setup_parser', 'setup_parser', ([], {}), '()\n', (4441, 4443), False, 'from input_utils import setup_parser, set_daterange, setup_logging\n'), ((4975, 5009), 'input_utils.setup_logging', 'setup_logging', ([], {'logname': '"""build_ftf"""'}), "(logname='build_ftf')\n", (4988, 5009), False, 'from input_utils import setup_parser, set_daterange, setup_logging\n'), ((5021, 5040), 'input_utils.set_daterange', 'set_daterange', (['args'], {}), '(args)\n', (5034, 5040), False, 'from input_utils import setup_parser, set_daterange, setup_logging\n'), ((853, 975), 'os.path.join', 'op.join', (['args.rootdir', 
'date', 'args.instrument', '(args.instrument + obsid)', '"""exp01"""', 'args.instrument', '"""multi_*_*_*_LL.fits"""'], {}), "(args.rootdir, date, args.instrument, args.instrument + obsid,\n 'exp01', args.instrument, 'multi_*_*_*_LL.fits')\n", (860, 975), True, 'import os.path as op\n'), ((1596, 1728), 'os.path.join', 'op.join', (['args.rootdir', 'date', 'args.instrument', '(args.instrument + obsid)', "('exp%s' % expstr)", 'args.instrument', '"""multi_*_*_*_LL.fits"""'], {}), "(args.rootdir, date, args.instrument, args.instrument + obsid, \n 'exp%s' % expstr, args.instrument, 'multi_*_*_*_LL.fits')\n", (1603, 1728), True, 'import os.path as op\n'), ((2893, 2905), 'numpy.array', 'np.array', (['si'], {}), '(si)\n', (2901, 2905), True, 'import numpy as np\n'), ((3604, 3622), 'numpy.nanmedian', 'np.nanmedian', (['dlam'], {}), '(dlam)\n', (3616, 3622), True, 'import numpy as np\n'), ((3800, 3860), 'numpy.arange', 'np.arange', (['minwave', '(maxwave + rectified_dlam)', 'rectified_dlam'], {}), '(minwave, maxwave + rectified_dlam, rectified_dlam)\n', (3809, 3860), True, 'import numpy as np\n'), ((4112, 4132), 'numpy.all', 'np.all', (['(spec[i] == 0)'], {}), '(spec[i] == 0)\n', (4118, 4132), True, 'import numpy as np\n'), ((5182, 5205), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(2001)'], {}), '(0, 1, 2001)\n', (5193, 5205), True, 'import numpy as np\n'), ((8759, 8777), 'numpy.zeros', 'np.zeros', (['rs.shape'], {}), '(rs.shape)\n', (8767, 8777), True, 'import numpy as np\n'), ((3107, 3134), 'fitsio.FITS', 'fitsio.FITS', (['filename', '"""rw"""'], {}), "(filename, 'rw')\n", (3118, 3134), False, 'import fitsio\n'), ((4233, 4266), 'numpy.interp', 'np.interp', (['wave[i]', 'rect_wave', 'xs'], {}), '(wave[i], rect_wave, xs)\n', (4242, 4266), True, 'import numpy as np\n'), ((4285, 4298), 'scipy.interpolate.splrep', 'splrep', (['xp', 'y'], {}), '(xp, y)\n', (4291, 4298), False, 'from scipy.interpolate import splev, splrep\n'), ((4329, 4343), 'scipy.interpolate.splev', 
'splev', (['xn', 'tck'], {}), '(xn, tck)\n', (4334, 4343), False, 'from scipy.interpolate import splev, splrep\n'), ((5356, 5423), 'os.path.join', 'op.join', (['args.rootdir', 'date', 'args.instrument', "(args.instrument + '*')"], {}), "(args.rootdir, date, args.instrument, args.instrument + '*')\n", (5363, 5423), True, 'import os.path as op\n'), ((8654, 8684), 'numpy.array', 'np.array', (['ifu_spline_dict[ifu]'], {}), '(ifu_spline_dict[ifu])\n', (8662, 8684), True, 'import numpy as np\n'), ((8858, 8874), 'numpy.dot', 'np.dot', (['C', 'fiber'], {}), '(C, fiber)\n', (8864, 8874), True, 'import numpy as np\n'), ((2858, 2879), 'numpy.zeros', 'np.zeros', (['(112, 1032)'], {}), '((112, 1032))\n', (2866, 2879), True, 'import numpy as np\n'), ((7448, 7465), 'numpy.array', 'np.array', (['allspec'], {}), '(allspec)\n', (7456, 7465), True, 'import numpy as np\n'), ((7492, 7526), 'numpy.nanmedian', 'np.nanmedian', (['allspec'], {'axis': '(0, 1)'}), '(allspec, axis=(0, 1))\n', (7504, 7526), True, 'import numpy as np\n'), ((7587, 7616), 'numpy.array_split', 'np.array_split', (['X', 'args.nbins'], {}), '(X, args.nbins)\n', (7601, 7616), True, 'import numpy as np\n'), ((1065, 1088), 'astropy.io.fits.open', 'fits.open', (['filenames[0]'], {}), '(filenames[0])\n', (1074, 1088), False, 'from astropy.io import fits\n'), ((1813, 1828), 'os.path.basename', 'op.basename', (['fn'], {}), '(fn)\n', (1824, 1828), True, 'import os.path as op\n'), ((1947, 1961), 'os.path.dirname', 'op.dirname', (['fn'], {}), '(fn)\n', (1957, 1961), True, 'import os.path as op\n'), ((2502, 2530), 'fitsio.read', 'fitsio.read', (['name', 'attribute'], {}), '(name, attribute)\n', (2513, 2530), False, 'import fitsio\n'), ((7993, 8028), 'numpy.zeros', 'np.zeros', (['(sp.shape[0], c.shape[1])'], {}), '((sp.shape[0], c.shape[1]))\n', (8001, 8028), True, 'import numpy as np\n'), ((8060, 8099), 'numpy.array_split', 'np.array_split', (['div', 'args.nbins'], {'axis': '(1)'}), '(div, args.nbins, axis=1)\n', (8074, 8099), 
True, 'import numpy as np\n'), ((2711, 2732), 'numpy.zeros', 'np.zeros', (['(112, 1032)'], {}), '((112, 1032))\n', (2719, 2732), True, 'import numpy as np\n'), ((7650, 7663), 'numpy.median', 'np.median', (['xl'], {}), '(xl)\n', (7659, 7663), True, 'import numpy as np\n'), ((8128, 8151), 'numpy.nanmedian', 'np.nanmedian', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (8140, 8151), True, 'import numpy as np\n'), ((8198, 8212), 'numpy.array', 'np.array', (['mdiv'], {}), '(mdiv)\n', (8206, 8212), True, 'import numpy as np\n'), ((8387, 8425), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['c[sel, :]', 'fiber[sel]'], {}), '(c[sel, :], fiber[sel])\n', (8402, 8425), True, 'import numpy as np\n'), ((8320, 8338), 'numpy.isfinite', 'np.isfinite', (['fiber'], {}), '(fiber)\n', (8331, 8338), True, 'import numpy as np\n')] |
from flask import Flask, request, json
from flask_cors import CORS
from bs4 import BeautifulSoup
import requests
import base64
from PIL import Image
import numpy as np
import io
import re
from eval import evaluate
from locateWord import find_word
import os
app = Flask(__name__)
# Allow cross-origin requests so a separately served front-end can call the API.
CORS(app)
# Module-level state shared between requests (assumes a single worker process).
links = ""  # most recently scraped video links
words = ""  # most recently posted words
imgArray = []  # buffered frames awaiting sign classification
def scrape_videos(websiteURL):
    """Return the sign-language video URL scraped from the given page.

    Tries the direct <source> element first, then an embedded iframe
    (forced to autoplay, loop disabled), and finally returns the string
    "unavailable" when no video can be located.
    """
    page = requests.get(websiteURL).text
    soup = BeautifulSoup(page, 'lxml')
    container = soup.find('div', {"itemtype": "http://schema.org/VideoObject"})
    try:
        source = container.select("#video_con_signasl_1")[0].find('source')
        return str(source['src'])
    except:
        pass
    try:
        link = container.select("iframe")[0].attrs['src'] + "&autoplay=1"
        return str(link.replace("loop=1", "loop=0"))
    except:
        return "unavailable"
def compile_videos(words):
    """Collect one sign-language video link per word, in order."""
    base_url = 'https://www.signasl.org/sign/'
    return [scrape_videos(base_url + str(word)) for word in words]
@app.route('/api/getText', methods=['GET'])
def api():
    """Return the most recently computed video links and words."""
    global words, links
    payload = {
        "link": links,
        "words": words
    }
    return payload
def cleanText(word):
    """Return word with common punctuation characters stripped out."""
    punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
    return ''.join(ch for ch in word if ch not in punctuations)
@app.route('/api/sendText', methods=['POST'])
def findVideos():
    """Translate posted text into a list of sign-language video links."""
    global words, links
    payload = json.loads(request.data)
    # Lowercase and strip punctuation before looking each word up.
    cleaned_words = [cleanText(w) for w in payload['content'].lower().split()]
    links = compile_videos(cleaned_words)
    words = list(payload['content'].split())
    return {
        'message': links,
        'words': words
    }
@app.route('/api/sendImage', methods=['POST'])
def getImageData():
    """Accumulate posted frames; on the final request, classify the batch.

    While the request's 'save' field is the string "True", the decoded
    frame is appended to the module-level buffer and an empty word is
    returned; otherwise the buffered frames are evaluated, the buffer is
    reset, and the matched word is returned.
    """
    global imgArray
    request_data = json.loads(request.data)
    if request_data['save'] != "True":
        prediction = evaluate(imgArray)
        word = find_word(prediction)
        imgArray = []
        return {
            'word': word
        }
    # Strip the data-URL prefix and decode the base64 payload into an image.
    raw = re.sub('^data:image/.+;base64,', '', request_data['image_data'])
    decoded = base64.b64decode(raw)
    frame = np.array(Image.open(io.BytesIO(decoded)))
    imgArray.append(frame)
    return {
        'word': ""
    }
"io.BytesIO",
"flask_cors.CORS",
"flask.Flask",
"base64.b64decode",
"numpy.array",
"requests.get",
"flask.json.loads",
"bs4.BeautifulSoup",
"eval.evaluate",
"locateWord.find_word",
"re.sub"
] | [((264, 279), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'from flask import Flask, request, json\n'), ((280, 289), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (284, 289), False, 'from flask_cors import CORS\n'), ((417, 449), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_text', '"""lxml"""'], {}), "(html_text, 'lxml')\n", (430, 449), False, 'from bs4 import BeautifulSoup\n'), ((1577, 1601), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (1587, 1601), False, 'from flask import Flask, request, json\n'), ((1951, 1975), 'flask.json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (1961, 1975), False, 'from flask import Flask, request, json\n'), ((376, 400), 'requests.get', 'requests.get', (['websiteURL'], {}), '(websiteURL)\n', (388, 400), False, 'import requests\n'), ((2038, 2102), 're.sub', 're.sub', (['"""^data:image/.+;base64,"""', '""""""', "request_data['image_data']"], {}), "('^data:image/.+;base64,', '', request_data['image_data'])\n", (2044, 2102), False, 'import re\n'), ((2121, 2149), 'base64.b64decode', 'base64.b64decode', (['image_data'], {}), '(image_data)\n', (2137, 2149), False, 'import base64\n'), ((2214, 2229), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2222, 2229), True, 'import numpy as np\n'), ((2342, 2360), 'eval.evaluate', 'evaluate', (['imgArray'], {}), '(imgArray)\n', (2350, 2360), False, 'from eval import evaluate\n'), ((2376, 2397), 'locateWord.find_word', 'find_word', (['prediction'], {}), '(prediction)\n', (2385, 2397), False, 'from locateWord import find_word\n'), ((2177, 2196), 'io.BytesIO', 'io.BytesIO', (['imgdata'], {}), '(imgdata)\n', (2187, 2196), False, 'import io\n')] |
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import pickle
import imutils
import time
import math
import cv2
import os
class RecognizeFaceGenderAge(object):
    """Webcam pipeline that detects faces and annotates identity, gender and age.

    Combines several OpenCV DNN models: a Caffe SSD face detector, a Torch
    embedding model feeding a pickled identity classifier, and Caffe
    gender/age classification nets.
    """
    def __init__(self):
        # construct the argument parser and parse the arguments
        self.ap = argparse.ArgumentParser()
        self.ap.add_argument("-d", "--detector", required=True,
            help="path to OpenCV's deep learning face detector")
        self.ap.add_argument("-m", "--embedding-model", required=True,
            help="path to OpenCV's deep learning face embedding model")
        self.ap.add_argument("-r", "--recognizer", required=True,
            help="path to model trained to recognize faces")
        self.ap.add_argument("-l", "--le", required=True,
            help="path to label encoder")
        self.ap.add_argument("-c", "--confidence", type=float, default=0.5,
            help="minimum probability to filter weak detections")
        self.ap.add_argument('--input', help='Path to input image or video file.')
        self.ap.add_argument("--device", default="cpu", help="Device to inference on")
        self.args = vars(self.ap.parse_args())
        # Model definition/weight files for the face, age and gender nets.
        self.faceProto = "opencv_face_detector.pbtxt"
        self.faceModel = "opencv_face_detector_uint8.pb"
        self.ageProto = "age_deploy.prototxt"
        self.ageModel = "age_net.caffemodel"
        self.genderProto = "gender_deploy.prototxt"
        self.genderModel = "gender_net.caffemodel"
        # Per-channel mean subtracted before the age/gender nets.
        self.MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
        # Output classes of the age and gender nets, in prediction order.
        self.ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
        self.genderList = ['Male', 'Female']
        # load our serialized face detector from disk
        print("[INFO] loading face detector...")
        self.protoPath = os.path.sep.join([self.args["detector"], "deploy.prototxt"])
        self.modelPath = os.path.sep.join([self.args["detector"],
            "res10_300x300_ssd_iter_140000.caffemodel"])
        self.detector = cv2.dnn.readNetFromCaffe(self.protoPath, self.modelPath)
        # load our serialized face embedding model from disk
        print("[INFO] loading face recognizer...")
        self.embedder = cv2.dnn.readNetFromTorch(self.args["embedding_model"])
        # load the actual face recognition model along with the label encoder
        self.recognizer = pickle.loads(open(self.args["recognizer"], "rb").read())
        self.le = pickle.loads(open(self.args["le"], "rb").read())
        # Load network
        self.ageNet = cv2.dnn.readNet(self.ageModel, self.ageProto)
        self.genderNet = cv2.dnn.readNet(self.genderModel, self.genderProto)
        self.faceNet = cv2.dnn.readNet(self.faceModel, self.faceProto)
    def setBackend(self):
        """Select the inference backend (CPU or CUDA) for all loaded nets,
        based on the --device argument."""
        if self.args["device"] == "cpu":
            self.ageNet.setPreferableBackend(cv2.dnn.DNN_TARGET_CPU)
            self.genderNet.setPreferableBackend(cv2.dnn.DNN_TARGET_CPU)
            self.faceNet.setPreferableBackend(cv2.dnn.DNN_TARGET_CPU)
            print("Using CPU device")
        elif self.args["device"] == "gpu":
            self.ageNet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            self.ageNet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
            self.genderNet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            self.genderNet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
            self.faceNet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            self.faceNet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
            print("Using GPU device")
    def videostream(self):
        """Run the live webcam loop: detect faces, classify gender/age and
        identity, draw annotations, and display until 'q' is pressed."""
        # initialize the video stream, then allow the camera sensor to warm up
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)
        # start the FPS throughput estimator
        fps = FPS().start()
        while True:
            # grab the frame from the threaded video stream
            frame = vs.read()
            # resize the frame to have a width of 600 pixels (while
            # maintaining the aspect ratio), and then grab the image
            # dimensions
            frame = imutils.resize(frame, width=600)
            (h, w) = frame.shape[:2]
            # construct a blob from the image
            imageBlob = cv2.dnn.blobFromImage(
                cv2.resize(frame, (300, 300)), 1.0, (300, 300),
                (104.0, 177.0, 123.0), swapRB=False, crop=False)
            # apply OpenCV's deep learning-based face detector to localize
            # faces in the input image
            self.detector.setInput(imageBlob)
            detections = self.detector.forward()
            # loop over the detections
            for i in range(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated with
                # the prediction
                confidence = detections[0, 0, i, 2]
                # filter out weak detections
                if confidence > self.args["confidence"]:
                    # compute the (x, y)-coordinates of the bounding box for
                    # the face
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    # extract the face ROI
                    face = frame[startY:endY, startX:endX]
                    (fH, fW) = face.shape[:2]
                    # ensure the face width and height are sufficiently large
                    if fW < 20 or fH < 20:
                        continue
                    # construct a blob for the face ROI, then pass the blob
                    # through our face embedding model to obtain the 128-d
                    # quantification of the face
                    faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
                        (96, 96), (0, 0, 0), swapRB=True, crop=False)
                    blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227),
                        self.MODEL_MEAN_VALUES, swapRB=False)
                    self.embedder.setInput(faceBlob)
                    self.genderNet.setInput(blob)
                    vec = self.embedder.forward()
                    genderPreds = self.genderNet.forward()
                    gender = self.genderList[genderPreds[0].argmax()]
                    # The same (227, 227) blob feeds the age net as well.
                    self.ageNet.setInput(blob)
                    agePreds = self.ageNet.forward()
                    age = self.ageList[agePreds[0].argmax()]
                    label = "{}, age:{}".format(gender, age)
                    print("Gender : {}, conf = {:.3f}".format(gender, genderPreds[0].max()))
                    print("Age Output : {}".format(agePreds))
                    print("Age : {}, conf = {:.3f}".format(age, agePreds[0].max()))
                    # perform classification to recognize the face
                    preds = self.recognizer.predict_proba(vec)[0]
                    j = np.argmax(preds)
                    proba = preds[j]
                    name = self.le.classes_[j]
                    # draw the bounding box of the face along with the
                    # associated probability
                    if proba > 0.6:
                        # Known face: green box plus name and probability.
                        text = "{}: {:.2f}%".format(name, proba * 100)
                        y = startY - 10 if startY - 10 > 10 else startY + 10
                        cv2.rectangle(frame, (startX, startY), (endX, endY),
                            (0, 255, 0), 2)
                        cv2.putText(frame, text, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                    else:
                        # Below-threshold match: red box, flagged as unknown.
                        text = "Unknown Face Detected"
                        y = startY - 10 if startY - 10 > 10 else startY + 10
                        cv2.rectangle(frame, (startX, startY), (endX, endY),
                            (0, 0, 255), 2)
                        cv2.putText(frame, text, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                    # Gender/age label drawn above the box in either case.
                    cv2.putText(frame, label, (startX, y-30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv2.LINE_AA)
            # update the FPS counter
            fps.update()
            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        # do a bit of cleanup
        cv2.destroyAllWindows()
        vs.stop()
vs.stop()
if __name__ == '__main__':
try:
r = RecognizeFaceGenderAge()
r.setBackend()
r.videostream()
except cv2.error as e:
print(e)
| [
"imutils.video.VideoStream",
"imutils.video.FPS",
"cv2.putText",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.waitKey",
"cv2.dnn.readNetFromTorch",
"cv2.dnn.blobFromImage",
"cv2.imshow",
"cv2.dnn.readNet",
"time.sleep",
"cv2.rectangle",
"numpy.array",
"cv2.dnn.readNetFromCaffe",
"imut... | [((354, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (377, 379), False, 'import argparse\n'), ((1777, 1837), 'os.path.sep.join', 'os.path.sep.join', (["[self.args['detector'], 'deploy.prototxt']"], {}), "([self.args['detector'], 'deploy.prototxt'])\n", (1793, 1837), False, 'import os\n'), ((1858, 1947), 'os.path.sep.join', 'os.path.sep.join', (["[self.args['detector'], 'res10_300x300_ssd_iter_140000.caffemodel']"], {}), "([self.args['detector'],\n 'res10_300x300_ssd_iter_140000.caffemodel'])\n", (1874, 1947), False, 'import os\n'), ((1967, 2023), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['self.protoPath', 'self.modelPath'], {}), '(self.protoPath, self.modelPath)\n', (1991, 2023), False, 'import cv2\n'), ((2145, 2199), 'cv2.dnn.readNetFromTorch', 'cv2.dnn.readNetFromTorch', (["self.args['embedding_model']"], {}), "(self.args['embedding_model'])\n", (2169, 2199), False, 'import cv2\n'), ((2450, 2495), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['self.ageModel', 'self.ageProto'], {}), '(self.ageModel, self.ageProto)\n', (2465, 2495), False, 'import cv2\n'), ((2516, 2567), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['self.genderModel', 'self.genderProto'], {}), '(self.genderModel, self.genderProto)\n', (2531, 2567), False, 'import cv2\n'), ((2586, 2633), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['self.faceModel', 'self.faceProto'], {}), '(self.faceModel, self.faceProto)\n', (2601, 2633), False, 'import cv2\n'), ((3564, 3579), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (3574, 3579), False, 'import time\n'), ((7417, 7440), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7438, 7440), False, 'import cv2\n'), ((3882, 3914), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(600)'}), '(frame, width=600)\n', (3896, 3914), False, 'import imutils\n'), ((7058, 7084), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (7068, 7084), False, 'import cv2\n'), 
((3534, 3552), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (3545, 3552), False, 'from imutils.video import VideoStream\n'), ((3629, 3634), 'imutils.video.FPS', 'FPS', ([], {}), '()\n', (3632, 3634), False, 'from imutils.video import FPS\n'), ((4026, 4055), 'cv2.resize', 'cv2.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (4036, 4055), False, 'import cv2\n'), ((7095, 7109), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7106, 7109), False, 'import cv2\n'), ((5183, 5271), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['face', '(1.0 / 255)', '(96, 96)', '(0, 0, 0)'], {'swapRB': '(True)', 'crop': '(False)'}), '(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True,\n crop=False)\n', (5204, 5271), False, 'import cv2\n'), ((5289, 5376), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['face', '(1.0)', '(227, 227)', 'self.MODEL_MEAN_VALUES'], {'swapRB': '(False)'}), '(face, 1.0, (227, 227), self.MODEL_MEAN_VALUES, swapRB\n =False)\n', (5310, 5376), False, 'import cv2\n'), ((6085, 6101), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (6094, 6101), True, 'import numpy as np\n'), ((6868, 6977), 'cv2.putText', 'cv2.putText', (['frame', 'label', '(startX, y - 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(0, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, label, (startX, y - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8,\n (0, 255, 255), 2, cv2.LINE_AA)\n', (6879, 6977), False, 'import cv2\n'), ((4713, 4735), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (4721, 4735), True, 'import numpy as np\n'), ((6389, 6457), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', '(0, 255, 0)', '(2)'], {}), '(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)\n', (6402, 6457), False, 'import cv2\n'), ((6473, 6563), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.55)', '(0, 255, 0)', '(2)'], {}), '(frame, text, (startX, y), 
cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, \n 255, 0), 2)\n', (6484, 6563), False, 'import cv2\n'), ((6684, 6752), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', '(0, 0, 255)', '(2)'], {}), '(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)\n', (6697, 6752), False, 'import cv2\n'), ((6768, 6857), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0,\n 255), 2)\n', (6779, 6857), False, 'import cv2\n')] |
import numpy as np
from PIL import Image
import requests
from keras import backend
from keras.models import Model
from keras.applications.vgg16 import VGG16
from scipy.optimize import fmin_l_bfgs_b
from matplotlib import pyplot as plt
iterations = 10#迭代次数
CHANNELS = 3
image_size = 300 #图片大小
image_width = image_size
image_height = image_size
imagenet_mean_rgb_values = [123.68, 116.779, 103.939]
content_weght = 0.02
style_weight = 4.5
total_variation_weght = 0.995 #TV正则项权重
total_variation_loss_factor = 1.25#TV正则项损失因子
input_image_path = "cartoon_images/Cartoon_FCM.jpg"#需要风格转移的图片路径
style_image_path = "cartoon_images/style8.jpg"#风格图片路径
output_image_path = "cartoon_images/output_style8.png"#风格迁移后图片输出的路径
combined_image_path = "cartoon_images/combined_style8.png"#组合对比图片路径
input_image = Image.open(input_image_path)
input_image = input_image.resize((image_width, image_height))
input_image.save(input_image_path)
style_image = Image.open(style_image_path)
style_image = style_image.resize((image_width, image_height))
style_image.save(style_image_path)
#选择一张输入图,减去通道颜色均值后,得到风格图片在vgg16各个层的输出值
input_image_array = np.asarray(input_image, dtype="float32")
input_image_array = np.expand_dims(input_image_array, axis=0)
input_image_array[:, :, :, 0] -= imagenet_mean_rgb_values[2]
input_image_array[:, :, :, 1] -= imagenet_mean_rgb_values[1]
input_image_array[:, :, :, 2] -= imagenet_mean_rgb_values[0]
input_image_array = input_image_array[:, :, :, ::-1] # bgr ->rgb
#选择一张风格图,减去通道颜色均值后,得到风格图片在vgg16各个层的输出值
style_image_array = np.asarray(style_image, dtype="float32")
style_image_array = np.expand_dims(style_image_array, axis=0)
style_image_array[:, :, :, 0] -= imagenet_mean_rgb_values[2]
style_image_array[:, :, :, 1] -= imagenet_mean_rgb_values[1]
style_image_array[:, :, :, 2] -= imagenet_mean_rgb_values[0]
style_image_array = style_image_array[:, :, :, ::-1] # bgr ->rgb
input_image = backend.variable(input_image_array)
style_image = backend.variable(style_image_array)
combination_image = backend.placeholder((1, image_height, image_size, 3))
input_tensor = backend.concatenate([input_image,style_image,combination_image], axis=0)
model = VGG16(input_tensor=input_tensor, include_top=False)
def content_loss(content, combination):# 内容损失函数loss值
return backend.sum(backend.square(combination - content))
layers = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = "block2_conv2"
# 内容特征对应的vgg16各层名称
layer_features = layers[content_layer]
content_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss = backend.variable(0.)
loss = loss + content_weght * content_loss(content_image_features,
combination_features)
#计算 loss值 loss等于 内容特征的权重 * 输入图片内容损失函数的loss值
def gram_matrix(x):
features = backend.batch_flatten(backend.permute_dimensions(x, (2, 0, 1)))
gram = backend.dot(features, backend.transpose(features))
return gram
def compute_style_loss(style, combination): # 计算风格图片的loss值 即风格损失函数
style = gram_matrix(style)
combination = gram_matrix(combination)
size = image_height * image_width
return backend.sum(backend.square(style - combination)) / (4. * (CHANNELS ** 2) * (size ** 2))
style_layers = ["block1_conv2", "block2_conv2", "block3_conv3", "block4_conv3", "block5_conv3"]
# 风格特征对应的vgg16各层名称
for layer_name in style_layers: #计算风格特征对应的各层的损失值相加
layer_features = layers[layer_name]
style_features = layer_features[1, :, :, :]
combination_features = layer_features[2, :, :, :]
style_loss = compute_style_loss(style_features, combination_features)
loss += (style_weight / len(style_layers)) * style_loss
# loss值 等于风格特征的权重除以层数 * 对应层的风格损失值 最后再求和
def total_variation_loss(x):# 定义组合图片的损失函数total variation loss
a = backend.square(x[:, :image_height-1, :image_width-1, :] - x[:, 1:, :image_width-1, :])
b = backend.square(x[:, :image_height-1, :image_width-1, :] - x[:, :image_height-1, 1:, :])
return backend.sum(backend.pow(a + b, total_variation_loss_factor))#TV正则项损失因子
loss += total_variation_weght * total_variation_loss(combination_image)
# TV正则项权重*损失函数total variation loss
outputs = [loss]
outputs += backend.gradients(loss, combination_image)
def evaluate_loss_and_gradients(x):
#评估 loss值 和梯度
x = x.reshape((1, image_height, image_width, CHANNELS))
outs = backend.function([combination_image], outputs)([x])
loss = outs[0]
gradients = outs[1].flatten().astype("float64")
return loss, gradients
class Evaluator:
def loss(self, x):
loss, gradients = evaluate_loss_and_gradients(x)
self._gradients = gradients
return loss
def gradients(self, x):
return self._gradients
evaluator = Evaluator()
x = np.random.uniform(0, 255, (1, image_height, image_width, 3)) - 128.
# 从一个均匀分布[low,high)中随机采样,注意定义域是左闭右开,即包含low,不包含high.
for i in range(iterations):
x, loss, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.gradients, maxfun=20)
#evaluator.loss loss函数
# 返回值 x 估计最小值的位置,即loss最小时对应的x
# loss最小的Func值,即loss值。
# x.flatten() 最初的猜测,即待更新参数初始值
# fprime 梯度函数
# maxfun 功能评估的最大数量
print("Iteration %d loss: %d" % (i, loss))
x = x.reshape((image_height, image_width, CHANNELS))
x = x[:, :, ::-1] # bgr ->rgb
# 将之前减去通道颜色均值加回来
x[:, :, 0] += imagenet_mean_rgb_values[2]
x[:, :, 1] += imagenet_mean_rgb_values[1]
x[:, :, 2] += imagenet_mean_rgb_values[0]
x = np.clip(x, 0, 255).astype("uint8") # 防止越界 限制在0-255之间
output_image = Image.fromarray(x)
output_image.save(output_image_path)
plt.imshow(output_image)
# 可视化合并结果
combined = Image.new("RGB", (image_width*3, image_height))
x_offset = 0
for image in map(Image.open, [input_image_path, style_image_path, output_image_path]):
combined.paste(image, (x_offset, 0))
x_offset += image_width
combined.save(combined_image_path)
plt.imshow(combined)
plt.show() | [
"PIL.Image.new",
"numpy.clip",
"keras.backend.transpose",
"keras.backend.pow",
"keras.applications.vgg16.VGG16",
"keras.backend.permute_dimensions",
"keras.backend.placeholder",
"keras.backend.concatenate",
"matplotlib.pyplot.imshow",
"keras.backend.gradients",
"matplotlib.pyplot.show",
"numpy... | [((791, 819), 'PIL.Image.open', 'Image.open', (['input_image_path'], {}), '(input_image_path)\n', (801, 819), False, 'from PIL import Image\n'), ((932, 960), 'PIL.Image.open', 'Image.open', (['style_image_path'], {}), '(style_image_path)\n', (942, 960), False, 'from PIL import Image\n'), ((1119, 1159), 'numpy.asarray', 'np.asarray', (['input_image'], {'dtype': '"""float32"""'}), "(input_image, dtype='float32')\n", (1129, 1159), True, 'import numpy as np\n'), ((1180, 1221), 'numpy.expand_dims', 'np.expand_dims', (['input_image_array'], {'axis': '(0)'}), '(input_image_array, axis=0)\n', (1194, 1221), True, 'import numpy as np\n'), ((1531, 1571), 'numpy.asarray', 'np.asarray', (['style_image'], {'dtype': '"""float32"""'}), "(style_image, dtype='float32')\n", (1541, 1571), True, 'import numpy as np\n'), ((1592, 1633), 'numpy.expand_dims', 'np.expand_dims', (['style_image_array'], {'axis': '(0)'}), '(style_image_array, axis=0)\n', (1606, 1633), True, 'import numpy as np\n'), ((1897, 1932), 'keras.backend.variable', 'backend.variable', (['input_image_array'], {}), '(input_image_array)\n', (1913, 1932), False, 'from keras import backend\n'), ((1947, 1982), 'keras.backend.variable', 'backend.variable', (['style_image_array'], {}), '(style_image_array)\n', (1963, 1982), False, 'from keras import backend\n'), ((2003, 2056), 'keras.backend.placeholder', 'backend.placeholder', (['(1, image_height, image_size, 3)'], {}), '((1, image_height, image_size, 3))\n', (2022, 2056), False, 'from keras import backend\n'), ((2073, 2147), 'keras.backend.concatenate', 'backend.concatenate', (['[input_image, style_image, combination_image]'], {'axis': '(0)'}), '([input_image, style_image, combination_image], axis=0)\n', (2092, 2147), False, 'from keras import backend\n'), ((2154, 2205), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'input_tensor': 'input_tensor', 'include_top': '(False)'}), '(input_tensor=input_tensor, include_top=False)\n', (2159, 2205), False, 'from 
keras.applications.vgg16 import VGG16\n'), ((2594, 2615), 'keras.backend.variable', 'backend.variable', (['(0.0)'], {}), '(0.0)\n', (2610, 2615), False, 'from keras import backend\n'), ((4205, 4247), 'keras.backend.gradients', 'backend.gradients', (['loss', 'combination_image'], {}), '(loss, combination_image)\n', (4222, 4247), False, 'from keras import backend\n'), ((5541, 5559), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (5556, 5559), False, 'from PIL import Image\n'), ((5597, 5621), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output_image'], {}), '(output_image)\n', (5607, 5621), True, 'from matplotlib import pyplot as plt\n'), ((5643, 5692), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(image_width * 3, image_height)'], {}), "('RGB', (image_width * 3, image_height))\n", (5652, 5692), False, 'from PIL import Image\n'), ((5895, 5915), 'matplotlib.pyplot.imshow', 'plt.imshow', (['combined'], {}), '(combined)\n', (5905, 5915), True, 'from matplotlib import pyplot as plt\n'), ((5916, 5926), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5924, 5926), True, 'from matplotlib import pyplot as plt\n'), ((3803, 3900), 'keras.backend.square', 'backend.square', (['(x[:, :image_height - 1, :image_width - 1, :] - x[:, 1:, :image_width - 1, :])'], {}), '(x[:, :image_height - 1, :image_width - 1, :] - x[:, 1:, :\n image_width - 1, :])\n', (3817, 3900), False, 'from keras import backend\n'), ((3898, 3996), 'keras.backend.square', 'backend.square', (['(x[:, :image_height - 1, :image_width - 1, :] - x[:, :image_height - 1, 1:, :])'], {}), '(x[:, :image_height - 1, :image_width - 1, :] - x[:, :\n image_height - 1, 1:, :])\n', (3912, 3996), False, 'from keras import backend\n'), ((4769, 4829), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(255)', '(1, image_height, image_width, 3)'], {}), '(0, 255, (1, image_height, image_width, 3))\n', (4786, 4829), True, 'import numpy as np\n'), ((2283, 2320), 'keras.backend.square', 'backend.square', 
(['(combination - content)'], {}), '(combination - content)\n', (2297, 2320), False, 'from keras import backend\n'), ((2844, 2884), 'keras.backend.permute_dimensions', 'backend.permute_dimensions', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (2870, 2884), False, 'from keras import backend\n'), ((2919, 2946), 'keras.backend.transpose', 'backend.transpose', (['features'], {}), '(features)\n', (2936, 2946), False, 'from keras import backend\n'), ((4009, 4056), 'keras.backend.pow', 'backend.pow', (['(a + b)', 'total_variation_loss_factor'], {}), '(a + b, total_variation_loss_factor)\n', (4020, 4056), False, 'from keras import backend\n'), ((4374, 4420), 'keras.backend.function', 'backend.function', (['[combination_image]', 'outputs'], {}), '([combination_image], outputs)\n', (4390, 4420), False, 'from keras import backend\n'), ((5473, 5491), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (5480, 5491), True, 'import numpy as np\n'), ((3168, 3203), 'keras.backend.square', 'backend.square', (['(style - combination)'], {}), '(style - combination)\n', (3182, 3203), False, 'from keras import backend\n')] |
# coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Third-party
import astropy.units as u
import astropy.coordinates as coord
import numpy as np
import gala.dynamics as gd
# Project
from ..core import Ophiuchus
from ...data import OphiuchusData
def test_roundtrip_transform():
n = 128
g = coord.SkyCoord(l=np.random.uniform(0,360,size=n)*u.deg,
b=np.random.uniform(-60,60,size=n)*u.deg,
distance=np.random.uniform(0,100,size=n)*u.kpc,
frame='galactic')
o = g.transform_to(Ophiuchus)
assert np.allclose(o.distance.value, g.distance.value)
g2 = o.transform_to(coord.Galactic)
assert np.allclose(g2.distance.value, g.distance.value)
assert np.allclose(g2.l.value, g.l.value, atol=1E-9)
assert np.allclose(g2.b.value, g.b.value, atol=1E-9)
def test_data_phi2_is_small():
d = OphiuchusData(expr="source == b'Sesar2015a'")
oph = d.coord.transform_to(Ophiuchus)
assert np.all(np.abs(oph.phi2) < 5.*u.arcmin) # all phi2 should be <5 arcmin
def test_orbit_transform():
pos = np.random.uniform(size=(3,128))*u.kpc
vel = np.random.uniform(size=(3,128))*u.kpc/u.Myr
orbit = gd.CartesianOrbit(pos=pos, vel=vel)
c,v = orbit.to_frame(coord.Galactic)
oph = c.transform_to(Ophiuchus)
pm_l,pm_b,vr = v
assert pm_l.unit == u.mas/u.yr
assert pm_b.unit == u.mas/u.yr
assert vr.unit == vel.unit
| [
"gala.dynamics.CartesianOrbit",
"numpy.random.uniform",
"numpy.allclose",
"numpy.abs"
] | [((624, 671), 'numpy.allclose', 'np.allclose', (['o.distance.value', 'g.distance.value'], {}), '(o.distance.value, g.distance.value)\n', (635, 671), True, 'import numpy as np\n'), ((724, 772), 'numpy.allclose', 'np.allclose', (['g2.distance.value', 'g.distance.value'], {}), '(g2.distance.value, g.distance.value)\n', (735, 772), True, 'import numpy as np\n'), ((784, 830), 'numpy.allclose', 'np.allclose', (['g2.l.value', 'g.l.value'], {'atol': '(1e-09)'}), '(g2.l.value, g.l.value, atol=1e-09)\n', (795, 830), True, 'import numpy as np\n'), ((841, 887), 'numpy.allclose', 'np.allclose', (['g2.b.value', 'g.b.value'], {'atol': '(1e-09)'}), '(g2.b.value, g.b.value, atol=1e-09)\n', (852, 887), True, 'import numpy as np\n'), ((1239, 1274), 'gala.dynamics.CartesianOrbit', 'gd.CartesianOrbit', ([], {'pos': 'pos', 'vel': 'vel'}), '(pos=pos, vel=vel)\n', (1256, 1274), True, 'import gala.dynamics as gd\n'), ((1135, 1167), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3, 128)'}), '(size=(3, 128))\n', (1152, 1167), True, 'import numpy as np\n'), ((1033, 1049), 'numpy.abs', 'np.abs', (['oph.phi2'], {}), '(oph.phi2)\n', (1039, 1049), True, 'import numpy as np\n'), ((1183, 1215), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3, 128)'}), '(size=(3, 128))\n', (1200, 1215), True, 'import numpy as np\n'), ((363, 396), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(360)'], {'size': 'n'}), '(0, 360, size=n)\n', (380, 396), True, 'import numpy as np\n'), ((427, 461), 'numpy.random.uniform', 'np.random.uniform', (['(-60)', '(60)'], {'size': 'n'}), '(-60, 60, size=n)\n', (444, 461), True, 'import numpy as np\n'), ((499, 532), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)'], {'size': 'n'}), '(0, 100, size=n)\n', (516, 532), True, 'import numpy as np\n')] |
import numpy as np
# reshape
b1 = np.arange(15)
# b1 = [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
b2 = b1.reshape((3,5))
b2 = b2.reshape(1,-1)
b3 = b1[::-1].reshape(1,-1)
# b2 = [[ 0 1 2 3 4]
# [ 5 6 7 8 9]
# [10 11 12 13 14]]
print(b1.shape,b2.shape,b3.shape)
b4 = np.concatenate([b2, b3])
print(f"b4 = {b4} \n ")
b5 = np.vstack([b2, b3])
print(f"b5 = {b5} \n ")
| [
"numpy.vstack",
"numpy.arange",
"numpy.concatenate"
] | [((35, 48), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (44, 48), True, 'import numpy as np\n'), ((283, 307), 'numpy.concatenate', 'np.concatenate', (['[b2, b3]'], {}), '([b2, b3])\n', (297, 307), True, 'import numpy as np\n'), ((339, 358), 'numpy.vstack', 'np.vstack', (['[b2, b3]'], {}), '([b2, b3])\n', (348, 358), True, 'import numpy as np\n')] |
import glob
import joblib
import fitsio
import os
import numpy as np
import meds
import tqdm
from meds.defaults import BMASK_EDGE
BINS = np.linspace(-20, 20, 41) + 0.5
BANDS = ["g", "r", "i", "z", "Y"]
BCEN = (BINS[:-1] + BINS[1:])/2
def _convert_to_index(row, col, dbox=100, edge=50):
xind = (col.astype(int) - edge)//dbox
yind = (row.astype(int) - edge)//dbox
return xind + 99*yind
def _compute_hist_for_tile_band(tname, band):
pizza_name = (
"./pizza_meds/%s/p01/pizza-cutter/"
"%s_r5191p01_%s_pizza-cutter-slices.fits.fz" % (
tname,
tname,
band,
)
)
stamp_name = glob.glob("./meds/%s_*_%s_meds-Y6A1.fits.fz" % (tname, band))[0]
if (not os.path.exists(pizza_name)) or (not os.path.exists(stamp_name)):
return None
with meds.MEDS(pizza_name) as m, meds.MEDS(stamp_name) as mobj:
pizza_inds = _convert_to_index(mobj["orig_row"][:, 0], mobj["orig_col"][:, 0])
assert np.array_equal(
_convert_to_index(m["orig_row"][:, 0], m["orig_col"][:, 0]),
np.arange(len(m["orig_col"][:, 0]), dtype=int),
)
dnepoch = []
nepoch = []
for obj_ind, pizza_ind in tqdm.tqdm(
enumerate(pizza_inds), total=len(pizza_inds)
):
if pizza_ind < 0 or pizza_ind >= 9801 or m["nepoch"][pizza_ind] <= 0:
continue
nepoch_obj = 0
for msk_ind in range(1, mobj["ncutout"][obj_ind]):
msk = mobj.get_cutout(obj_ind, msk_ind, type="bmask")
if not np.any(msk & BMASK_EDGE):
nepoch_obj += 1
dnepoch.append(m["nepoch"][pizza_ind] - nepoch_obj)
nepoch.append(nepoch_obj)
print(
"tile: %s\nband: %s\n\tpizza: %s %s\n\tmeds: %s %s\n\tdiff: %s %s" % (
tname, band,
np.mean(np.array(nepoch) + np.array(dnepoch)),
np.std(np.array(nepoch) + np.array(dnepoch)),
np.mean(nepoch), np.std(nepoch),
np.mean(dnepoch), np.std(dnepoch),
),
flush=True,
)
tiles = list(set([os.path.basename(f).split("_")[0] for f in glob.glob("./meds/*")]))
assert len(tiles) == 100
os.system("mkdir -p hdata")
jobs = []
totd = []
for i, tile in enumerate(tiles):
for band in BANDS:
jobs.append(joblib.delayed(_compute_hist_for_tile_band)(tile, band))
with joblib.Parallel(n_jobs=5, backend='loky', verbose=100) as para:
para(jobs)
| [
"os.path.basename",
"numpy.std",
"os.path.exists",
"os.system",
"joblib.Parallel",
"numpy.any",
"numpy.mean",
"numpy.array",
"numpy.linspace",
"glob.glob",
"joblib.delayed",
"meds.MEDS"
] | [((2276, 2303), 'os.system', 'os.system', (['"""mkdir -p hdata"""'], {}), "('mkdir -p hdata')\n", (2285, 2303), False, 'import os\n'), ((139, 163), 'numpy.linspace', 'np.linspace', (['(-20)', '(20)', '(41)'], {}), '(-20, 20, 41)\n', (150, 163), True, 'import numpy as np\n'), ((2464, 2518), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': '(5)', 'backend': '"""loky"""', 'verbose': '(100)'}), "(n_jobs=5, backend='loky', verbose=100)\n", (2479, 2518), False, 'import joblib\n'), ((657, 718), 'glob.glob', 'glob.glob', (["('./meds/%s_*_%s_meds-Y6A1.fits.fz' % (tname, band))"], {}), "('./meds/%s_*_%s_meds-Y6A1.fits.fz' % (tname, band))\n", (666, 718), False, 'import glob\n'), ((830, 851), 'meds.MEDS', 'meds.MEDS', (['pizza_name'], {}), '(pizza_name)\n', (839, 851), False, 'import meds\n'), ((858, 879), 'meds.MEDS', 'meds.MEDS', (['stamp_name'], {}), '(stamp_name)\n', (867, 879), False, 'import meds\n'), ((735, 761), 'os.path.exists', 'os.path.exists', (['pizza_name'], {}), '(pizza_name)\n', (749, 761), False, 'import os\n'), ((771, 797), 'os.path.exists', 'os.path.exists', (['stamp_name'], {}), '(stamp_name)\n', (785, 797), False, 'import os\n'), ((2225, 2246), 'glob.glob', 'glob.glob', (['"""./meds/*"""'], {}), "('./meds/*')\n", (2234, 2246), False, 'import glob\n'), ((2401, 2444), 'joblib.delayed', 'joblib.delayed', (['_compute_hist_for_tile_band'], {}), '(_compute_hist_for_tile_band)\n', (2415, 2444), False, 'import joblib\n'), ((1596, 1620), 'numpy.any', 'np.any', (['(msk & BMASK_EDGE)'], {}), '(msk & BMASK_EDGE)\n', (1602, 1620), True, 'import numpy as np\n'), ((2029, 2044), 'numpy.mean', 'np.mean', (['nepoch'], {}), '(nepoch)\n', (2036, 2044), True, 'import numpy as np\n'), ((2046, 2060), 'numpy.std', 'np.std', (['nepoch'], {}), '(nepoch)\n', (2052, 2060), True, 'import numpy as np\n'), ((2078, 2094), 'numpy.mean', 'np.mean', (['dnepoch'], {}), '(dnepoch)\n', (2085, 2094), True, 'import numpy as np\n'), ((2096, 2111), 'numpy.std', 'np.std', (['dnepoch'], 
{}), '(dnepoch)\n', (2102, 2111), True, 'import numpy as np\n'), ((2182, 2201), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2198, 2201), False, 'import os\n'), ((1912, 1928), 'numpy.array', 'np.array', (['nepoch'], {}), '(nepoch)\n', (1920, 1928), True, 'import numpy as np\n'), ((1931, 1948), 'numpy.array', 'np.array', (['dnepoch'], {}), '(dnepoch)\n', (1939, 1948), True, 'import numpy as np\n'), ((1974, 1990), 'numpy.array', 'np.array', (['nepoch'], {}), '(nepoch)\n', (1982, 1990), True, 'import numpy as np\n'), ((1993, 2010), 'numpy.array', 'np.array', (['dnepoch'], {}), '(dnepoch)\n', (2001, 2010), True, 'import numpy as np\n')] |
from keras.backend.tensorflow_backend import set_session
from keras.layers import Input
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Dense, Flatten
from keras.losses import mean_squared_error, binary_crossentropy
from keras.models import Model, load_model
from keras.optimizers import Adam, SGD
import numpy as np
import os
import pickle
import tensorflow as tf
# Hide unwanted messages for tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# Limit gpu memory if needed
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = set_session(tf.Session(config=config))
def create_action_model(splits_location, partial_model, args):
"""Creates action model. Overwrites old one if with same arguments exists.
Arguments:
splits_location {string} -- full path to image splits file (.pkl)
partial_model {string} -- full path to partial model file (.h5)
args {namespace} -- has to contain following arguments:
disable_brake, test_count, action_loss_type, action_lr, action_epochs
Returns:
string -- full path to the created action model
"""
# Load images
with open(splits_location, 'rb') as f:
images, splits_n, min_split, images_action, hop, min_size = pickle.load(f)
############################################################################
# Preprocess images
############################################################################
##########################
# Split videos and actions
##########################
split_video = [[] for i in range(len(images))]
split_video_action = [[] for i in range(len(images_action))]
for j in range(len(images)):
for i in range(splits_n):
if i == 0:
split_video[j].insert(i, images[j][0:min_split[j][i]])
split_video_action[j].insert(i, images_action[j][0:min_split[j][i]])
elif i < splits_n-1:
split_video[j].insert(i, images[j][min_split[j][i-1]:min_split[j][i]])
split_video_action[j].insert(i, images_action[j][min_split[j][i-1]:min_split[j][i]])
else:
split_video[j].insert(i, images[j][min_split[j][i-1]:(images[j])[:].size])
split_video_action[j].insert(i, images_action[j][min_split[j][i-1]:(images_action[j])[:].size])
#####################
# Create one long array from all the images
#####################
ravel_splits = []
for i in range(len(split_video)):
for j in range(len(split_video[i])):
for k in range(split_video[i][j][:,1,1,1].size):
ravel_splits.append(split_video[i][j][k,:,:,:])
ravel_splits = np.array(ravel_splits)
######################
# Convert actions
######################
# This holds all the actions in one long array.
y_all = []
for demonstration in images_action:
for frame in demonstration:
y_all.append(frame)
y_all = np.array(y_all)
# Steering is saved as -0.5 ... 0.5, +0.5 is needed to get 0..1 range:
y_all[:,1] += 0.5
# Default actions count
actions_count = 3
# Change the steering to correspond key press.
for i in range(len(y_all)):
if y_all[i,1] > 0.5:
y_all[i,1] = 1
elif y_all[i,1] < 0.5:
y_all[i,1] = 0
if args.action_loss_type == 'bce':
# Expand y_all to have 4 or 5 columns.
y_all_binary = []
actions_count = 4
if actions_count == 4:
for action in y_all:
if action[1] == 0.5:
y_all_binary.append(np.array((action[0],0,0,action[2])))
elif action[1] == 0:
y_all_binary.append(np.array((action[0],1,0,action[2])))
elif action[1] == 1:
y_all_binary.append(np.array((action[0],0,1,action[2])))
# Currently depreceated to have also straight action as third column.
elif actions_count == 5:
for action in y_all:
if action[1] == 0.5:
y_all_binary.append(np.array((action[0],0,1,0,action[2])))
elif action[1] == 0:
y_all_binary.append(np.array((action[0],1,0,0,action[2])))
elif action[1] == 1:
y_all_binary.append(np.array((action[0],0,0,1,action[2])))
y_all_binary = np.array(y_all_binary)
# Replace y_all with binary one
y_all = y_all_binary
if args.disable_brake == 1:
y_all = y_all[:,:-1]
actions_count -= 1
#####################
# Generate train and test splits
#####################
# Get demonstration count.
images_length = [len(i) for i in images]
# Get amount of images to use as test with given args.test_count
test_frames = 0
if args.test_count > 0:
for test in range(args.test_count):
test_frames += images_length[-1-test]
X_train, X_test, y_train, y_test = ravel_splits[0:-test_frames], \
ravel_splits[-test_frames:], y_all[0:-test_frames], y_all[-test_frames:]
#############################################################################
# Action model
#############################################################################
inp = Input(shape=(299,299,3), name = 'image_input')
# with tf.device('/device:GPU:0'):
partial_trained = load_model(partial_model)
partial_trained_input = partial_trained(inp)
partial_trained_input = Conv2D(32,(5,5),activation='relu',padding='same')(partial_trained_input)
partial_trained_input = MaxPooling2D(pool_size=(4, 4))(partial_trained_input)
partial_trained_input = Conv2D(32,(5,5),activation='relu',padding='same')(partial_trained_input)
partial_trained_input = MaxPooling2D(pool_size=(2,2))(partial_trained_input)
partial_trained_input = Flatten()(partial_trained_input)
if args.action_loss_type == 'mse':
partial_trained_input = (Dense(actions_count, activation = 'sigmoid'))(partial_trained_input)
elif args.action_loss_type == 'bce':
partial_trained_input = (Dense(actions_count, activation = 'sigmoid'))(partial_trained_input)
else:
print('Error! Action model loss type not mse or bce.')
quit()
action_model = Model(inputs=inp, outputs=partial_trained_input)
action_model.summary()
#####################
# Loss
#####################
loss_2 = None
if args.action_loss_type == 'bce':
loss_2 = binary_crossentropy
elif args.action_loss_type == 'mse':
loss_2 = mean_squared_error
#####################
# Optimizer
#####################
optimizer_2 = None
# Low Learning rate needed
if args.action_optimizer == 'Adam':
optimizer_2 = Adam(lr=args.action_lr)
elif args.action_optimizer == 'sgd':
optimizer_2 = SGD(lr=args.action_lr)
#####################
# Compile
#####################
action_model.compile(loss=loss_2, optimizer=optimizer_2,metrics=['accuracy'])
#####################
# Fit
#####################
last_val_loss = None
if args.action_epochs > 0:
history = action_model.fit(X_train, y_train, validation_data = (X_test, y_test))
epoch = 1
while last_val_loss != history.history['val_loss'][-1] \
and epoch < args.action_epochs:
print('Epoch: %s/%s' % (epoch+1, args.action_epochs))
last_val_loss = history.history['val_loss'][-1]
history = action_model.fit(X_train, y_train, \
validation_data = (X_test, y_test))
epoch += 1
if history.history['acc'][-1] > 0.999:
print('Accuracy over 0.999. Fit ends.')
break
if epoch == args.action_epochs:
print('Maximum number of epochs reached.')
elif last_val_loss != history.history['val_loss'][-1]:
print('val_loss was same two times.')
#####################
# Save model
#####################
action_save_name = 'actionModel\\action_model_'
action_save_name += '%s_%s_lr%s_epochs%s_disable_brake%s_' % \
(args.action_optimizer, args.action_loss_type, args.action_lr,
args.action_epochs, args.disable_brake)
partial_folder = 'partialModel'
action_save_name += partial_model[len(partial_folder)+1:]
print('Saving model to: ', action_save_name)
action_model.save(action_save_name)
print('Model saved')
return action_save_name | [
"keras.models.load_model",
"keras.layers.core.Dense",
"keras.optimizers.SGD",
"keras.layers.convolutional.MaxPooling2D",
"tensorflow.Session",
"keras.optimizers.Adam",
"tensorflow.logging.set_verbosity",
"keras.models.Model",
"tensorflow.ConfigProto",
"pickle.load",
"numpy.array",
"keras.layer... | [((501, 543), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (525, 543), True, 'import tensorflow as tf\n'), ((586, 602), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (600, 602), True, 'import tensorflow as tf\n'), ((681, 706), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (691, 706), True, 'import tensorflow as tf\n'), ((2865, 2887), 'numpy.array', 'np.array', (['ravel_splits'], {}), '(ravel_splits)\n', (2873, 2887), True, 'import numpy as np\n'), ((3166, 3181), 'numpy.array', 'np.array', (['y_all'], {}), '(y_all)\n', (3174, 3181), True, 'import numpy as np\n'), ((5569, 5615), 'keras.layers.Input', 'Input', ([], {'shape': '(299, 299, 3)', 'name': '"""image_input"""'}), "(shape=(299, 299, 3), name='image_input')\n", (5574, 5615), False, 'from keras.layers import Input\n'), ((5681, 5706), 'keras.models.load_model', 'load_model', (['partial_model'], {}), '(partial_model)\n', (5691, 5706), False, 'from keras.models import Model, load_model\n'), ((6593, 6641), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'partial_trained_input'}), '(inputs=inp, outputs=partial_trained_input)\n', (6598, 6641), False, 'from keras.models import Model, load_model\n'), ((1378, 1392), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1389, 1392), False, 'import pickle\n'), ((4633, 4655), 'numpy.array', 'np.array', (['y_all_binary'], {}), '(y_all_binary)\n', (4641, 4655), True, 'import numpy as np\n'), ((5788, 5841), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (5, 5), activation='relu', padding='same')\n", (5794, 5841), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((5890, 5920), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(4, 4)'}), '(pool_size=(4, 4))\n', (5902, 5920), False, 'from 
keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((5973, 6026), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (5, 5), activation='relu', padding='same')\n", (5979, 6026), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((6075, 6105), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6087, 6105), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((6157, 6166), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (6164, 6166), False, 'from keras.layers.core import Dense, Flatten\n'), ((7111, 7134), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.action_lr'}), '(lr=args.action_lr)\n', (7115, 7134), False, 'from keras.optimizers import Adam, SGD\n'), ((6266, 6308), 'keras.layers.core.Dense', 'Dense', (['actions_count'], {'activation': '"""sigmoid"""'}), "(actions_count, activation='sigmoid')\n", (6271, 6308), False, 'from keras.layers.core import Dense, Flatten\n'), ((7200, 7222), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'args.action_lr'}), '(lr=args.action_lr)\n', (7203, 7222), False, 'from keras.optimizers import Adam, SGD\n'), ((6411, 6453), 'keras.layers.core.Dense', 'Dense', (['actions_count'], {'activation': '"""sigmoid"""'}), "(actions_count, activation='sigmoid')\n", (6416, 6453), False, 'from keras.layers.core import Dense, Flatten\n'), ((3835, 3873), 'numpy.array', 'np.array', (['(action[0], 0, 0, action[2])'], {}), '((action[0], 0, 0, action[2]))\n', (3843, 3873), True, 'import numpy as np\n'), ((3951, 3989), 'numpy.array', 'np.array', (['(action[0], 1, 0, action[2])'], {}), '((action[0], 1, 0, action[2]))\n', (3959, 3989), True, 'import numpy as np\n'), ((4332, 4373), 'numpy.array', 'np.array', (['(action[0], 0, 1, 0, action[2])'], {}), '((action[0], 0, 1, 0, action[2]))\n', (4340, 4373), True, 'import numpy as np\n'), ((4067, 4105), 
'numpy.array', 'np.array', (['(action[0], 0, 1, action[2])'], {}), '((action[0], 0, 1, action[2]))\n', (4075, 4105), True, 'import numpy as np\n'), ((4450, 4491), 'numpy.array', 'np.array', (['(action[0], 1, 0, 0, action[2])'], {}), '((action[0], 1, 0, 0, action[2]))\n', (4458, 4491), True, 'import numpy as np\n'), ((4568, 4609), 'numpy.array', 'np.array', (['(action[0], 0, 0, 1, action[2])'], {}), '((action[0], 0, 0, 1, action[2]))\n', (4576, 4609), True, 'import numpy as np\n')] |
import numpy as np
import vaex
def test_correlation():
    """Exercise DataFrame.correlation: scalar pair, list forms, binby,
    xarray output and explicit pair lists."""
    df = vaex.example()

    # Scalar form: one pair of columns, checked against numpy's corrcoef.
    cxy = df.correlation('x', 'y')
    reference = np.corrcoef(df.x.values, df.y.values)[0, 1]
    np.testing.assert_array_almost_equal(cxy, reference, decimal=5)
    # Correlation must be symmetric in its arguments.
    np.testing.assert_array_almost_equal(df.correlation('x', 'y'), df.correlation('y', 'x'))

    # Pre-compute the six distinct entries of the symmetric 3x3 matrix.
    cxx = df.correlation('x', 'x')
    cyy = df.correlation('y', 'y')
    czz = df.correlation('z', 'z')
    cxz = df.correlation('x', 'z')
    cyz = df.correlation('y', 'z')
    full_matrix = np.array([[cxx, cxy, cxz],
                            [cxy, cyy, cyz],
                            [cxz, cyz, czz]])

    # List-of-columns form returns the full matrix.
    result = df.correlation(x=['x', 'y', 'z'])
    np.testing.assert_array_almost_equal(result, full_matrix)

    # List of columns against a single target column.
    desired = df.correlation(x=['x', 'y', 'z'], y='z')
    np.testing.assert_array_almost_equal(desired, np.array([cxz, cyz, czz]))

    # Two lists give a (rows x cols) sub-matrix.
    # NOTE: the original computed this twice in a row; the dead duplicate
    # call was removed.
    result = df.correlation(x=['x', 'y', 'z'], y=['y', 'z'])
    assert result.shape == (3, 2)
    np.testing.assert_array_almost_equal(result, np.array([[cxy, cxz],
                                                           [cyy, cyz],
                                                           [cyz, czz]]))

    # binby: bin 0 of x over [-2, 2) with 4 bins covers [-2, -1), so it must
    # agree with the equivalent explicit selection.
    result = df.correlation(['x', 'y'], binby='x', shape=4, limits=[-2, 2])
    result0 = df.correlation(['x', 'y'], selection=(df.x >= -2) & (df.x < -1))
    np.testing.assert_array_almost_equal(result[0], result0)

    # xarray output carries labelled axes.
    xar = df.correlation(['x', 'y', 'z'], array_type='xarray')
    np.testing.assert_array_almost_equal(xar.data, full_matrix)
    assert xar.dims == ("x", "y")
    assert xar.coords['x'].data.tolist() == ['x', 'y', 'z']
    assert xar.coords['y'].data.tolist() == ['x', 'y', 'z']

    # Explicit list of pairs yields a long-format DataFrame.
    dfc = df.correlation([('x', 'y'), ('x', 'z'), ('y', 'z')])
    assert len(dfc) == 3
    assert dfc['x'].tolist() == ['x', 'x', 'y']
    assert dfc['y'].tolist() == ['y', 'z', 'z']
    np.testing.assert_array_almost_equal(dfc['correlation'].tolist(), [cxy, cxz, cyz])
| [
"numpy.corrcoef",
"vaex.example",
"numpy.testing.assert_array_almost_equal",
"numpy.array"
] | [((67, 81), 'vaex.example', 'vaex.example', ([], {}), '()\n', (79, 81), False, 'import vaex\n'), ((214, 278), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['xy', 'xy_expected'], {'decimal': '(5)'}), '(xy, xy_expected, decimal=5)\n', (250, 278), True, 'import numpy as np\n'), ((659, 711), 'numpy.array', 'np.array', (['([xx, xy, xz], [yx, yy, yz], [zx, zy, zz])'], {}), '(([xx, xy, xz], [yx, yy, yz], [zx, zy, zz]))\n', (667, 711), True, 'import numpy as np\n'), ((766, 820), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (802, 820), True, 'import numpy as np\n'), ((936, 958), 'numpy.array', 'np.array', (['[xz, yz, zz]'], {}), '([xz, yz, zz])\n', (944, 958), True, 'import numpy as np\n'), ((963, 1018), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['desired', 'expected'], {}), '(desired, expected)\n', (999, 1018), True, 'import numpy as np\n'), ((1130, 1170), 'numpy.array', 'np.array', (['([xy, xz], [yy, yz], [zy, zz])'], {}), '(([xy, xz], [yy, yz], [zy, zz]))\n', (1138, 1170), True, 'import numpy as np\n'), ((1251, 1305), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1287, 1305), True, 'import numpy as np\n'), ((1529, 1585), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result[0]', 'result0'], {}), '(result[0], result0)\n', (1565, 1585), True, 'import numpy as np\n'), ((1655, 1712), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['xar.data', 'expected3'], {}), '(xar.data, expected3)\n', (1691, 1712), True, 'import numpy as np\n'), ((167, 204), 'numpy.corrcoef', 'np.corrcoef', (['df.x.values', 'df.y.values'], {}), '(df.x.values, df.y.values)\n', (178, 204), True, 'import numpy as np\n')] |
import math
import numpy as np
from numpy.linalg import norm
from gensim.models import Word2Vec
import pickle
from tfidf import calculate_tf_query, calculate_idf, calculate_tf_doc
def load_doc_tfidf(path):
    """Read a pickled per-document tf-idf structure from *path* and return it."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def cos_similarity_emb(query, doc):
    """Return the cosine similarity between two embedding vectors.

    Computed as dot(query, doc) / (||query|| * ||doc||).
    """
    denominator = norm(query) * norm(doc)
    return np.dot(query, doc) / denominator
def set_doc_embeddings(doc_embeddings, collection):
    """Attach the i-th embedding to the i-th document of the collection.

    Args:
        doc_embeddings: sequence of embedding vectors, positionally aligned
            with *collection*.
        collection: iterable of document objects; each receives an
            ``embeddings`` attribute (mutated in place).

    Returns:
        The same *collection*, with embeddings assigned.
    """
    # zip pairs each document with its embedding, replacing the manual
    # index counter of the original.  (If the sequences ever differ in
    # length, zip stops at the shorter one instead of raising IndexError.)
    for doc, embedding in zip(collection, doc_embeddings):
        doc.embeddings = embedding
    return collection
def calculate_query_word_scores(query, terms, collection):
    """Compute a tf-idf weight for every distinct term of *query*.

    Args:
        query: tokenized query (list of term strings, may contain repeats).
        terms: mapping term -> collection frequency info, consumed by
            ``calculate_idf`` (semantics defined in the tfidf module).
        collection: full document collection; only its size feeds the idf.

    Returns:
        dict mapping each distinct query term to (1 + log10(tf)) * idf.
    """
    query_scores = {}
    n_docs = len(collection)  # loop-invariant, hoisted out of the loop
    for term in query:
        # The result dict itself records which terms were already scored,
        # so the original's separate 'seen' list (O(n) membership test per
        # term) is unnecessary.
        if term not in query_scores:
            tf_weight = 1 + math.log10(calculate_tf_query(term, query))
            query_scores[term] = tf_weight * calculate_idf(n_docs, terms.get(term))
    return query_scores
def create_doc_tfidf_file(terms, collection):
    """Fill each document's ``term_scores`` with tf-idf weights and collect them.

    Args:
        terms: mapping term -> collection frequency info, consumed by
            ``calculate_tf_doc`` / ``calculate_idf`` (tfidf module).
        collection: documents; each must expose ``content`` (token list) and
            a ``term_scores`` dict, which is populated in place.

    Returns:
        list with one tf-idf dict per document, in collection order.
    """
    n_docs = len(collection)  # loop-invariant, hoisted out of both loops
    for doc in collection:
        # set membership is O(1); the original used a list (O(n) per test).
        seen_terms = set()
        for term in doc.content:
            if term not in seen_terms:  # score each distinct term once per doc
                tf_weight = 1 + math.log10(calculate_tf_doc(terms.get(term), doc))
                doc.term_scores[term] = tf_weight * calculate_idf(n_docs, terms.get(term))
                seen_terms.add(term)
    # One tf-idf dict per document, in collection order.
    return [doc.term_scores for doc in collection]
def initialize_word2vec(w2v_model_path, terms, collection):
    """Build one embedding per document: tf-idf weighted average of word vectors.

    Args:
        w2v_model_path: path to a saved gensim Word2Vec model
            (300-dimensional vectors assumed -- TODO confirm against the model).
        terms: term statistics passed through to ``create_doc_tfidf_file``.
        collection: documents; each gets an ``embeddings`` attribute.

    Returns:
        The collection with a per-document embedding vector attached.
    """
    w2v_model = Word2Vec.load(w2v_model_path)
    doc_tfidf = create_doc_tfidf_file(terms, collection)
    # A document with no in-vocabulary token divides by weights_sum == 0;
    # silence those warnings once, instead of re-setting the process-global
    # numpy error state on every loop iteration as the original did.
    np.seterr(divide='ignore', invalid='ignore')
    doc_embeddings = []
    for doc in doc_tfidf:
        doc_vector = np.zeros(300)
        weights_sum = 0
        for token, weight in doc.items():
            try:
                doc_vector += w2v_model.wv[token] * weight
                weights_sum += weight
            except KeyError:
                # Token missing from the word2vec vocabulary: skip it.
                # (Narrowed from the original bare 'except: pass', which
                # also swallowed KeyboardInterrupt and real bugs.)
                continue
        doc_embeddings.append(doc_vector / weights_sum)
    collection = set_doc_embeddings(doc_embeddings, collection)
    return collection
def query_word2vec(query, w2v_model_path, terms, collection, k=5):
    """Rank documents against *query* by cosine similarity of embeddings.

    Args:
        query: tokenized query (list of term strings).
        w2v_model_path: path to a saved gensim Word2Vec model.
        terms: term statistics used for tf-idf query weighting.
        collection: documents; each must already carry an ``embeddings``
            attribute (see ``initialize_word2vec``).
        k: number of top documents to return (default 5, matching the
            previously hard-coded value).

    Returns:
        dict of the k best documents mapped to their similarity score,
        ordered from most to least similar.
    """
    w2v_model = Word2Vec.load(w2v_model_path)
    # tf-idf weighted average of the query's word vectors.
    query_word_scores = calculate_query_word_scores(query, terms, collection)
    query_vector = np.zeros(300)
    weights_sum = 0
    for token, weight in query_word_scores.items():
        try:
            query_vector += w2v_model.wv[token] * weight
            weights_sum += weight
        except KeyError:
            # Out-of-vocabulary token: skip it, matching the tolerance of
            # the document-side loop in initialize_word2vec (the original
            # crashed here on any OOV query token).
            continue
    query_embedding = query_vector / weights_sum
    # Cosine similarity of the query against every document embedding.
    doc_scores = {doc: cos_similarity_emb(query_embedding, doc.embeddings)
                  for doc in collection}
    # Highest-scoring documents first; keep only the top k.
    ranked = sorted(doc_scores.items(), key=lambda item: item[1], reverse=True)
    return dict(ranked[:k])
| [
"tfidf.calculate_tf_query",
"numpy.seterr",
"numpy.zeros",
"pickle.load",
"numpy.linalg.norm",
"numpy.dot",
"gensim.models.Word2Vec.load"
] | [((1895, 1924), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['w2v_model_path'], {}), '(w2v_model_path)\n', (1908, 1924), False, 'from gensim.models import Word2Vec\n'), ((2680, 2709), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['w2v_model_path'], {}), '(w2v_model_path)\n', (2693, 2709), False, 'from gensim.models import Word2Vec\n'), ((2894, 2907), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (2902, 2907), True, 'import numpy as np\n'), ((273, 300), 'pickle.load', 'pickle.load', (['doc_tfidf_file'], {}), '(doc_tfidf_file)\n', (284, 300), False, 'import pickle\n'), ((383, 401), 'numpy.dot', 'np.dot', (['query', 'doc'], {}), '(query, doc)\n', (389, 401), True, 'import numpy as np\n'), ((2143, 2156), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (2151, 2156), True, 'import numpy as np\n'), ((2386, 2430), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (2395, 2430), True, 'import numpy as np\n'), ((405, 416), 'numpy.linalg.norm', 'norm', (['query'], {}), '(query)\n', (409, 416), False, 'from numpy.linalg import norm\n'), ((419, 428), 'numpy.linalg.norm', 'norm', (['doc'], {}), '(doc)\n', (423, 428), False, 'from numpy.linalg import norm\n'), ((941, 972), 'tfidf.calculate_tf_query', 'calculate_tf_query', (['term', 'query'], {}), '(term, query)\n', (959, 972), False, 'from tfidf import calculate_tf_query, calculate_idf, calculate_tf_doc\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.