code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
class Perceptron:
    """A single perceptron with a step activation and a bias input.

    The bias is modelled as an extra input fixed at -1, so the weight
    vector holds ``number_of_inputs + 1`` entries.
    """

    def __init__(self, number_of_inputs, learning_rate):
        # One weight per input plus one bias weight, initialised in [0, 1).
        self.weights = np.random.rand(1, number_of_inputs + 1)[0]
        self.learning_rate = learning_rate

    def activate(self, z):
        """A step function where non-negative values are returned by a 1
        and negative values are returned by a -1."""
        # Fix: this docstring used to sit as a stray class-level string
        # statement before the method instead of inside it.
        if z >= 0:
            return 1
        else:
            return -1

    def feed_forward(self, input_values):
        """Return the perceptron output (+1 or -1) for a two-value input."""
        inputs = np.array([
            input_values[0], input_values[1], -1
        ])
        z = inputs.dot(self.weights.transpose())
        return self.activate(z)

    def update_weights(self, actual_x, error):
        """Apply the perceptron learning rule for one training example."""
        x = np.array([
            actual_x[0], actual_x[1], -1
        ])
        self.weights += self.learning_rate * error * x
"""
Below code simulates a perceptron learning to act as an OR gate.
(-1) represents 0
(+1) represents 1
"""
if __name__ == "__main__":
    print("\nPerceptron learning the OR gate functionality\n")
    np.random.seed(1111)  # fixed seed for reproducible initial weights
    perceptron = Perceptron(2, 0.01)
    # OR-gate truth table with -1 encoding logical 0 and +1 encoding 1.
    training_x = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
    training_y = np.array([[-1], [1], [1], [1]])
    for epoch in range(25):
        total_error = 0
        for example in range(len(training_x)):
            y_predicted = perceptron.feed_forward(training_x[example])
            y_expected = training_y[example][0]
            error = y_expected - y_predicted
            total_error += error
            perceptron.update_weights(training_x[example], error)
        print("epoch " + str(epoch) + " Total Error " + str(total_error))
        if total_error == 0:
            # Every training example is classified correctly; stop early.
            break
    print("Final Weights : " + str(perceptron.weights))
    # Fix: removed a stray bare string statement ("Testing final weights")
    # that was a no-op before the print below.
    print("\nTesting final weights")
    print('Input [-1, -1] Output ' + str(perceptron.feed_forward([-1, -1])))
    print('Input [-1, +1] Output ' + str(perceptron.feed_forward([-1, +1])))
    print('Input [+1, -1] Output ' + str(perceptron.feed_forward([+1, -1])))
    print('Input [+1, +1] Output ' + str(perceptron.feed_forward([+1, +1])))
| [
"numpy.array",
"numpy.random.rand",
"numpy.random.seed"
] | [((1013, 1033), 'numpy.random.seed', 'np.random.seed', (['(1111)'], {}), '(1111)\n', (1027, 1033), True, 'import numpy as np\n'), ((1088, 1134), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 1], [1, -1], [1, 1]]'], {}), '([[-1, -1], [-1, 1], [1, -1], [1, 1]])\n', (1096, 1134), True, 'import numpy as np\n'), ((1152, 1183), 'numpy.array', 'np.array', (['[[-1], [1], [1], [1]]'], {}), '([[-1], [1], [1], [1]])\n', (1160, 1183), True, 'import numpy as np\n'), ((482, 530), 'numpy.array', 'np.array', (['[input_values[0], input_values[1], -1]'], {}), '([input_values[0], input_values[1], -1])\n', (490, 530), True, 'import numpy as np\n'), ((694, 734), 'numpy.array', 'np.array', (['[actual_x[0], actual_x[1], -1]'], {}), '([actual_x[0], actual_x[1], -1])\n', (702, 734), True, 'import numpy as np\n'), ((119, 158), 'numpy.random.rand', 'np.random.rand', (['(1)', '(number_of_inputs + 1)'], {}), '(1, number_of_inputs + 1)\n', (133, 158), True, 'import numpy as np\n')] |
#
# grid_spline.py
#
# Code for one-dimensional cubic splines on a
# uniform grid, including analytic slope, curvature,
# and extremum evaluation.
#
# Most convenient interface is via GridSpline class,
# which encapsulates the low-level routines.
#
# <NAME>, <NAME>, 2010-2014
#
import numpy as n
def tri_diag(a, b, c, r):
    """
    Solve a tri-diagonal linear system (Thomas algorithm).

    a, b and c are the sub-, main- and super-diagonals, r is the
    right-hand side; a[0] and the last entry of c are not used.
    """
    size = len(b)
    alpha = a.copy()
    beta = b.copy()
    # Forward sweep: eliminate the sub-diagonal.
    for k in range(1, size):
        beta[k] = b[k] - alpha[k] * c[k - 1] / beta[k - 1]
    gamma = c / beta
    y = r.copy()
    y[0] = r[0] / beta[0]
    for k in range(1, size):
        y[k] = (r[k] - alpha[k] * y[k - 1]) / beta[k]
    # Back substitution.
    x = y.copy()
    x[size - 1] = y[size - 1]
    for k in range(size - 2, -1, -1):
        x[k] = y[k] - gamma[k] * x[k + 1]
    return x
def spline_get_ms(y):
    """
    Compute the knot slopes that define the cubic spline through the
    knot values y (end slopes from one-sided three-point formulas).
    """
    last = len(y) - 1
    m = 0. * y
    # One-sided estimates pin down the slopes at both ends.
    m[0] = 2.0 * y[1] - 1.5 * y[0] - 0.5 * y[2]
    m[last] = 1.5 * y[last] + 0.5 * y[last - 2] - 2.0 * y[last - 1]
    # Interior slopes satisfy the standard tri-diagonal spline system.
    rhs = 3.0 * (y[2:] - y[:-2])
    rhs[0] = rhs[0] - m[0]
    rhs[last - 2] = rhs[last - 2] - m[last]
    diag = n.zeros(last - 1) + 4.0
    off = n.zeros(last - 1) + 1.0
    m[1:last] = tri_diag(off, diag, off, rhs)
    return m
def spline_get_val(y, m, x):
    """
    Evaluate the spline value at positions x.

    y are the knot values, m the knot slopes (see spline_get_ms) and
    x the evaluation positions in zero-based knot coordinates.
    """
    intervals = len(y) - 1
    i = n.int32(x) + 1
    # (the following is a hack to keep the upper
    #  bound in the valid interval range)
    # Fix: must be integer floor division -- under Python 3 the original
    # "/" produced float indices and broke the array indexing below.
    i = i - i // (intervals + 1)
    d1 = x - i + 1.
    d2 = d1 - 1.0
    spline_val = (y[i] * d1**3 - y[i-1] * d2**3
                  + (m[i] - 3.0 * y[i]) * d1**2 * d2
                  + (m[i-1] + 3.0 * y[i-1]) * d1 * d2**2)
    return spline_val
def spline_get_slope(y, m, x):
    """
    Evaluate the analytic spline slope (first derivative) at positions x.

    y are the knot values, m the knot slopes (see spline_get_ms) and
    x the evaluation positions in zero-based knot coordinates.
    """
    intervals = len(y) - 1
    i = n.int32(x) + 1
    # (the following is a hack to keep the upper
    #  bound in the valid interval range)
    # Fix: must be integer floor division -- under Python 3 the original
    # "/" produced float indices and broke the array indexing below.
    i = i - i // (intervals + 1)
    d1 = x - i + 1.
    d2 = d1 - 1.0
    spline_slope = (m[i] * d1**2 + m[i-1] * d2**2 +
                    (2.0*m[i] + 2.0*m[i-1] - 6.0*y[i] + 6.0*y[i-1]) * d1 * d2)
    return spline_slope
def spline_get_curv(y, m, x):
    """
    Evaluate the analytic spline curvature (second derivative) at x.

    y are the knot values, m the knot slopes (see spline_get_ms) and
    x the evaluation positions in zero-based knot coordinates.
    """
    intervals = len(y) - 1
    i = n.int32(x) + 1
    # (the following is a hack to keep the upper
    #  bound in the valid interval range)
    # Fix: must be integer floor division -- under Python 3 the original
    # "/" produced float indices and broke the array indexing below.
    i = i - i // (intervals + 1)
    d1 = x - i + 1.
    d2 = d1 - 1.0
    spline_curv = (2.0 * m[i] * d1 + 2.0 * m[i-1] * d2 +
                   (2.0*m[i] + 2.0*m[i-1] - 6.0*y[i] + 6.0*y[i-1]) * (d1 + d2))
    return spline_curv
def spline_get_max(y, m):
    """
    Find the positions of the analytic maxima of the spline defined by
    knot values y and knot slopes m.  Returns the maxima in global knot
    coordinates, or an empty array when there are none.
    """
    nseg = len(y) - 1
    xval = n.zeros(nseg) - 1.0
    # Quadratic derivative coefficients on each segment; m[1:] and y[1:]
    # are the "next knot" values (equivalent to the n.roll(..., -1) trick).
    a = 3.0 * (m[:-1] + m[1:]) + 6.0 * (y[:-1] - y[1:])
    b = -2.0 * (2.0 * m[:-1] + m[1:] + 3.0 * (y[:-1] - y[1:]))
    c = m[:-1]
    # Discriminant of the derivative quadratic:
    disc = b**2 - 4.0 * a * c
    # Segments where the derivative is linear and decreasing:
    lin = (n.where((a == 0) * (b < 0)))[0]
    if len(lin) > 0:
        xval[lin] = -c[lin] / b[lin]
    # Segments with two distinct quadratic roots (take the maximum root):
    quad = (n.where((a != 0) * (disc > 0)))[0]
    if len(quad) > 0:
        xval[quad] = -0.5 * (b[quad] + n.sqrt(disc[quad])) / a[quad]
    # Keep only roots lying inside their own segment [0, 1):
    keep = (n.where((xval >= 0.0) * (xval < 1.0)))[0]
    if len(keep) <= 0:
        return n.asarray([])
    # Shift to global knot coordinates.
    xval = xval + n.arange(nseg)
    return xval[keep]
# OOP interface to this business:
class GridSpline:
    """
    Cubic-spline wrapper for uniformly gridded 1D data.

    Usage:
        GS = GridSpline(y)
    where y holds the values to be splined.  The abscissa is implicitly
    the zero-based integer grid 0, 1, ..., len(y) - 1.
    """

    def __init__(self, y):
        # Keep a private copy so later mutation of y cannot change the fit.
        self.y = y.copy()
        self.ms = spline_get_ms(self.y)

    def get_val(self, x):
        """Spline values evaluated at abscissa positions x."""
        return spline_get_val(self.y, self.ms, x)

    def get_slope(self, x):
        """Analytic first derivative of the spline at positions x."""
        return spline_get_slope(self.y, self.ms, x)

    def get_curv(self, x):
        """Analytic curvature of the spline at positions x."""
        return spline_get_curv(self.y, self.ms, x)

    def get_max(self):
        """Analytically determined maxima locations over the data domain."""
        return spline_get_max(self.y, self.ms)

    def get_min(self):
        """Analytically determined minima locations over the data domain.

        Minima of the spline are the maxima of its negation.
        """
        negated_y = -self.y
        negated_ms = -self.ms
        return spline_get_max(negated_y, negated_ms)
| [
"numpy.roll",
"numpy.sqrt",
"numpy.where",
"numpy.int32",
"numpy.asarray",
"numpy.zeros",
"numpy.arange"
] | [((1120, 1137), 'numpy.zeros', 'n.zeros', (['(bign - 1)'], {}), '(bign - 1)\n', (1127, 1137), True, 'import numpy as n\n'), ((1157, 1174), 'numpy.zeros', 'n.zeros', (['(bign - 1)'], {}), '(bign - 1)\n', (1164, 1174), True, 'import numpy as n\n'), ((1371, 1381), 'numpy.int32', 'n.int32', (['x'], {}), '(x)\n', (1378, 1381), True, 'import numpy as n\n'), ((1852, 1862), 'numpy.int32', 'n.int32', (['x'], {}), '(x)\n', (1859, 1862), True, 'import numpy as n\n'), ((2310, 2320), 'numpy.int32', 'n.int32', (['x'], {}), '(x)\n', (2317, 2320), True, 'import numpy as n\n'), ((2770, 2783), 'numpy.zeros', 'n.zeros', (['bign'], {}), '(bign)\n', (2777, 2783), True, 'import numpy as n\n'), ((3196, 3223), 'numpy.where', 'n.where', (['((a == 0) * (b < 0))'], {}), '((a == 0) * (b < 0))\n', (3203, 3223), True, 'import numpy as n\n'), ((3350, 3377), 'numpy.where', 'n.where', (['((a != 0) * (d > 0))'], {}), '((a != 0) * (d > 0))\n', (3357, 3377), True, 'import numpy as n\n'), ((3557, 3594), 'numpy.where', 'n.where', (['((xval >= 0.0) * (xval < 1.0))'], {}), '((xval >= 0.0) * (xval < 1.0))\n', (3564, 3594), True, 'import numpy as n\n'), ((3638, 3651), 'numpy.asarray', 'n.asarray', (['[]'], {}), '([])\n', (3647, 3651), True, 'import numpy as n\n'), ((3733, 3747), 'numpy.arange', 'n.arange', (['bign'], {}), '(bign)\n', (3741, 3747), True, 'import numpy as n\n'), ((2876, 2889), 'numpy.roll', 'n.roll', (['m', '(-1)'], {}), '(m, -1)\n', (2882, 2889), True, 'import numpy as n\n'), ((2931, 2944), 'numpy.roll', 'n.roll', (['y', '(-1)'], {}), '(y, -1)\n', (2937, 2944), True, 'import numpy as n\n'), ((2992, 3005), 'numpy.roll', 'n.roll', (['m', '(-1)'], {}), '(m, -1)\n', (2998, 3005), True, 'import numpy as n\n'), ((3449, 3466), 'numpy.sqrt', 'n.sqrt', (['d[qroots]'], {}), '(d[qroots])\n', (3455, 3466), True, 'import numpy as n\n'), ((3054, 3067), 'numpy.roll', 'n.roll', (['y', '(-1)'], {}), '(y, -1)\n', (3060, 3067), True, 'import numpy as n\n')] |
# 运行参数: --model_dir=E:\model\official_myDataset --data_dir=E:\data\my_dataset --train_epochs=10 --distribution_strategy=one_device --num_gpus=1 --download
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a simple model on the MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
from official.vision.image_classification.resnet import common
from sklearn.metrics import f1_score, precision_recall_fscore_support
import numpy as np
import json
FLAGS = flags.FLAGS
def build_model():
    """Construct the multi-scale CNN used for traffic-state prediction.

    Three image scales (12x12, 6x6, 3x3) plus two 3x3 road rasters are
    fused at decreasing resolution and reduced to a 3-way softmax.
    """
    image12 = tf.keras.layers.Input(shape=(12, 12, 1), name='image12')
    image6 = tf.keras.layers.Input(shape=(6, 6, 1), name='image6')
    image3 = tf.keras.layers.Input(shape=(3, 3, 1), name='image3')
    road = tf.keras.layers.Input(shape=(3, 3, 1), name='road')
    roadExt = tf.keras.layers.Input(shape=(3, 3, 1), name='roadExt')
    # 12x12 branch, pooled down to 6x6.
    net = tf.keras.layers.Conv2D(filters=8, kernel_size=3, padding='same',
                                 activation='relu')(image12)
    net = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                                       padding='same')(net)
    # Fuse the native 6x6 input and pool down to 3x3.
    net = tf.keras.layers.concatenate([net, image6], axis=-1)
    net = tf.keras.layers.Conv2D(filters=12, kernel_size=3, padding='same',
                                 activation='relu')(net)
    net = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                                       padding='same')(net)
    # Fuse the 3x3 inputs, then collapse the spatial dimensions.
    net = tf.keras.layers.concatenate([net, image3, road, roadExt], axis=-1)
    net = tf.keras.layers.Conv2D(filters=16, kernel_size=3, padding='same',
                                 activation='relu')(net)
    net = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(3, 3),
                                       padding='same')(net)
    net = tf.keras.layers.Flatten()(net)
    probs = tf.keras.layers.Dense(3, activation='softmax')(net)
    model = tf.keras.models.Model([image12, image6, image3, road, roadExt],
                                  probs, name='my_dataset')
    return model
def run(flags_obj, datasets_override=None, strategy_override=None):
    """Run model training, periodic evaluation and best-model export.

    Args:
      flags_obj: An object containing parsed flag values.
      datasets_override: A pair of `tf.data.Dataset` objects to train the model,
        representing the train and test sets.
      strategy_override: A `tf.distribute.Strategy` object to use for model.

    Returns:
      The string 'over' once the training loop completes (not a stats
      dict, despite the original docstring's claim).
    """
    strategy = strategy_override or distribution_utils.get_distribution_strategy(
        distribution_strategy=flags_obj.distribution_strategy,
        num_gpus=flags_obj.num_gpus,
        tpu_address=flags_obj.tpu)
    strategy_scope = distribution_utils.get_strategy_scope(strategy)
    # Dataset name says "traffic_state_predict"; the mnist-style naming is
    # inherited from the official MNIST example this file was adapted from.
    mnist = tfds.builder('traffic_state_predict', data_dir=flags_obj.data_dir)
    if flags_obj.download:
        mnist.download_and_prepare()
    # inputs = ["image12"]
    # outputs = ["label"]
    # mnist = mnist.map(lambda ex: ({i: ex[i] for i in inputs}, {o: ex[o] for o in outputs}))
    mnist_train, mnist_vali, mnist_test = datasets_override or mnist.as_dataset(
        split=['train', 'test', 'predict'],
        # decoders={'image': decode_image()}, # pylint: disable=no-value-for-parameter
        # as_supervised=True
    )
    # train_input_dataset = mnist_train.cache().repeat(10000).shuffle(buffer_size=100000).batch(128)
    train_input_dataset = mnist_train.cache().batch(300)
    eval_input_dataset = mnist_vali.cache().batch(300)
    test_input_dataset = mnist_test.cache().batch(600)
    with strategy_scope:
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            0.005, decay_steps=100000, decay_rate=0.96)
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
        model = build_model()
        model.compile(
            optimizer=optimizer,
            loss='sparse_categorical_crossentropy',
            metrics=['sparse_categorical_accuracy'],
            # metrics=weighted_fi_score,
        )
    checkpoint = tf.train.Checkpoint(myModel=model, myOptimizer=optimizer)
    manager = tf.train.CheckpointManager(checkpoint, directory='./save0', checkpoint_name='model.ckpt', max_to_keep=5)
    manager.restore_or_initialize()
    best_score = 0.0
    res = {}  # NOTE(review): only used by the commented-out dump below
    for indx, train_data in enumerate(train_input_dataset.as_numpy_iterator()):
        # Class weights up-weight the rarer congestion classes 1 and 2.
        print(model.train_on_batch(
            x=[train_data['image12'], train_data['image6'], train_data['image3'], train_data['road'], train_data['roadExt']],
            y=train_data['label'],
            class_weight={0: 1, 1: 3, 2: 6}
            # class_weight={0: 2, 1: 8}
        ))
        # Every 200 batches: evaluate, and export/save if the weighted
        # validation F1 improved.  NOTE(review): the inner loops reuse the
        # name `indx`, shadowing the outer batch counter.
        if indx % 200 == 0:
            print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
            res_vali = {}
            truth = []
            predict = []
            for indx, vali_data in enumerate(eval_input_dataset.as_numpy_iterator()):
                # print(model.test_on_batch(x=[vali_data['image12'], vali_data['image6'], vali_data['image3'], vali_data['road'],vali_data['roadExt']], y=vali_data['label']))
                outputs = model.predict_on_batch(x=[vali_data['image12'], vali_data['image6'], vali_data['image3'], vali_data['road'], vali_data['roadExt']])
                truth = truth + list(vali_data['label'])
                predict = predict + list(np.argmax(outputs, axis=-1))
                # Keep the raw softmax probabilities per example, keyed by
                # the example file name.
                for fname, output in zip(vali_data['fname'], list(outputs)):
                    res_vali[fname.decode()] = ','.join([str(itm) for itm in output])
            json.dump(res_vali, open('res_vali.json', 'w', encoding='utf-8'))
            p_class, r_class, f_class, support_micro = precision_recall_fscore_support(truth, predict, labels=[0, 1, 2])
            print('vali scores:')
            print(f_class)
            # Competition-style weighted F1: class 2 weighted most heavily.
            print(0.2 * f_class[0] + 0.2 * f_class[1] + 0.6 * f_class[2], best_score)
            ##################################################################################
            # Same evaluation on the training split, for diagnostics only.
            truth_train = []
            predict_train = []
            res_train = {}
            for indx, train_data in enumerate(train_input_dataset.as_numpy_iterator()):
                outputs_train = model.predict_on_batch(
                    x=[train_data['image12'], train_data['image6'], train_data['image3'], train_data['road'],
                       train_data['roadExt']])
                truth_train = truth_train + list(train_data['label'])
                predict_train = predict_train + list(np.argmax(outputs_train, axis=-1))
                for fname, output in zip(train_data['fname'], list(outputs_train)):
                    res_train[fname.decode()] = ','.join([str(itm) for itm in output])
                # if indx>=100:
                #     break
            json.dump(res_train, open('res_train.json', 'w', encoding='utf-8'))
            p_class_train, r_class_train, f_class_train, support_micro_train = precision_recall_fscore_support(truth_train, predict_train, labels=[0, 1, 2])
            print('train scores:')
            print(f_class_train)
            print(0.2 * f_class_train[0] + 0.2 * f_class_train[1] + 0.6 * f_class_train[2], best_score)
            print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
            if 0.2 * f_class[0] + 0.2 * f_class[1] + 0.6 * f_class[2] > best_score:
                # New best validation score: checkpoint, export, and write
                # predictions for the test ("predict") split.
                manager.save()
                res_test = {}
                export_path = os.path.join(flags_obj.model_dir, 'saved_model')
                model.save(export_path, include_optimizer=False)
                for indx, test_data in enumerate(test_input_dataset.as_numpy_iterator()):
                    outputs_test = model.predict_on_batch(x=[test_data['image12'], test_data['image6'], test_data['image3'], test_data['road'], test_data['roadExt']])
                    for fname, output in zip(test_data['fname'], list(outputs_test)):
                        res_test[fname.decode()] = ','.join([str(itm) for itm in output])
                    # for ind, fname in enumerate(test_data['fname']):
                    #     res[fname.decode()] = str(np.argmax(outputs_test, axis=-1)[ind])
                json.dump(res_test, open('res_test.json', 'w', encoding='utf-8'))
                # json.dump(res,open('res.json','w',encoding='utf-8'))
                best_score = 0.2 * f_class[0] + 0.2 * f_class[1] + 0.6 * f_class[2]
    return 'over'
def define_mnist_flags():
    """Define command line flags for MNIST model."""
    # Standard flag groups from the official-models flag machinery.
    flags_core.define_base(
        clean=True,
        num_gpu=True,
        train_epochs=True,
        epochs_between_evals=True,
        distribution_strategy=True)
    flags_core.define_device()
    flags_core.define_distribution()
    # Extra flag: whether tfds should download/prepare the dataset first.
    flags.DEFINE_bool('download', False,
                      'Whether to download data to `--data_dir`.')
    # FLAGS.set_default('batch_size', 16)
def main(_):
    """absl entry point: clean the model dir, run training, log stats."""
    # Optionally wipe the model directory (controlled by the --clean flag).
    model_helpers.apply_clean(FLAGS)
    stats = run(flags.FLAGS)
    logging.info('Run stats:\n%s', stats)
if __name__ == '__main__':
    # Register flags before absl parses argv and dispatches to main().
    logging.set_verbosity(logging.INFO)
    define_mnist_flags()
    app.run(main)
| [
"official.utils.flags.core.define_distribution",
"tensorflow.train.Checkpoint",
"official.utils.flags.core.define_device",
"absl.logging.info",
"tensorflow.keras.layers.Dense",
"official.utils.flags.core.define_base",
"tensorflow.keras.layers.Input",
"official.utils.misc.distribution_utils.get_distrib... | [((1583, 1639), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(12, 12, 1)', 'name': '"""image12"""'}), "(shape=(12, 12, 1), name='image12')\n", (1604, 1639), True, 'import tensorflow as tf\n'), ((1653, 1706), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(6, 6, 1)', 'name': '"""image6"""'}), "(shape=(6, 6, 1), name='image6')\n", (1674, 1706), True, 'import tensorflow as tf\n'), ((1720, 1773), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3, 3, 1)', 'name': '"""image3"""'}), "(shape=(3, 3, 1), name='image3')\n", (1741, 1773), True, 'import tensorflow as tf\n'), ((1785, 1836), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3, 3, 1)', 'name': '"""road"""'}), "(shape=(3, 3, 1), name='road')\n", (1806, 1836), True, 'import tensorflow as tf\n'), ((1851, 1905), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(3, 3, 1)', 'name': '"""roadExt"""'}), "(shape=(3, 3, 1), name='roadExt')\n", (1872, 1905), True, 'import tensorflow as tf\n'), ((2273, 2322), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[y, image6]'], {'axis': '(-1)'}), '([y, image6], axis=-1)\n', (2300, 2322), True, 'import tensorflow as tf\n'), ((2685, 2749), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[y, image3, road, roadExt]'], {'axis': '(-1)'}), '([y, image3, road, roadExt], axis=-1)\n', (2712, 2749), True, 'import tensorflow as tf\n'), ((3320, 3414), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['[image12, image6, image3, road, roadExt]', 'probs'], {'name': '"""my_dataset"""'}), "([image12, image6, image3, road, roadExt], probs, name\n ='my_dataset')\n", (3341, 3414), True, 'import tensorflow as tf\n'), ((4165, 4212), 'official.utils.misc.distribution_utils.get_strategy_scope', 'distribution_utils.get_strategy_scope', (['strategy'], {}), 
'(strategy)\n', (4202, 4212), False, 'from official.utils.misc import distribution_utils\n'), ((4226, 4292), 'tensorflow_datasets.builder', 'tfds.builder', (['"""traffic_state_predict"""'], {'data_dir': 'flags_obj.data_dir'}), "('traffic_state_predict', data_dir=flags_obj.data_dir)\n", (4238, 4292), True, 'import tensorflow_datasets as tfds\n'), ((10052, 10178), 'official.utils.flags.core.define_base', 'flags_core.define_base', ([], {'clean': '(True)', 'num_gpu': '(True)', 'train_epochs': '(True)', 'epochs_between_evals': '(True)', 'distribution_strategy': '(True)'}), '(clean=True, num_gpu=True, train_epochs=True,\n epochs_between_evals=True, distribution_strategy=True)\n', (10074, 10178), True, 'from official.utils.flags import core as flags_core\n'), ((10220, 10246), 'official.utils.flags.core.define_device', 'flags_core.define_device', ([], {}), '()\n', (10244, 10246), True, 'from official.utils.flags import core as flags_core\n'), ((10251, 10283), 'official.utils.flags.core.define_distribution', 'flags_core.define_distribution', ([], {}), '()\n', (10281, 10283), True, 'from official.utils.flags import core as flags_core\n'), ((10288, 10373), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""download"""', '(False)', '"""Whether to download data to `--data_dir`."""'], {}), "('download', False,\n 'Whether to download data to `--data_dir`.')\n", (10305, 10373), False, 'from absl import flags\n'), ((10453, 10485), 'official.utils.misc.model_helpers.apply_clean', 'model_helpers.apply_clean', (['FLAGS'], {}), '(FLAGS)\n', (10478, 10485), False, 'from official.utils.misc import model_helpers\n'), ((10519, 10556), 'absl.logging.info', 'logging.info', (['"""Run stats:\n%s"""', 'stats'], {}), "('Run stats:\\n%s', stats)\n", (10531, 10556), False, 'from absl import logging\n'), ((10590, 10625), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (10611, 10625), False, 'from absl import logging\n'), ((10655, 10668), 
'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (10662, 10668), False, 'from absl import app\n'), ((1915, 2003), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(8)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=8, kernel_size=3, padding='same', activation\n ='relu')\n", (1937, 2003), True, 'import tensorflow as tf\n'), ((2109, 2187), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='same')\n", (2137, 2187), True, 'import tensorflow as tf\n'), ((2332, 2420), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(12)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=12, kernel_size=3, padding='same',\n activation='relu')\n", (2354, 2420), True, 'import tensorflow as tf\n'), ((2521, 2599), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='same')\n", (2549, 2599), True, 'import tensorflow as tf\n'), ((2759, 2847), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=16, kernel_size=3, padding='same',\n activation='relu')\n", (2781, 2847), True, 'import tensorflow as tf\n'), ((2948, 3026), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(3, 3)', 'padding': '"""same"""'}), "(pool_size=(3, 3), strides=(3, 3), padding='same')\n", (2976, 3026), True, 'import tensorflow as tf\n'), ((3113, 3138), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (3136, 3138), True, 'import tensorflow as tf\n'), ((3257, 3303), 'tensorflow.keras.layers.Dense', 
'tf.keras.layers.Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (3278, 3303), True, 'import tensorflow as tf\n'), ((3962, 4126), 'official.utils.misc.distribution_utils.get_distribution_strategy', 'distribution_utils.get_distribution_strategy', ([], {'distribution_strategy': 'flags_obj.distribution_strategy', 'num_gpus': 'flags_obj.num_gpus', 'tpu_address': 'flags_obj.tpu'}), '(distribution_strategy=\n flags_obj.distribution_strategy, num_gpus=flags_obj.num_gpus,\n tpu_address=flags_obj.tpu)\n', (4006, 4126), False, 'from official.utils.misc import distribution_utils\n'), ((5069, 5163), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['(0.005)'], {'decay_steps': '(100000)', 'decay_rate': '(0.96)'}), '(0.005, decay_steps=100000,\n decay_rate=0.96)\n', (5115, 5163), True, 'import tensorflow as tf\n'), ((5193, 5244), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), '(learning_rate=lr_schedule)\n', (5217, 5244), True, 'import tensorflow as tf\n'), ((5510, 5567), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'myModel': 'model', 'myOptimizer': 'optimizer'}), '(myModel=model, myOptimizer=optimizer)\n', (5529, 5567), True, 'import tensorflow as tf\n'), ((5585, 5694), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': '"""./save0"""', 'checkpoint_name': '"""model.ckpt"""', 'max_to_keep': '(5)'}), "(checkpoint, directory='./save0', checkpoint_name\n ='model.ckpt', max_to_keep=5)\n", (5611, 5694), True, 'import tensorflow as tf\n'), ((7208, 7273), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['truth', 'predict'], {'labels': '[0, 1, 2]'}), '(truth, predict, labels=[0, 1, 2])\n', (7239, 7273), False, 'from sklearn.metrics import f1_score, precision_recall_fscore_support\n'), ((8446, 8523), 
'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['truth_train', 'predict_train'], {'labels': '[0, 1, 2]'}), '(truth_train, predict_train, labels=[0, 1, 2])\n', (8477, 8523), False, 'from sklearn.metrics import f1_score, precision_recall_fscore_support\n'), ((9006, 9054), 'os.path.join', 'os.path.join', (['flags_obj.model_dir', '"""saved_model"""'], {}), "(flags_obj.model_dir, 'saved_model')\n", (9018, 9054), False, 'import os\n'), ((6885, 6912), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(-1)'}), '(outputs, axis=-1)\n', (6894, 6912), True, 'import numpy as np\n'), ((8023, 8056), 'numpy.argmax', 'np.argmax', (['outputs_train'], {'axis': '(-1)'}), '(outputs_train, axis=-1)\n', (8032, 8056), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import sys
import os
import peakutils
from scipy import signal
def detectKanji(img):
    """Split a page image into horizontal text-line strips.

    The image is binarised with Otsu thresholding, the number of ink
    (black) pixels per row is smoothed, and the peaks of that profile
    are taken as line centres.  Returns a list of ~150px-tall crops,
    one per detected line.
    """
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(img_gray, (11, 11), 0)
    ret3, img_thres = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    height, width = img_thres.shape[:2]
    print("列:", height, width)
    # Ink-pixel count per row.  Fixes the original nested Python loops,
    # which skipped the last row and column (off-by-one) and ran in
    # O(height*width) Python time; also drops a dead `global kanji_id`.
    shadow_height = np.count_nonzero(img_thres == 0, axis=1)
    # Smooth the profile so each text line yields a single peak.
    y1 = signal.savgol_filter(shadow_height, 151, 5)
    indexes1 = peakutils.indexes(y1, thres=0.1, min_dist=80)
    lines = []
    for index in indexes1:
        # Take a 150px band around the peak, clamped to the image top.
        startHeight = index - 75
        if startHeight < 0:
            startHeight = 0
        endHeight = index + 75
        rect = img[startHeight:endHeight, 0:width]
        lines.append(rect)
    print("line num:" + str(len(indexes1)))
    return lines
if __name__ == "__main__":
    # Usage: python <script> <image-path>
    filename = sys.argv[1]
    img = cv2.imread(filename)
    lines = detectKanji(img)
    count = 1
    for char in lines:
        # Write each detected line crop into ./test (created on demand).
        path = os.path.join('./', 'test')
        if os.path.exists(path) is False:
            os.mkdir(path)
        cv2.imwrite(os.path.join(path, '%2d0.jpg' % count), char)
        count += 1
    # Show the original page until a key is pressed.
    cv2.namedWindow("window1", cv2.WINDOW_NORMAL)
    cv2.imshow("window1", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
cv2.destroyAllWindows() | [
"os.path.exists",
"cv2.threshold",
"os.path.join",
"scipy.signal.savgol_filter",
"cv2.imshow",
"peakutils.indexes",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"os.mkdir",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.imread",
"cv2.namedWindow"
] | [((131, 168), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (143, 168), False, 'import cv2\n'), ((180, 219), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img_gray', '(11, 11)', '(0)'], {}), '(img_gray, (11, 11), 0)\n', (196, 219), False, 'import cv2\n'), ((242, 306), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (255, 306), False, 'import cv2\n'), ((606, 629), 'numpy.array', 'np.array', (['shadow_height'], {}), '(shadow_height)\n', (614, 629), True, 'import numpy as np\n'), ((639, 682), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['shadow_height', '(151)', '(5)'], {}), '(shadow_height, 151, 5)\n', (659, 682), False, 'from scipy import signal\n'), ((786, 831), 'peakutils.indexes', 'peakutils.indexes', (['y1'], {'thres': '(0.1)', 'min_dist': '(80)'}), '(y1, thres=0.1, min_dist=80)\n', (803, 831), False, 'import peakutils\n'), ((1295, 1315), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1305, 1315), False, 'import cv2\n'), ((1571, 1616), 'cv2.namedWindow', 'cv2.namedWindow', (['"""window1"""', 'cv2.WINDOW_NORMAL'], {}), "('window1', cv2.WINDOW_NORMAL)\n", (1586, 1616), False, 'import cv2\n'), ((1620, 1646), 'cv2.imshow', 'cv2.imshow', (['"""window1"""', 'img'], {}), "('window1', img)\n", (1630, 1646), False, 'import cv2\n'), ((1650, 1664), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1661, 1664), False, 'import cv2\n'), ((1669, 1692), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1690, 1692), False, 'import cv2\n'), ((1392, 1418), 'os.path.join', 'os.path.join', (['"""./"""', '"""test"""'], {}), "('./', 'test')\n", (1404, 1418), False, 'import os\n'), ((1429, 1449), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1443, 1449), False, 'import os\n'), ((1472, 1486), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', 
(1480, 1486), False, 'import os\n'), ((1507, 1545), 'os.path.join', 'os.path.join', (['path', "('%2d0.jpg' % count)"], {}), "(path, '%2d0.jpg' % count)\n", (1519, 1545), False, 'import os\n')] |
"""
Collection of approximation methods
Global methods are used on a distribution as a wrapper. Local
function are used by the graph-module as part of calculations.
Functions
---------
pdf Probability density function (local)
pdf_full Probability density function (global)
ppf Inverse CDF (local)
inv Inverse CDF (global)
mom Raw statistical moments (global)
find_interior_point Find an interior point (global)
"""
import numpy
import chaospy.quad
from .baseclass import Dist
def pdf(dist, x, G, eps=1.e-7, verbose=False,
        retall=False):
    """
    Calculate the probability density function locally.

    The density is approximated by a finite difference of the forward
    (cumulative distribution) mapping along each dimension, evaluated
    through the calculation graph G.

    Parameters
    ----------
    dist : Dist
        Distribution in question. May not be an advanced variable.
    x : numpy.ndarray
        Location coordinates. Requires that x.shape=(len(dist), K).
    G : Graph
        The chaospy state of the distribution calculations.
    eps : float
        Acceptable error level for the approximations
    verbose : bool
        Unused; retained for interface compatibility.
    retall : bool
        If True return Graph with the next calculation state with the
        approximation.

    Returns
    -------
    out[, G]
        out : numpy.ndarray
            Local probability density function with out.shape=x.shape.
            To calculate actual density function: numpy.prod(out, 0)
        G : Graph
            The chaospy calculation state after approximation is complete.
    """
    x = numpy.asfarray(x)
    lo,up = numpy.min(x), numpy.max(x)
    mu = .5*(lo+up)
    # step towards the midpoint of the data so the perturbed point stays
    # on the interior: +eps below the midpoint, -eps above it
    eps = numpy.where(x<mu, eps, -eps)

    # temporarily evaluate the graph in forward (CDF) mode
    G.__call__ = G.fwd_call

    out = numpy.empty(x.shape)
    for d in range(len(dist)):
        # perturb one dimension at a time; x is restored afterwards
        x[d] += eps[d]
        out[d] = G.copy()(x.copy(), dist)[d]
        x[d] -= eps[d]

    # finite difference: |F(x+eps) - F(x)| / eps (abs handles the sign of eps)
    out = numpy.abs((out-G(x.copy(), dist))/eps)

    # restore density-evaluation mode before returning
    G.__call__ = G.pdf_call

    if retall:
        return out, G
    return out
def pdf_full(dist, x, eps=1.e-7, verbose=False,
        retall=False):
    """
    Calculate the probability density function globaly.

    The density is estimated by a finite difference of the forward
    (cumulative distribution) mapping: each dimension is perturbed by eps
    towards the interior of the domain and the change in dist.fwd is
    divided by eps.

    Parameters
    ----------
    dist : Dist
        Distribution in question. May not be an advanced variable.
    x : numpy.ndarray
        Location coordinates. Requires that x.shape=(len(dist), K).
    eps : float
        Acceptable error level for the approximations
    verbose : bool
        Unused; retained for interface compatibility.
    retall : bool
        If True return Graph with the next calculation state with the
        approximation.

    Returns
    -------
    out[, G]
        out : numpy.ndarray
            Global probability density function with out.shape=x.shape.
            To calculate actual density function: numpy.prod(out, 0)
        G : Graph
            The chaospy calculation state after approximation is complete.
    """
    dim = len(dist)
    x = numpy.asfarray(x)
    shape = x.shape

    # integer division: x.size/dim is a float on Python 3 and
    # numpy.reshape rejects non-integer dimensions
    x = x.reshape(dim, x.size // dim)

    xdx = x.copy()
    y, G = dist.fwd(x, retall=True)

    lo, up = dist.range(x)
    mu = .5*(lo+up)

    out = numpy.empty(shape)

    eps = eps*numpy.ones(dim)
    for i in range(dim):

        # step towards the interior of the domain so the perturbed point
        # stays within the distribution's range
        eps_ = numpy.where(x[i] < mu[i], eps[i], -eps[i])
        xdx[i] += eps_
        out[i] = numpy.abs(dist.fwd(xdx)[i]-y[i])/eps[i]
        xdx[i] -= eps_

    if retall:
        return out, G
    return out
def inv(dist, q, maxiter=100, tol=1e-5, retall=False,
        verbose=False):
    """
    Calculate the approximation of the point percentile function.

    Uses a safeguarded Newton iteration: a bracketing interval [lo, up]
    is maintained and the Newton step is replaced by bisection whenever
    it would leave the bracket.

    Parameters
    ----------
    dist : Dist
        Distribution to estimate ppf.
    q : numpy.ndarray
        Input values. All values must be on [0,1] and
        `q.shape==(dim,size)` where dim is the number of dimensions in
        dist and size is the number of values to calculate
        simultaneously.
    maxiter : int
        The maximum number of iterations allowed before aborting
    tol : float
        Tolerance parameter determining convergence.
    retall : bool
        If true, return all.
    verbose : bool
        Unused; retained for interface compatibility.

    Returns
    -------
    x[, itrs, y]
        x : numpy.ndarray
            Distribution definition values.
        itrs : int
            The number of iterations used before converging.
        y : numpy.ndarray
            The model forward transformed value in x
    """
    dim = len(dist)
    # integer division: q.size/dim is a float on Python 3 and reshape
    # rejects non-integer dimensions
    size = q.size // dim
    q = q.reshape(dim, size)

    lo, up = dist.range(numpy.zeros((dim, size)))
    lo = lo*numpy.ones((dim, size))
    up = up*numpy.ones((dim, size))

    span = .5*(up-lo)

    # expand the lower bound until the whole CDF mass lies above it
    too_much = numpy.any(dist.fwd(lo) > 0, 0)
    while numpy.any(too_much):
        lo[:, too_much] -= span[:, too_much]
        too_much[too_much] = numpy.any(dist.fwd(lo)[:, too_much] > 0, 0)

    # expand the upper bound until the whole CDF mass lies below it
    too_little = numpy.any(dist.fwd(up) < 1, 0)
    while numpy.any(too_little):
        up[:, too_little] += span[:, too_little]
        too_little[too_little] = numpy.any(dist.fwd(up)[:, too_little] < 1, 0)

    # initial guess: linear interpolation between the bounds
    x = (up-lo)*q + lo
    flo, fup = -q, 1-q
    fx = tol*10*numpy.ones((dim, size))
    # columns still being iterated on
    div = numpy.any((x < up)*(x > lo), 0)

    for iteration in range(1, maxiter+1):

        # residual of the forward mapping
        fx[:, div] = dist.fwd(x)[:, div]-q[:, div]

        # convergence test
        div[div] = numpy.any(numpy.abs(fx) > tol, 0)[div]
        if not numpy.any(div):
            break

        dfx = dist.pdf(x)[:, div]
        # avoid division by zero where the density vanishes
        dfx = numpy.where(dfx == 0, numpy.inf, dfx)

        # tighten the bracketing bounds using the sign of the residual
        lo_, up_ = dist.range(x)
        flo[:, div] = numpy.where(fx <= 0, fx, flo)[:, div]
        lo[:, div] = numpy.where(fx <= 0, x, lo)[:, div]
        lo = numpy.min([lo_, lo], 0)
        fup[:, div] = numpy.where(fx >= 0, fx, fup)[:, div]
        up[:, div] = numpy.where(fx >= 0, x, up)[:, div]
        up = numpy.max([up_, up], 0)

        # Newton increment
        xdx = x[:, div]-fx[:, div]/dfx

        # use the Newton step if it stays inside the bracket,
        # otherwise fall back to bisection
        x[:, div] = numpy.where((xdx < up[:, div])*(xdx > lo[:, div]),
                xdx, .5*(up+lo)[:, div])

    if retall:
        return x, iteration, dist.fwd(x)
    return x
def ppf(dist, q, G, maxiter=100, tol=1e-5, retall=False,
        verbose=False):
    """
    Calculate the approximation of the point percentile function.

    For non-advanced distributions the work is delegated to `inv`;
    otherwise a safeguarded Newton iteration is run through the
    calculation graph G.

    Parameters
    ----------
    dist : Dist
        Distribution to estimate ppf.
    q : numpy.ndarray
        Input values. All values must be on [0,1] and
        `q.shape==(dim,size)` where dim is the number of dimensions in
        dist and size is the number of values to calculate
        simultaneously.
    G : Graph
        The chaospy state of the distribution calculations.
    maxiter : int
        The maximum number of iterations allowed before aborting
    tol : float
        Tolerance parameter determining convergence.
    retall : bool
        If true, return all.
    verbose : bool
        Unused; retained for interface compatibility.

    Returns
    -------
    x[, itrs, Fx]
        x : numpy.ndarray
            Distribution definition values.
        itrs : int
            The number of iterations used before converging.
    """
    if not dist.advance:
        # temporarily swap in the graph's parameter set and delegate
        dist.prm, prm = G.K.build(), dist.prm
        out = inv(dist, q, maxiter, tol, retall, verbose)
        dist.prm = prm
        return out

    dim = len(dist)
    shape = q.shape
    # integer division: q.size/dim is a float on Python 3 and reshape
    # rejects non-integer dimensions
    size = q.size // dim
    q = q.reshape(dim, size)

    # sample the graph to get an initial bracketing of the support
    X = G.copy().run(size, "rnd")[1].node[dist]["key"]
    x = numpy.mean(X, -1)
    lo, up = numpy.min(X, -1), numpy.max(X, -1)
    lo = (lo*numpy.ones((size, dim))).T
    up = (up*numpy.ones((size, dim))).T

    # Initial values
    x = ((up.T-lo.T)*q.T + lo.T).T
    flo, fup = -q, 1-q
    fx = Fx = tol*10*numpy.ones((dim, size))
    dfx = 1.

    for iteration in range(1, maxiter+1):

        try:
            # eval function
            fx = G.copy().fwd_call(x, dist)
            success = (fx >= 0)*(fx <= 1)
            Fx = fx-q

            dfx = G.copy().pdf_call(x, dist)
            dfx = numpy.where(dfx == 0, numpy.inf, dfx)

        except Exception:
            # narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed
            success = numpy.zeros(size, dtype=bool)

        # convergence test
        # NOTE(review): this compares the raw CDF values fx, not the
        # residual Fx, against tol -- looks suspicious but is preserved
        # as-is; the iteration still terminates via maxiter
        if numpy.all(success) and numpy.all(numpy.abs(fx) < tol):
            break

        # reduce boundaries
        flo = numpy.where((Fx < 0)*success, Fx, flo)
        lo = numpy.where((Fx < 0)*success, x, lo)
        fup = numpy.where((Fx > 0)*success, Fx, fup)
        up = numpy.where((Fx > 0)*success, x, up)

        # Newton increment
        xdx = x-Fx/dfx

        # if new val on interior use Newton
        # else binary search
        x = numpy.where(success, xdx, .5*(up+lo))

    x = x.reshape(shape)
    if retall:
        return x, iteration, Fx
    return x
def mom(dist, K, retall=False, control_var=None,
        **kws):
    """
    Approximation method for estimation of raw statistical moments.

    Parameters
    ----------
    dist : Dist
        Distribution domain with dim=len(dist)
    K : numpy.ndarray
        The exponents of the moments of interest with shape (dim,K).

    Optional keywords

    control_var : Dist
        If provided will be used as a control variable to try to reduce
        the error.
    acc : int, optional
        The order of quadrature/MCI
    sparse : bool
        If True used Smolyak's sparse grid instead of normal tensor
        product grid in numerical integration.
    rule : str
        Quadrature rule

        Key     Description
        ----    -----------
        "G"     Optimal Gaussian quadrature from Golub-Welsch
                Slow for high order and composit is ignored.
        "E"     Gauss-Legendre quadrature
        "C"     Clenshaw-Curtis quadrature. Exponential growth rule is
                used when sparse is True to make the rule nested.

        Monte Carlo Integration
        Key     Description
        ----    -----------
        "H"     Halton sequence
        "K"     Korobov set
        "L"     Latin hypercube sampling
        "M"     Hammersley sequence
        "R"     (Pseudo-)Random sampling
        "S"     Sobol sequence

    composit : int, array_like optional
        If provided, composit quadrature will be used.
        Ignored in the case if gaussian=True.

        If int provided, determines number of even domain splits
        If array of ints, determines number of even domain splits along
        each axis
        If array of arrays/floats, determines location of splits

    antithetic : array_like, optional
        List of bool. Represents the axes to mirror using antithetic
        variable during MCI.
    """
    dim = len(dist)
    shape = K.shape
    # integer floor division replaces int(K.size/dim) -- same value,
    # no intermediate float
    size = K.size // dim
    K = K.reshape(dim, size)

    if dim > 1:
        shape = shape[1:]

    order = kws.pop("order", 40)
    X, W = chaospy.quad.generate_quadrature(order, dist, **kws)

    # pair every quadrature node with every moment exponent
    grid = numpy.mgrid[:len(X[0]), :size]
    X = X.T[grid[0]].T
    K = K.T[grid[1]].T

    out = numpy.prod(X**K, 0)*W

    if control_var is not None:  # idiom: was `not (control_var is None)`

        Y = control_var.ppf(dist.fwd(X))
        mu = control_var.mom(numpy.eye(len(control_var)))

        if mu.size == 1 and dim > 1:
            mu = mu.repeat(dim)

        # control-variate correction: subtract the part of out that is
        # linearly correlated with Y
        for d in range(dim):
            alpha = numpy.cov(out, Y[d])[0, 1]/numpy.var(Y[d])
            out -= alpha*(Y[d]-mu)

    out = numpy.sum(out, -1)
    return out
def find_interior_point(dist):
    """
    Find interior point using the range-function

    Parameters
    ----------
    dist : Dist
        Distribution to find interior on.

    Returns
    -------
    interior_point : numpy.ndarray
        shape=(len(dist),)
    """
    # the median is an interior point whenever the inverse transform works
    try:
        x = dist.inv([.5]*len(dist))
        return x
    except Exception:
        # narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed
        pass

    # otherwise iterate towards a fixed point of the half-range width
    bnd = dist.range(numpy.zeros(len(dist)))
    x = .5*(bnd[1]-bnd[0])
    for _ in range(10):
        bnd = dist.range(x)
        x_ = .5*(bnd[1]-bnd[0])
        if numpy.allclose(x, x_):
            break
        x = x_
    return x
# TODO: integrate these two functions.
def ttr(order, domain, **kws):
prm = kws
prm["accuracy"] = order
prm["retall"] = True
def _three_terms_recursion(self, keys, **kws):
_, _, coeffs1, coeffs2 = chaospy.quad.generate_stieltjes(
domain, numpy.max(keys)+1, **self1.prm)
out = numpy.ones((2,) + keys.shape)
idx = 0
for idzs in keys.T:
idy = 0
for idz in idzs:
if idz:
out[:, idy, idx] = coeffs1[idy, idz], coeffs2[idy, idz]
idy += 1
idx += 1
return _three_terms_recursion
def moment_generator(order, domain, accuracy=100, sparse=False, rule="C",
                     composite=1, part=None, trans=lambda x:x, **kws):
    """
    Moment generator.

    Builds and returns a raw-moment function `mom(keys, **kws)` over
    `domain`, using either a single quadrature rule (part is None) or a
    partitioned Clenshaw-Curtis rule (part given).

    Parameters
    ----------
    order : int
        Quadrature order.
    domain : Dist or array_like
        Distribution (weight function) or explicit [lower, upper] bounds.
    trans : callable
        Transformation applied to the abscissas before taking moments;
        wrapped in a list if it returns a scalar-shaped result.
    part : tuple or None
        If given, the integration domain is split into this grid of parts
        and zero-weight parts are skipped.
    """
    if isinstance(domain, Dist):
        dim = len(domain)
    else:
        dim = numpy.array(domain[0]).size

    # if trans returns a scalar-shaped value, lift it to a 1-element list
    # so the transpose/indexing below works uniformly
    if not numpy.array(trans(numpy.zeros(dim))).shape:
        func = trans
        trans = lambda x: [func(x)]

    if part is None:

        # single global quadrature rule
        abscissas, weights = chaospy.quad.generate_quadrature(
            order, domain=domain, accuracy=accuracy, sparse=sparse,
            rule=rule, composite=composite, part=part, **kws)

        values = numpy.transpose(trans(abscissas))

        def moment_function(keys):
            """Raw statistical moment function."""
            return numpy.sum(numpy.prod(values**keys, -1)*weights, 0)
    else:
        # partitioned Clenshaw-Curtis: integrate each sub-domain separately
        isdist = isinstance(domain, Dist)
        if isdist:
            lower, upper = domain.range()
        else:
            lower, upper = numpy.array(domain)

        abscissas = []
        weights = []
        values = []
        for idx in numpy.ndindex(*part):
            abscissa, weight = chaospy.quad.collection.clenshaw_curtis(
                order, lower, upper, part=(idx, part))
            value = numpy.array(trans(abscissa))

            if isdist:
                # weight the rule by the density of the distribution
                weight *= domain.pdf(abscissa).flatten()

            # skip parts that carry no weight at all
            if numpy.any(weight):
                abscissas.append(abscissa)
                weights.append(weight)
                values.append(value)

        def moment_function(keys):
            """Raw statistical moment function."""
            out = 0.
            for idx in range(len(abscissas)):
                out += numpy.sum(
                    numpy.prod(values[idx].T**keys, -1)*weights[idx], 0)
            return out

    # NOTE: this inner `mom` intentionally shadows the module-level mom()
    def mom(keys, **kws):
        """Statistical moment function."""
        return numpy.array([moment_function(key) for key in keys.T])

    return mom
| [
"numpy.mean",
"numpy.prod",
"numpy.allclose",
"numpy.abs",
"numpy.ones",
"numpy.where",
"numpy.ndindex",
"numpy.any",
"numpy.max",
"numpy.asfarray",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.empty",
"numpy.cov",
"numpy.min",
"numpy.all",
"numpy.var"
] | [((1346, 1363), 'numpy.asfarray', 'numpy.asfarray', (['x'], {}), '(x)\n', (1360, 1363), False, 'import numpy\n'), ((1433, 1463), 'numpy.where', 'numpy.where', (['(x < mu)', 'eps', '(-eps)'], {}), '(x < mu, eps, -eps)\n', (1444, 1463), False, 'import numpy\n'), ((1502, 1522), 'numpy.empty', 'numpy.empty', (['x.shape'], {}), '(x.shape)\n', (1513, 1522), False, 'import numpy\n'), ((2536, 2553), 'numpy.asfarray', 'numpy.asfarray', (['x'], {}), '(x)\n', (2550, 2553), False, 'import numpy\n'), ((2719, 2737), 'numpy.empty', 'numpy.empty', (['shape'], {}), '(shape)\n', (2730, 2737), False, 'import numpy\n'), ((4096, 4115), 'numpy.any', 'numpy.any', (['too_much'], {}), '(too_much)\n', (4105, 4115), False, 'import numpy\n'), ((4287, 4308), 'numpy.any', 'numpy.any', (['too_little'], {}), '(too_little)\n', (4296, 4308), False, 'import numpy\n'), ((4552, 4585), 'numpy.any', 'numpy.any', (['((x < up) * (x > lo))', '(0)'], {}), '((x < up) * (x > lo), 0)\n', (4561, 4585), False, 'import numpy\n'), ((6669, 6686), 'numpy.mean', 'numpy.mean', (['X', '(-1)'], {}), '(X, -1)\n', (6679, 6686), False, 'import numpy\n'), ((10284, 10302), 'numpy.sum', 'numpy.sum', (['out', '(-1)'], {}), '(out, -1)\n', (10293, 10302), False, 'import numpy\n'), ((1376, 1388), 'numpy.min', 'numpy.min', (['x'], {}), '(x)\n', (1385, 1388), False, 'import numpy\n'), ((1390, 1402), 'numpy.max', 'numpy.max', (['x'], {}), '(x)\n', (1399, 1402), False, 'import numpy\n'), ((2752, 2767), 'numpy.ones', 'numpy.ones', (['dim'], {}), '(dim)\n', (2762, 2767), False, 'import numpy\n'), ((2809, 2851), 'numpy.where', 'numpy.where', (['(x[i] < mu[i])', 'eps[i]', '(-eps[i])'], {}), '(x[i] < mu[i], eps[i], -eps[i])\n', (2820, 2851), False, 'import numpy\n'), ((3923, 3947), 'numpy.zeros', 'numpy.zeros', (['(dim, size)'], {}), '((dim, size))\n', (3934, 3947), False, 'import numpy\n'), ((3961, 3984), 'numpy.ones', 'numpy.ones', (['(dim, size)'], {}), '((dim, size))\n', (3971, 3984), False, 'import numpy\n'), ((3996, 4019), 
'numpy.ones', 'numpy.ones', (['(dim, size)'], {}), '((dim, size))\n', (4006, 4019), False, 'import numpy\n'), ((4519, 4542), 'numpy.ones', 'numpy.ones', (['(dim, size)'], {}), '((dim, size))\n', (4529, 4542), False, 'import numpy\n'), ((4877, 4914), 'numpy.where', 'numpy.where', (['(dfx == 0)', 'numpy.inf', 'dfx'], {}), '(dfx == 0, numpy.inf, dfx)\n', (4888, 4914), False, 'import numpy\n'), ((5096, 5119), 'numpy.min', 'numpy.min', (['[lo_, lo]', '(0)'], {}), '([lo_, lo], 0)\n', (5105, 5119), False, 'import numpy\n'), ((5243, 5266), 'numpy.max', 'numpy.max', (['[up_, up]', '(0)'], {}), '([up_, up], 0)\n', (5252, 5266), False, 'import numpy\n'), ((5425, 5511), 'numpy.where', 'numpy.where', (['((xdx < up[:, div]) * (xdx > lo[:, div]))', 'xdx', '(0.5 * (up + lo)[:, div])'], {}), '((xdx < up[:, div]) * (xdx > lo[:, div]), xdx, 0.5 * (up + lo)[:,\n div])\n', (5436, 5511), False, 'import numpy\n'), ((6699, 6715), 'numpy.min', 'numpy.min', (['X', '(-1)'], {}), '(X, -1)\n', (6708, 6715), False, 'import numpy\n'), ((6717, 6733), 'numpy.max', 'numpy.max', (['X', '(-1)'], {}), '(X, -1)\n', (6726, 6733), False, 'import numpy\n'), ((6913, 6936), 'numpy.ones', 'numpy.ones', (['(dim, size)'], {}), '((dim, size))\n', (6923, 6936), False, 'import numpy\n'), ((7460, 7500), 'numpy.where', 'numpy.where', (['((Fx < 0) * success)', 'Fx', 'flo'], {}), '((Fx < 0) * success, Fx, flo)\n', (7471, 7500), False, 'import numpy\n'), ((7510, 7548), 'numpy.where', 'numpy.where', (['((Fx < 0) * success)', 'x', 'lo'], {}), '((Fx < 0) * success, x, lo)\n', (7521, 7548), False, 'import numpy\n'), ((7560, 7600), 'numpy.where', 'numpy.where', (['((Fx > 0) * success)', 'Fx', 'fup'], {}), '((Fx > 0) * success, Fx, fup)\n', (7571, 7600), False, 'import numpy\n'), ((7610, 7648), 'numpy.where', 'numpy.where', (['((Fx > 0) * success)', 'x', 'up'], {}), '((Fx > 0) * success, x, up)\n', (7621, 7648), False, 'import numpy\n'), ((7782, 7824), 'numpy.where', 'numpy.where', (['success', 'xdx', '(0.5 * (up + lo))'], 
{}), '(success, xdx, 0.5 * (up + lo))\n', (7793, 7824), False, 'import numpy\n'), ((9923, 9944), 'numpy.prod', 'numpy.prod', (['(X ** K)', '(0)'], {}), '(X ** K, 0)\n', (9933, 9944), False, 'import numpy\n'), ((10814, 10835), 'numpy.allclose', 'numpy.allclose', (['x', 'x_'], {}), '(x, x_)\n', (10828, 10835), False, 'import numpy\n'), ((11207, 11236), 'numpy.ones', 'numpy.ones', (['((2,) + keys.shape)'], {}), '((2,) + keys.shape)\n', (11217, 11236), False, 'import numpy\n'), ((12592, 12612), 'numpy.ndindex', 'numpy.ndindex', (['*part'], {}), '(*part)\n', (12605, 12612), False, 'import numpy\n'), ((4795, 4809), 'numpy.any', 'numpy.any', (['div'], {}), '(div)\n', (4804, 4809), False, 'import numpy\n'), ((4995, 5024), 'numpy.where', 'numpy.where', (['(fx <= 0)', 'fx', 'flo'], {}), '(fx <= 0, fx, flo)\n', (5006, 5024), False, 'import numpy\n'), ((5050, 5077), 'numpy.where', 'numpy.where', (['(fx <= 0)', 'x', 'lo'], {}), '(fx <= 0, x, lo)\n', (5061, 5077), False, 'import numpy\n'), ((5142, 5171), 'numpy.where', 'numpy.where', (['(fx >= 0)', 'fx', 'fup'], {}), '(fx >= 0, fx, fup)\n', (5153, 5171), False, 'import numpy\n'), ((5197, 5224), 'numpy.where', 'numpy.where', (['(fx >= 0)', 'x', 'up'], {}), '(fx >= 0, x, up)\n', (5208, 5224), False, 'import numpy\n'), ((6747, 6770), 'numpy.ones', 'numpy.ones', (['(size, dim)'], {}), '((size, dim))\n', (6757, 6770), False, 'import numpy\n'), ((6786, 6809), 'numpy.ones', 'numpy.ones', (['(size, dim)'], {}), '((size, dim))\n', (6796, 6809), False, 'import numpy\n'), ((7202, 7239), 'numpy.where', 'numpy.where', (['(dfx == 0)', 'numpy.inf', 'dfx'], {}), '(dfx == 0, numpy.inf, dfx)\n', (7213, 7239), False, 'import numpy\n'), ((7346, 7364), 'numpy.all', 'numpy.all', (['success'], {}), '(success)\n', (7355, 7364), False, 'import numpy\n'), ((11768, 11790), 'numpy.array', 'numpy.array', (['domain[0]'], {}), '(domain[0])\n', (11779, 11790), False, 'import numpy\n'), ((12488, 12507), 'numpy.array', 'numpy.array', (['domain'], {}), 
'(domain)\n', (12499, 12507), False, 'import numpy\n'), ((12887, 12904), 'numpy.any', 'numpy.any', (['weight'], {}), '(weight)\n', (12896, 12904), False, 'import numpy\n'), ((7277, 7306), 'numpy.zeros', 'numpy.zeros', (['size'], {'dtype': 'bool'}), '(size, dtype=bool)\n', (7288, 7306), False, 'import numpy\n'), ((10222, 10237), 'numpy.var', 'numpy.var', (['Y[d]'], {}), '(Y[d])\n', (10231, 10237), False, 'import numpy\n'), ((11161, 11176), 'numpy.max', 'numpy.max', (['keys'], {}), '(keys)\n', (11170, 11176), False, 'import numpy\n'), ((4753, 4766), 'numpy.abs', 'numpy.abs', (['fx'], {}), '(fx)\n', (4762, 4766), False, 'import numpy\n'), ((7379, 7392), 'numpy.abs', 'numpy.abs', (['fx'], {}), '(fx)\n', (7388, 7392), False, 'import numpy\n'), ((10196, 10216), 'numpy.cov', 'numpy.cov', (['out', 'Y[d]'], {}), '(out, Y[d])\n', (10205, 10216), False, 'import numpy\n'), ((11826, 11842), 'numpy.zeros', 'numpy.zeros', (['dim'], {}), '(dim)\n', (11837, 11842), False, 'import numpy\n'), ((12292, 12322), 'numpy.prod', 'numpy.prod', (['(values ** keys)', '(-1)'], {}), '(values ** keys, -1)\n', (12302, 12322), False, 'import numpy\n'), ((13233, 13270), 'numpy.prod', 'numpy.prod', (['(values[idx].T ** keys)', '(-1)'], {}), '(values[idx].T ** keys, -1)\n', (13243, 13270), False, 'import numpy\n')] |
# ActivitySim
# See full license in LICENSE.txt.
from builtins import range
import logging
import numpy as np
import pandas as pd
from activitysim.core import logit
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import chunk
from activitysim.core import pipeline
from activitysim.core.util import reindex
from activitysim.abm.models.util.trip import failed_trip_cohorts
from activitysim.abm.models.util.trip import cleanup_failed_trips
from activitysim.abm.models.util import estimation
logger = logging.getLogger(__name__)
"""
StopDepartArrivePeriodModel
StopDepartArriveProportions.csv
tourpurp,isInbound,interval,trip,p1,p2,p3,p4,p5...p40
"""
# sentinel trip_id stored in next_trip_id meaning "no following trip in this leg"
NO_TRIP_ID = 0
# sentinel depart value
NO_DEPART = 0

# model_settings key: int added to a probs column index to get the time
# period that column represents (e.g. 5 means column 0 is 5 am)
DEPART_ALT_BASE = 'DEPART_ALT_BASE'

# model_settings key selecting how trips that fail to schedule are handled
FAILFIX = 'FAILFIX'
FAILFIX_CHOOSE_MOST_INITIAL = 'choose_most_initial'
FAILFIX_DROP_AND_CLEANUP = 'drop_and_cleanup'
FAILFIX_DEFAULT = FAILFIX_CHOOSE_MOST_INITIAL

# columns used to join trips with the rows of the depart-time probability spec
PROBS_JOIN_COLUMNS = ['primary_purpose', 'outbound', 'tour_hour', 'trip_num']
def set_tour_hour(trips, tours):
    """
    add columns 'tour_hour', 'earliest', 'latest' to trips

    Parameters
    ----------
    trips: pd.DataFrame
    tours: pd.DataFrame

    Returns
    -------
    modifies trips in place
    """

    # every trip must depart inside its tour's [start, end] window
    trips['earliest'] = reindex(tours.start, trips.tour_id)
    trips['latest'] = reindex(tours.end, trips.tour_id)

    # anchor hour: tour start for outbound legs, tour end for inbound legs
    trips['tour_hour'] = np.where(
        trips.outbound,
        trips['earliest'],
        trips['latest']).astype(np.int8)

    # at-work subtours further narrow the parent work tour's windows
    atwork = tours.loc[tours.primary_purpose == 'atwork',
                       ['tour_num', 'tour_count', 'parent_tour_id', 'start', 'end']]
    atwork.parent_tour_id = atwork.parent_tour_id.astype(np.int64)
    atwork = atwork.set_index('parent_tour_id')
    atwork = atwork.astype(np.int16)  # remaining columns are all small ints

    # bool series flagging trips whose tour has at-work subtours
    has_subtour = trips.tour_id.isin(atwork.index)

    # outbound trips must depart no later than the first subtour's start
    outbound_mask = has_subtour & trips.outbound
    trips.loc[outbound_mask, 'latest'] = \
        reindex(atwork[atwork.tour_num == 1]['start'], trips[outbound_mask].tour_id)

    # inbound trips must depart no earlier than the last subtour's end
    inbound_mask = has_subtour & ~trips.outbound
    trips.loc[inbound_mask, 'earliest'] = \
        reindex(atwork[atwork.tour_num == atwork.tour_count]['end'], trips[inbound_mask].tour_id)
def clip_probs(trips, probs, model_settings):
    """
    zero out probs before trips.earliest or after trips.latest

    Parameters
    ----------
    trips: pd.DataFrame
        must have 'earliest' and 'latest' columns, row-aligned with probs
    probs: pd.DataFrame
        one row per trip, one column per time period, with float prob of picking that time period
    model_settings: dict
        supplies DEPART_ALT_BASE - int added to a probs column index to get
        the time period it represents
        (e.g. depart_alt_base = 5 means first column (column 0) represents 5 am)

    Returns
    -------
    probs: pd.DataFrame
        clipped version of probs
    """

    depart_alt_base = model_settings.get(DEPART_ALT_BASE)

    # there should be one row in probs per trip
    assert trips.shape[0] == probs.shape[0]

    # renormalize rows so probs sum to 1 before clipping
    probs = probs.div(probs.sum(axis=1), axis=0)

    num_rows, num_cols = probs.shape

    # time period represented by each prob column (same for every row)
    periods = np.arange(num_cols) + depart_alt_base

    # each trip's allowed depart window, shaped for row-wise broadcasting
    earliest = trips.earliest.values.reshape(num_rows, 1)
    latest = trips.latest.values.reshape(num_rows, 1)

    # 1 where the period falls inside [earliest, latest], else 0
    clip_mask = ((periods >= earliest) & (periods <= latest)) * 1

    return probs * clip_mask
def report_bad_choices(bad_row_map, df, filename, trace_label, trace_choosers=None):
    """
    Dump the rows flagged by bad_row_map to a trace csv and log them.

    Parameters
    ----------
    bad_row_map
    df : pandas.DataFrame
        utils or probs dataframe
    trace_choosers : pandas.dataframe
        the choosers df (for interaction_simulate) to facilitate the reporting of hh_id
        because we can't deduce hh_id from the interaction_dataset which is indexed on index
        values from alternatives df
    """

    df = df[bad_row_map]

    # hh_id comes from trace_choosers when given, otherwise from df itself
    chooser_source = df if trace_choosers is None else trace_choosers
    hh_ids = tracing.hh_id_for_chooser(df.index, chooser_source)
    df['household_id'] = hh_ids

    filename = "%s.%s" % (trace_label, filename)
    logger.info("dumping %s" % filename)
    tracing.write_csv(df, file_name=filename, transpose=False)

    # log the indexes of the first MAX_PRINT offending rows
    MAX_PRINT = 0
    for idx in df.index[:MAX_PRINT].values:
        logger.warning(
            "%s : failed %s = %s (hh_id = %s)" %
            (trace_label, df.index.name, idx, df.household_id.loc[idx]))
def schedule_nth_trips(
        trips,
        probs_spec,
        model_settings,
        first_trip_in_leg,
        report_failed_trips,
        trace_hh_id,
        trace_label):
    """
    Choose depart time periods for one rank of trips (all with the same trip_num).

    We join each trip with the appropriate row in probs_spec by joining on PROBS_JOIN_COLUMNS,
    which should exist in both trips and the probs_spec dataframe.

    Parameters
    ----------
    trips: pd.DataFrame
    probs_spec: pd.DataFrame
        Dataframe of probs for choice of depart times and join columns to match them with trips.
        Depart columns names are irrelevant. Instead, they are position dependent,
        time period choice is their index + depart_alt_base
    model_settings: dict
        supplies DEPART_ALT_BASE - int added to a probs column index to get the time
        period it represents (e.g. depart_alt_base = 5 means column 0 represents 5 am)
    first_trip_in_leg: bool
        True when this rank holds the first scheduled trip of its leg
    report_failed_trips : bool
    trace_hh_id
    trace_label

    Returns
    -------
    choices: pd.Series
        time periods depart choices, one per trip (except for trips with zero probs)
    """

    # CONSISTENCY: use the module-level DEPART_ALT_BASE key (as clip_probs does)
    # instead of a duplicated string literal
    depart_alt_base = model_settings.get(DEPART_ALT_BASE)

    probs_cols = [c for c in probs_spec.columns if c not in PROBS_JOIN_COLUMNS]

    # left join trips to probs (there may be multiple rows per trip for multiple depart ranges)
    choosers = pd.merge(trips.reset_index(), probs_spec, on=PROBS_JOIN_COLUMNS,
                        how='left').set_index('trip_id')
    chunk.log_df(trace_label, "choosers", choosers)

    if trace_hh_id and tracing.has_trace_targets(trips):
        tracing.trace_df(choosers, '%s.choosers' % trace_label)

    # choosers should now match trips row for row
    assert choosers.index.is_unique
    assert len(choosers.index) == len(trips.index)

    # zero out probs outside each trip's earliest-latest depart window
    chooser_probs = clip_probs(trips, choosers[probs_cols], model_settings)
    chunk.log_df(trace_label, "chooser_probs", chooser_probs)

    if first_trip_in_leg:
        # probs should sum to 1 unless all zero
        chooser_probs = chooser_probs.div(chooser_probs.sum(axis=1), axis=0).fillna(0)

    # probs should sum to 1 with residual probs resulting in choice of 'fail'
    chooser_probs['fail'] = 1 - chooser_probs.sum(axis=1).clip(0, 1)
    chunk.log_df(trace_label, "chooser_probs", chooser_probs)

    if trace_hh_id and tracing.has_trace_targets(trips):
        tracing.trace_df(chooser_probs, '%s.chooser_probs' % trace_label)

    # monte carlo choice of a depart column (or 'fail') for each chooser row
    choices, rands = logit.make_choices(chooser_probs, trace_label=trace_label, trace_choosers=choosers)

    chunk.log_df(trace_label, "choices", choices)
    chunk.log_df(trace_label, "rands", rands)

    if trace_hh_id and tracing.has_trace_targets(trips):
        tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, 'depart'])
        tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand'])

    # convert alt choice index to depart time (setting failed choices to -1)
    failed = (choices == chooser_probs.columns.get_loc('fail'))
    choices = (choices + depart_alt_base).where(~failed, -1)

    chunk.log_df(trace_label, "failed", failed)

    # report failed trips while we have the best diagnostic info
    if report_failed_trips and failed.any():
        report_bad_choices(
            bad_row_map=failed,
            df=choosers,
            filename='failed_choosers',
            trace_label=trace_label,
            trace_choosers=None)

    # trace before removing failures
    if trace_hh_id and tracing.has_trace_targets(trips):
        tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, 'depart'])
        tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand'])

    # remove any failed choices
    if failed.any():
        choices = choices[~failed]

    # every surviving choice must fall within its trip's allowed window
    assert (choices >= trips.earliest[~failed]).all()
    assert (choices <= trips.latest[~failed]).all()

    return choices
def schedule_trips_in_leg(
        outbound,
        trips,
        probs_spec,
        model_settings,
        is_last_iteration,
        trace_hh_id, trace_label):
    """
    Schedule depart times for all trips in one leg (direction) of their tours.

    Trips are processed one trip_num rank at a time so that each scheduled
    depart can constrain the depart window of the next trip in the leg.

    Parameters
    ----------
    outbound : bool
        True for the outbound leg, False for inbound
    trips : pd.DataFrame
        all trips in this leg (trips.outbound must equal outbound)
    probs_spec : pd.DataFrame
    model_settings : dict
        supplies FAILFIX policy and DEPART_ALT_BASE
    is_last_iteration : bool
    trace_hh_id
    trace_label

    Returns
    -------
    choices: pd.Series
        depart choice for trips, indexed by trip_id
    """

    failfix = model_settings.get(FAILFIX, FAILFIX_DEFAULT)

    assert len(trips) > 0
    assert (trips.outbound == outbound).all()

    # initial trip of leg and all atwork trips get tour_hour
    is_initial = (trips.trip_num == 1) if outbound else (trips.trip_num == trips.trip_count)
    no_scheduling = is_initial | (trips.primary_purpose == 'atwork')
    choices = trips.tour_hour[no_scheduling]

    if no_scheduling.all():
        return choices

    result_list = []
    result_list.append(choices)
    trips = trips[~no_scheduling]

    # add next_trip_id temp column (temp as trips is now a copy, as result of slicing)
    trips = trips.sort_index()
    trips['next_trip_id'] = np.roll(trips.index, -1 if outbound else 1)
    is_final = (trips.trip_num == trips.trip_count) if outbound else (trips.trip_num == 1)
    trips.next_trip_id = trips.next_trip_id.where(~is_final, NO_TRIP_ID)

    # iterate over outbound trips in ascending trip_num order, skipping the initial trip
    # iterate over inbound trips in descending trip_num order, skipping the final trip
    first_trip_in_leg = True
    for i in range(trips.trip_num.min(), trips.trip_num.max() + 1):

        if outbound:
            nth_trips = trips[trips.trip_num == i]
        else:
            nth_trips = trips[trips.trip_num == trips.trip_count - i]

        nth_trace_label = tracing.extend_trace_label(trace_label, 'num_%s' % i)

        choices = schedule_nth_trips(
            nth_trips,
            probs_spec,
            model_settings,
            first_trip_in_leg=first_trip_in_leg,
            report_failed_trips=is_last_iteration,
            trace_hh_id=trace_hh_id,
            trace_label=nth_trace_label)

        # if outbound, this trip's depart constrains next trip's earliest depart option
        # if inbound, we are handling in reverse order, so it constrains latest depart instead
        ADJUST_NEXT_DEPART_COL = 'earliest' if outbound else 'latest'

        # most initial departure (when no choice was made because all probs were zero)
        if is_last_iteration and (failfix == FAILFIX_CHOOSE_MOST_INITIAL):
            choices = choices.reindex(nth_trips.index)
            logger.warning("%s coercing %s depart choices to most initial" %
                           (nth_trace_label, choices.isna().sum()))
            choices = choices.fillna(trips[ADJUST_NEXT_DEPART_COL])

        # adjust allowed depart range of next trip
        has_next_trip = (nth_trips.next_trip_id != NO_TRIP_ID)
        if has_next_trip.any():
            next_trip_ids = nth_trips.next_trip_id[has_next_trip]
            # patch choice any trips with next_trips that weren't scheduled
            trips.loc[next_trip_ids, ADJUST_NEXT_DEPART_COL] = \
                choices.reindex(next_trip_ids.index).fillna(trips[ADJUST_NEXT_DEPART_COL]).values

        result_list.append(choices)
        # IDIOM: plain string literal (was a constant f-string with no placeholders)
        chunk.log_df(trace_label, 'result_list', result_list)

        first_trip_in_leg = False

    if len(result_list) > 1:
        choices = pd.concat(result_list)

    return choices
def run_trip_scheduling(
        trips_chunk,
        tours,
        probs_spec,
        model_settings,
        estimator,
        is_last_iteration,
        chunk_size,
        chunk_tag,
        trace_hh_id,
        trace_label):
    """
    Schedule depart times for all trips in trips_chunk, one leg at a time.

    Outbound trips are scheduled first, then inbound trips, matching the
    original processing order; the per-leg results are concatenated.

    Parameters
    ----------
    trips_chunk : pd.DataFrame
        trips to schedule (must have an 'outbound' bool column)
    tours, probs_spec, model_settings, estimator, chunk_size, chunk_tag :
        passed through / retained for interface compatibility
    is_last_iteration : bool
    trace_hh_id
    trace_label

    Returns
    -------
    choices : pd.Series
        depart time choices, indexed by trip_id
    """

    result_list = []

    # DEDUP: the outbound and inbound branches were identical except for
    # the leg mask and trace label, so handle both legs in one loop
    for outbound, leg_name in ((True, 'outbound'), (False, 'inbound')):

        leg_mask = trips_chunk.outbound if outbound else ~trips_chunk.outbound
        if not leg_mask.any():
            continue

        leg_trace_label = tracing.extend_trace_label(trace_label, leg_name)
        choices = \
            schedule_trips_in_leg(
                outbound=outbound,
                trips=trips_chunk[leg_mask],
                probs_spec=probs_spec,
                model_settings=model_settings,
                is_last_iteration=is_last_iteration,
                trace_hh_id=trace_hh_id,
                trace_label=leg_trace_label)
        result_list.append(choices)

        # IDIOM: plain string literal (was a constant f-string with no placeholders)
        chunk.log_df(trace_label, 'result_list', result_list)

    choices = pd.concat(result_list)

    return choices
@inject.step()
def trip_scheduling(
        trips,
        tours,
        chunk_size,
        trace_hh_id):
    """
    Trip scheduling assigns depart times for trips within the start, end limits of the tour.

    The algorithm is simplistic:

    The first outbound trip starts at the tour start time, and subsequent outbound trips are
    processed in trip_num order, to ensure that subsequent trips do not depart before the
    trip that preceeds them.

    Inbound trips are handled similarly, except in reverse order, starting with the last trip,
    and working backwards to ensure that inbound trips do not depart after the trip that
    succeeds them.

    The probability spec assigns probabilities for depart times, but those possible departs must
    be clipped to disallow depart times outside the tour limits, the departs of prior trips, and
    in the case of work tours, the start/end times of any atwork subtours.

    Scheduling can fail if the probability table assigns zero probabilities to all the available
    depart times in a trip's depart window. (This could be avoided by giving every window a small
    probability, rather than zero, but the existing mtctm1 prob spec does not do this. I believe
    this is due to the its having been generated from a small household travel survey sample
    that lacked any departs for some time periods.)

    Rescheduling the trips that fail (along with their inbound or outbound leg-mates) can sometimes
    fix this problem, if it was caused by an earlier trip's depart choice blocking a subsequent
    trip's ability to schedule a depart within the resulting window. But it can also happen if
    a tour is very short (e.g. one time period) and the prob spec having a zero probability for
    that tour hour.

    Therefore we need to handle trips that could not be scheduled. There are two ways (at least)
    to solve this problem:

    1) choose_most_initial
    simply assign a depart time to the trip, even if it has a zero probability. It makes
    most sense, in this case, to assign the 'most initial' depart time, so that subsequent trips
    are minimally impacted. This can be done in the final iteration, thus affecting only the
    trips that could no be scheduled by the standard approach

    2) drop_and_cleanup
    drop trips that could no be scheduled, and adjust their leg mates, as is done for failed
    trips in trip_destination.

    Which option is applied is determined by the FAILFIX model setting
    """
    trace_label = "trip_scheduling"
    model_settings_file_name = 'trip_scheduling.yaml'
    model_settings = config.read_model_settings(model_settings_file_name)

    trips_df = trips.to_frame()
    tours = tours.to_frame()

    # add columns 'tour_hour', 'earliest', 'latest' to trips
    set_tour_hour(trips_df, tours)

    # trip_scheduling is a probabilistic model ane we don't support estimation,
    # but we do need to override choices in estimation mode
    estimator = estimation.manager.begin_estimation('trip_scheduling')
    if estimator:
        estimator.write_spec(model_settings, tag='PROBS_SPEC')
        estimator.write_model_settings(model_settings, model_settings_file_name)
        chooser_cols_for_estimation = ['person_id', 'household_id', 'tour_id', 'trip_num', 'trip_count',
                                       'primary_purpose', 'outbound', 'earliest', 'latest', 'tour_hour', ]
        estimator.write_choosers(trips_df[chooser_cols_for_estimation])

    probs_spec = pd.read_csv(config.config_file_path('trip_scheduling_probs.csv'), comment='#')
    # FIXME for now, not really doing estimation for probabilistic model - just overwriting choices
    # besides, it isn't clear that named coefficients would be helpful if we had some form of estimation
    # coefficients_df = simulate.read_model_coefficients(model_settings)
    # probs_spec = map_coefficients(probs_spec, coefficients_df)

    # add tour-based chunk_id so we can chunk all trips in tour together
    trips_df['chunk_id'] = reindex(pd.Series(list(range(len(tours))), tours.index), trips_df.tour_id)

    assert 'DEPART_ALT_BASE' in model_settings
    failfix = model_settings.get(FAILFIX, FAILFIX_DEFAULT)

    max_iterations = model_settings.get('MAX_ITERATIONS', 1)
    assert max_iterations > 0

    choices_list = []

    for chunk_i, trips_chunk, chunk_trace_label in chunk.adaptive_chunked_choosers_by_chunk_id(trips_df,
                                                                                            chunk_size,
                                                                                            trace_label,
                                                                                            trace_label):
        i = 0
        while (i < max_iterations) and not trips_chunk.empty:
            # only chunk log first iteration since memory use declines with each iteration
            with chunk.chunk_log(trace_label) if i == 0 else chunk.chunk_log_skip():
                i += 1
                is_last_iteration = (i == max_iterations)
                trace_label_i = tracing.extend_trace_label(trace_label, "i%s" % i)
                logger.info("%s scheduling %s trips within chunk %s", trace_label_i, trips_chunk.shape[0], chunk_i)
                choices = \
                    run_trip_scheduling(
                        trips_chunk,
                        tours,
                        probs_spec,
                        model_settings,
                        estimator=estimator,
                        is_last_iteration=is_last_iteration,
                        trace_hh_id=trace_hh_id,
                        chunk_size=chunk_size,
                        chunk_tag=trace_label,
                        trace_label=trace_label_i)

                # boolean series of trips whose individual trip scheduling failed
                failed = choices.reindex(trips_chunk.index).isnull()
                logger.info("%s %s failed", trace_label_i, failed.sum())

                if not is_last_iteration:
                    # boolean series of trips whose leg scheduling failed
                    failed_cohorts = failed_trip_cohorts(trips_chunk, failed)
                    # retry only the failed cohorts on the next iteration
                    trips_chunk = trips_chunk[failed_cohorts]
                    choices = choices[~failed_cohorts]

                choices_list.append(choices)

    trips_df = trips.to_frame()

    choices = pd.concat(choices_list)
    choices = choices.reindex(trips_df.index)

    if estimator:
        estimator.write_choices(choices)
        choices = estimator.get_survey_values(choices, 'trips', 'depart')  # override choices
        estimator.write_override_choices(choices)
        estimator.end_estimation()

    # BUG FIX: the original code asserted `not choices.isnull().any()` here,
    # which aborted the run before the FAILFIX handling below could ever
    # execute -- making the documented drop_and_cleanup option dead code.
    # Unscheduled trips are now handled (or raised) by the logic below.
    if choices.isnull().any():
        logger.warning("%s of %s trips could not be scheduled after %s iterations" %
                       (choices.isnull().sum(), trips_df.shape[0], i))

        if failfix != FAILFIX_DROP_AND_CLEANUP:
            raise RuntimeError("%s setting '%s' not enabled in settings" %
                               (FAILFIX, FAILFIX_DROP_AND_CLEANUP))

        # drop failed trips and adjust their leg mates, as in trip_destination
        trips_df['failed'] = choices.isnull()
        trips_df = cleanup_failed_trips(trips_df)
        choices = choices.reindex(trips_df.index)

    trips_df['depart'] = choices
    assert not trips_df.depart.isnull().any()

    pipeline.replace_table("trips", trips_df)
| [
"logging.getLogger",
"activitysim.core.util.reindex",
"activitysim.core.config.config_file_path",
"activitysim.core.chunk.log_df",
"activitysim.core.tracing.has_trace_targets",
"activitysim.core.tracing.extend_trace_label",
"numpy.arange",
"activitysim.core.logit.make_choices",
"numpy.where",
"act... | [((588, 615), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (605, 615), False, 'import logging\n'), ((14433, 14446), 'activitysim.core.inject.step', 'inject.step', ([], {}), '()\n', (14444, 14446), False, 'from activitysim.core import inject\n'), ((1374, 1409), 'activitysim.core.util.reindex', 'reindex', (['tours.start', 'trips.tour_id'], {}), '(tours.start, trips.tour_id)\n', (1381, 1409), False, 'from activitysim.core.util import reindex\n'), ((1432, 1465), 'activitysim.core.util.reindex', 'reindex', (['tours.end', 'trips.tour_id'], {}), '(tours.end, trips.tour_id)\n', (1439, 1465), False, 'from activitysim.core.util import reindex\n'), ((2234, 2309), 'activitysim.core.util.reindex', 'reindex', (["subtours[subtours.tour_num == 1]['start']", 'trips[outbound].tour_id'], {}), "(subtours[subtours.tour_num == 1]['start'], trips[outbound].tour_id)\n", (2241, 2309), False, 'from activitysim.core.util import reindex\n'), ((2408, 2503), 'activitysim.core.util.reindex', 'reindex', (["subtours[subtours.tour_num == subtours.tour_count]['end']", 'trips[inbound].tour_id'], {}), "(subtours[subtours.tour_num == subtours.tour_count]['end'], trips[\n inbound].tour_id)\n", (2415, 2503), False, 'from activitysim.core.util import reindex\n'), ((4657, 4715), 'activitysim.core.tracing.write_csv', 'tracing.write_csv', (['df'], {'file_name': 'filename', 'transpose': '(False)'}), '(df, file_name=filename, transpose=False)\n', (4674, 4715), False, 'from activitysim.core import tracing\n'), ((6439, 6486), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', '"""choosers"""', 'choosers'], {}), "(trace_label, 'choosers', choosers)\n", (6451, 6486), False, 'from activitysim.core import chunk\n'), ((6880, 6937), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', '"""chooser_probs"""', 'chooser_probs'], {}), "(trace_label, 'chooser_probs', chooser_probs)\n", (6892, 6937), False, 'from activitysim.core import chunk\n'), ((7252, 
7309), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', '"""chooser_probs"""', 'chooser_probs'], {}), "(trace_label, 'chooser_probs', chooser_probs)\n", (7264, 7309), False, 'from activitysim.core import chunk\n'), ((7464, 7552), 'activitysim.core.logit.make_choices', 'logit.make_choices', (['chooser_probs'], {'trace_label': 'trace_label', 'trace_choosers': 'choosers'}), '(chooser_probs, trace_label=trace_label, trace_choosers=\n choosers)\n', (7482, 7552), False, 'from activitysim.core import logit\n'), ((7553, 7598), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', '"""choices"""', 'choices'], {}), "(trace_label, 'choices', choices)\n", (7565, 7598), False, 'from activitysim.core import chunk\n'), ((7603, 7644), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', '"""rands"""', 'rands'], {}), "(trace_label, 'rands', rands)\n", (7615, 7644), False, 'from activitysim.core import chunk\n'), ((8081, 8124), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', '"""failed"""', 'failed'], {}), "(trace_label, 'failed', failed)\n", (8093, 8124), False, 'from activitysim.core import chunk\n'), ((10107, 10150), 'numpy.roll', 'np.roll', (['trips.index', '(-1 if outbound else 1)'], {}), '(trips.index, -1 if outbound else 1)\n', (10114, 10150), True, 'import numpy as np\n'), ((14387, 14409), 'pandas.concat', 'pd.concat', (['result_list'], {}), '(result_list)\n', (14396, 14409), True, 'import pandas as pd\n'), ((17044, 17096), 'activitysim.core.config.read_model_settings', 'config.read_model_settings', (['model_settings_file_name'], {}), '(model_settings_file_name)\n', (17070, 17096), False, 'from activitysim.core import config\n'), ((17413, 17467), 'activitysim.abm.models.util.estimation.manager.begin_estimation', 'estimation.manager.begin_estimation', (['"""trip_scheduling"""'], {}), "('trip_scheduling')\n", (17448, 17467), False, 'from activitysim.abm.models.util import estimation\n'), ((18807, 18902), 
'activitysim.core.chunk.adaptive_chunked_choosers_by_chunk_id', 'chunk.adaptive_chunked_choosers_by_chunk_id', (['trips_df', 'chunk_size', 'trace_label', 'trace_label'], {}), '(trips_df, chunk_size,\n trace_label, trace_label)\n', (18850, 18902), False, 'from activitysim.core import chunk\n'), ((20866, 20889), 'pandas.concat', 'pd.concat', (['choices_list'], {}), '(choices_list)\n', (20875, 20889), True, 'import pandas as pd\n'), ((21830, 21871), 'activitysim.core.pipeline.replace_table', 'pipeline.replace_table', (['"""trips"""', 'trips_df'], {}), "('trips', trips_df)\n", (21852, 21871), False, 'from activitysim.core import pipeline\n'), ((4410, 4449), 'activitysim.core.tracing.hh_id_for_chooser', 'tracing.hh_id_for_chooser', (['df.index', 'df'], {}), '(df.index, df)\n', (4435, 4449), False, 'from activitysim.core import tracing\n'), ((4477, 4528), 'activitysim.core.tracing.hh_id_for_chooser', 'tracing.hh_id_for_chooser', (['df.index', 'trace_choosers'], {}), '(df.index, trace_choosers)\n', (4502, 4528), False, 'from activitysim.core import tracing\n'), ((6511, 6543), 'activitysim.core.tracing.has_trace_targets', 'tracing.has_trace_targets', (['trips'], {}), '(trips)\n', (6536, 6543), False, 'from activitysim.core import tracing\n'), ((6553, 6608), 'activitysim.core.tracing.trace_df', 'tracing.trace_df', (['choosers', "('%s.choosers' % trace_label)"], {}), "(choosers, '%s.choosers' % trace_label)\n", (6569, 6608), False, 'from activitysim.core import tracing\n'), ((7334, 7366), 'activitysim.core.tracing.has_trace_targets', 'tracing.has_trace_targets', (['trips'], {}), '(trips)\n', (7359, 7366), False, 'from activitysim.core import tracing\n'), ((7376, 7441), 'activitysim.core.tracing.trace_df', 'tracing.trace_df', (['chooser_probs', "('%s.chooser_probs' % trace_label)"], {}), "(chooser_probs, '%s.chooser_probs' % trace_label)\n", (7392, 7441), False, 'from activitysim.core import tracing\n'), ((7669, 7701), 'activitysim.core.tracing.has_trace_targets', 
'tracing.has_trace_targets', (['trips'], {}), '(trips)\n', (7694, 7701), False, 'from activitysim.core import tracing\n'), ((7711, 7790), 'activitysim.core.tracing.trace_df', 'tracing.trace_df', (['choices', "('%s.choices' % trace_label)"], {'columns': "[None, 'depart']"}), "(choices, '%s.choices' % trace_label, columns=[None, 'depart'])\n", (7727, 7790), False, 'from activitysim.core import tracing\n'), ((7799, 7872), 'activitysim.core.tracing.trace_df', 'tracing.trace_df', (['rands', "('%s.rands' % trace_label)"], {'columns': "[None, 'rand']"}), "(rands, '%s.rands' % trace_label, columns=[None, 'rand'])\n", (7815, 7872), False, 'from activitysim.core import tracing\n'), ((8492, 8524), 'activitysim.core.tracing.has_trace_targets', 'tracing.has_trace_targets', (['trips'], {}), '(trips)\n', (8517, 8524), False, 'from activitysim.core import tracing\n'), ((8534, 8613), 'activitysim.core.tracing.trace_df', 'tracing.trace_df', (['choices', "('%s.choices' % trace_label)"], {'columns': "[None, 'depart']"}), "(choices, '%s.choices' % trace_label, columns=[None, 'depart'])\n", (8550, 8613), False, 'from activitysim.core import tracing\n'), ((8622, 8695), 'activitysim.core.tracing.trace_df', 'tracing.trace_df', (['rands', "('%s.rands' % trace_label)"], {'columns': "[None, 'rand']"}), "(rands, '%s.rands' % trace_label, columns=[None, 'rand'])\n", (8638, 8695), False, 'from activitysim.core import tracing\n'), ((10774, 10827), 'activitysim.core.tracing.extend_trace_label', 'tracing.extend_trace_label', (['trace_label', "('num_%s' % i)"], {}), "(trace_label, 'num_%s' % i)\n", (10800, 10827), False, 'from activitysim.core import tracing\n'), ((12303, 12357), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', 'f"""result_list"""', 'result_list'], {}), "(trace_label, f'result_list', result_list)\n", (12315, 12357), False, 'from activitysim.core import chunk\n'), ((12441, 12463), 'pandas.concat', 'pd.concat', (['result_list'], {}), '(result_list)\n', (12450, 12463), 
True, 'import pandas as pd\n'), ((13260, 13311), 'activitysim.core.tracing.extend_trace_label', 'tracing.extend_trace_label', (['trace_label', '"""outbound"""'], {}), "(trace_label, 'outbound')\n", (13286, 13311), False, 'from activitysim.core import tracing\n'), ((13701, 13755), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', 'f"""result_list"""', 'result_list'], {}), "(trace_label, f'result_list', result_list)\n", (13713, 13755), False, 'from activitysim.core import chunk\n'), ((13876, 13926), 'activitysim.core.tracing.extend_trace_label', 'tracing.extend_trace_label', (['trace_label', '"""inbound"""'], {}), "(trace_label, 'inbound')\n", (13902, 13926), False, 'from activitysim.core import tracing\n'), ((14317, 14371), 'activitysim.core.chunk.log_df', 'chunk.log_df', (['trace_label', 'f"""result_list"""', 'result_list'], {}), "(trace_label, f'result_list', result_list)\n", (14329, 14371), False, 'from activitysim.core import chunk\n'), ((17947, 17999), 'activitysim.core.config.config_file_path', 'config.config_file_path', (['"""trip_scheduling_probs.csv"""'], {}), "('trip_scheduling_probs.csv')\n", (17970, 17999), False, 'from activitysim.core import config\n'), ((21663, 21693), 'activitysim.abm.models.util.trip.cleanup_failed_trips', 'cleanup_failed_trips', (['trips_df'], {}), '(trips_df)\n', (21683, 21693), False, 'from activitysim.abm.models.util.trip import cleanup_failed_trips\n'), ((1563, 1623), 'numpy.where', 'np.where', (['trips.outbound', "trips['earliest']", "trips['latest']"], {}), "(trips.outbound, trips['earliest'], trips['latest'])\n", (1571, 1623), True, 'import numpy as np\n'), ((19554, 19604), 'activitysim.core.tracing.extend_trace_label', 'tracing.extend_trace_label', (['trace_label', "('i%s' % i)"], {}), "(trace_label, 'i%s' % i)\n", (19580, 19604), False, 'from activitysim.core import tracing\n'), ((3383, 3405), 'numpy.arange', 'np.arange', (['(0)', 'num_cols'], {}), '(0, num_cols)\n', (3392, 3405), True, 'import numpy as 
np\n'), ((19371, 19399), 'activitysim.core.chunk.chunk_log', 'chunk.chunk_log', (['trace_label'], {}), '(trace_label)\n', (19386, 19399), False, 'from activitysim.core import chunk\n'), ((19415, 19437), 'activitysim.core.chunk.chunk_log_skip', 'chunk.chunk_log_skip', ([], {}), '()\n', (19435, 19437), False, 'from activitysim.core import chunk\n'), ((20614, 20654), 'activitysim.abm.models.util.trip.failed_trip_cohorts', 'failed_trip_cohorts', (['trips_chunk', 'failed'], {}), '(trips_chunk, failed)\n', (20633, 20654), False, 'from activitysim.abm.models.util.trip import failed_trip_cohorts\n')] |
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.decomposition import NMF
import datetime
import matplotlib.pyplot as plt
if __name__ == "__main__":
    startTime = datetime.datetime.now()

    # Load training data
    x = np.load('data/train_w2v_data_array.npy')
    y = np.load('data/train_w2v_target_array.npy')
    y = y.astype('int')
    y = y.flatten()

    # Load test data
    z = np.load('data/test_w2v_data_array.npy')
    t = np.load('data/test_w2v_target_array.npy')
    t = t.astype('int')
    t = t.flatten()

    # MultinomialNB requires non-negative features, so shift both splits up by
    # the magnitude of the smallest negative value found in either split.
    # BUG FIX: the original `min(xmin, zmin) * -1` shift was applied
    # unconditionally; when the data minimum is positive that shift is
    # negative and would push values below zero. Only shift when needed.
    data_min = min(np.amin(x), np.amin(z))
    shift = max(0.0, -data_min)
    x = np.add(x, shift)
    z = np.add(z, shift)

    # Fit and predict using a Laplace-smoothed (alpha=1) multinomial NB model
    clf = MultinomialNB(alpha=1)
    clf.fit(x, y)
    p = clf.predict(z)

    # Elapsed time (covers data loading, training and prediction)
    endTime = datetime.datetime.now() - startTime
    print("Total time taken to train: ", endTime)
    print("\n")
    print("W2V Multinomial Naive Bayes")

    # Compute accuracy
    accuracy = metrics.accuracy_score(t, p, normalize=False)
    print("Accuracy: ", (accuracy / len(t)) * 100)

    # Confusion matrix
    confusion_matrix = metrics.confusion_matrix(t, p)
    print("Confusion Matrix:\n", confusion_matrix)

    # Map the positive class label 4 to 1 so t/p are binary {0, 1}
    t[np.where(t == 4)] = 1
    p[np.where(p == 4)] = 1

    # column 1 of predict_proba is the probability of the positive class
    y_scores = clf.predict_proba(z)

    # Plot the Precision-Recall curve
    precision, recall, _ = metrics.precision_recall_curve(t, y_scores[:, 1])
    plt.step(recall, precision, color='b', alpha=0.2, where='post')
    plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])

    # BUG FIX: average_precision_score expects probability scores (y_score),
    # not hard class predictions -- use the positive-class probabilities,
    # consistent with the precision_recall_curve call above.
    average_precision = metrics.average_precision_score(t, y_scores[:, 1])
    plt.title('W2V Multinomial NB Precision-Recall curve: AP={0:0.2f}'.format(average_precision))
    plt.savefig('data/w2v_MultinomialNB_precisionRecall.png')
    plt.show()
| [
"matplotlib.pyplot.savefig",
"numpy.add",
"numpy.amin",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.average_precision_score",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"numpy.where",
"sklearn.metrics.precision_recall_curve",
"matplotlib.pyplot.fill_between",
"datetime.datetime.now",... | [((223, 246), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (244, 246), False, 'import datetime\n'), ((281, 321), 'numpy.load', 'np.load', (['"""data/train_w2v_data_array.npy"""'], {}), "('data/train_w2v_data_array.npy')\n", (288, 321), True, 'import numpy as np\n'), ((330, 372), 'numpy.load', 'np.load', (['"""data/train_w2v_target_array.npy"""'], {}), "('data/train_w2v_target_array.npy')\n", (337, 372), True, 'import numpy as np\n'), ((447, 486), 'numpy.load', 'np.load', (['"""data/test_w2v_data_array.npy"""'], {}), "('data/test_w2v_data_array.npy')\n", (454, 486), True, 'import numpy as np\n'), ((495, 536), 'numpy.load', 'np.load', (['"""data/test_w2v_target_array.npy"""'], {}), "('data/test_w2v_target_array.npy')\n", (502, 536), True, 'import numpy as np\n'), ((673, 683), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (680, 683), True, 'import numpy as np\n'), ((695, 705), 'numpy.amin', 'np.amin', (['z'], {}), '(z)\n', (702, 705), True, 'import numpy as np\n'), ((752, 772), 'numpy.add', 'np.add', (['x', 'scale_min'], {}), '(x, scale_min)\n', (758, 772), True, 'import numpy as np\n'), ((781, 801), 'numpy.add', 'np.add', (['z', 'scale_min'], {}), '(z, scale_min)\n', (787, 801), True, 'import numpy as np\n'), ((927, 949), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {'alpha': '(1)'}), '(alpha=1)\n', (940, 949), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((1221, 1266), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['t', 'p'], {'normalize': '(False)'}), '(t, p, normalize=False)\n', (1243, 1266), False, 'from sklearn import metrics\n'), ((1365, 1395), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['t', 'p'], {}), '(t, p)\n', (1389, 1395), False, 'from sklearn import metrics\n'), ((1632, 1681), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['t', 'y_scores[:, 1]'], {}), '(t, y_scores[:, 1])\n', (1662, 1681), False, 'from 
sklearn import metrics\n'), ((1686, 1749), 'matplotlib.pyplot.step', 'plt.step', (['recall', 'precision'], {'color': '"""b"""', 'alpha': '(0.2)', 'where': '"""post"""'}), "(recall, precision, color='b', alpha=0.2, where='post')\n", (1694, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1754, 1824), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['recall', 'precision'], {'step': '"""post"""', 'alpha': '(0.2)', 'color': '"""b"""'}), "(recall, precision, step='post', alpha=0.2, color='b')\n", (1770, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1849), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (1839, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1877), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (1864, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1903), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (1890, 1903), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1928), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1916, 1928), True, 'import matplotlib.pyplot as plt\n'), ((1953, 1990), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['t', 'p'], {}), '(t, p)\n', (1984, 1990), False, 'from sklearn import metrics\n'), ((2093, 2150), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/w2v_MultinomialNB_precisionRecall.png"""'], {}), "('data/w2v_MultinomialNB_precisionRecall.png')\n", (2104, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2163, 2165), True, 'import matplotlib.pyplot as plt\n'), ((1038, 1061), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1059, 1061), False, 'import datetime\n'), ((1479, 1495), 'numpy.where', 'np.where', (['(t == 4)'], {}), '(t == 4)\n', (1487, 1495), True, 'import numpy as np\n'), ((1507, 1523), 'numpy.where', 
'np.where', (['(p == 4)'], {}), '(p == 4)\n', (1515, 1523), True, 'import numpy as np\n')] |
""" Code for the EKF Refactored """
import sympy
from sympy import atan, pi, tan
from sympy import symbols, Matrix
from math import sqrt, tan, cos, sin, atan2
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn
from filterpy.kalman import ExtendedKalmanFilter as EKF
from numpy import array, sqrt
class RobotEKF(EKF):
    """Extended Kalman filter for a wheeled robot with bicycle steering.

    State vector is [x, y, heading]; the control input is
    [velocity, steering angle].  The motion-model Jacobians are derived
    symbolically with sympy once at construction time and evaluated
    numerically on every predict step.
    """

    def __init__(self, dt, wheelbase, std_vel, std_steer):
        """Configure the filter.

        dt: time step between predict calls
        wheelbase: distance between the front and rear axles
        std_vel, std_steer: motion-noise parameters for velocity / steering
        """
        EKF.__init__(self, 3, 2, 2)
        self.dt = dt
        self.wheelbase = wheelbase
        self.std_vel = std_vel
        self.std_steer = std_steer

        # symbolic state-transition function f(x, u) for the bicycle model
        steer_sym, x_sym, y_sym, vel_sym, wb_sym, hdg_sym, dt_sym = symbols(
            'a, x, y, v, w, theta, t')
        travel = vel_sym * dt_sym
        turn = (travel / wb_sym) * sympy.tan(steer_sym)
        radius = wb_sym / sympy.tan(steer_sym)

        self.fxu = Matrix(
            [[x_sym - radius * sympy.sin(hdg_sym) + radius * sympy.sin(hdg_sym + turn)],
             [y_sym + radius * sympy.cos(hdg_sym) - radius * sympy.cos(hdg_sym + turn)],
             [hdg_sym + turn]])

        # Jacobians of f with respect to the state and to the control input
        self.F_j = self.fxu.jacobian(Matrix([x_sym, y_sym, hdg_sym]))
        self.V_j = self.fxu.jacobian(Matrix([vel_sym, steer_sym]))

        # symbol -> value map reused every predict; theta/v/a refreshed there
        self.subs = {x_sym: 0, y_sym: 0, vel_sym: 0, steer_sym: 0,
                     dt_sym: dt, wb_sym: wheelbase, hdg_sym: 0}
        self.x_x, self.x_y = x_sym, y_sym
        self.v, self.a, self.theta = vel_sym, steer_sym, hdg_sym

    def predict(self, u):
        """Project the state and covariance forward one step under control u."""
        self.x = self.move(self.x, u, self.dt)

        # refresh the symbols that change between prediction steps
        self.subs[self.theta] = self.x[2, 0]
        self.subs[self.v] = u[0]
        self.subs[self.a] = u[1]

        F = array(self.F_j.evalf(subs=self.subs)).astype(float)
        V = array(self.V_j.evalf(subs=self.subs)).astype(float)

        # covariance of motion noise in control space
        # NOTE(review): `self.std_vel * u[0]**2` evaluates as std_vel * (u[0]**2),
        # not (std_vel * u[0])**2 -- confirm which variance term is intended.
        M = array([[self.std_vel * u[0]**2, 0],
                   [0, self.std_steer**2]])

        self.P = F @ self.P @ F.T + V @ M @ V.T

    def move(self, x, u, dt):
        """Return the state advanced one step of dt under u = [vel, steering]."""
        hdg = x[2, 0]
        vel = u[0]
        steering_angle = u[1]
        dist = vel * dt

        if abs(steering_angle) <= 0.001:
            # effectively no steering: straight-line motion
            dx = np.array([[dist * cos(hdg)],
                           [dist * sin(hdg)],
                           [0]])
        else:
            # turning: integrate along the arc of radius r
            beta = (dist / self.wheelbase) * tan(steering_angle)
            r = self.wheelbase / tan(steering_angle)
            dx = np.array([[-r * sin(hdg) + r * sin(hdg + beta)],
                           [r * cos(hdg) - r * cos(hdg + beta)],
                           [beta]])
        return x + dx
##MIT license | [
"sympy.sin",
"sympy.cos",
"filterpy.kalman.ExtendedKalmanFilter.__init__",
"math.tan",
"sympy.Matrix",
"sympy.tan",
"sympy.symbols",
"numpy.array",
"numpy.dot",
"math.cos",
"math.sin"
] | [((417, 444), 'filterpy.kalman.ExtendedKalmanFilter.__init__', 'EKF.__init__', (['self', '(3)', '(2)', '(2)'], {}), '(self, 3, 2, 2)\n', (429, 444), True, 'from filterpy.kalman import ExtendedKalmanFilter as EKF\n'), ((605, 639), 'sympy.symbols', 'symbols', (['"""a, x, y, v, w, theta, t"""'], {}), "('a, x, y, v, w, theta, t')\n", (612, 639), False, 'from sympy import symbols, Matrix\n'), ((1652, 1716), 'numpy.array', 'array', (['[[self.std_vel * u[0] ** 2, 0], [0, self.std_steer ** 2]]'], {}), '([[self.std_vel * u[0] ** 2, 0], [0, self.std_steer ** 2]])\n', (1657, 1716), False, 'from numpy import array, sqrt\n'), ((693, 705), 'sympy.tan', 'sympy.tan', (['a'], {}), '(a)\n', (702, 705), False, 'import sympy\n'), ((720, 732), 'sympy.tan', 'sympy.tan', (['a'], {}), '(a)\n', (729, 732), False, 'import sympy\n'), ((953, 974), 'sympy.Matrix', 'Matrix', (['[x, y, theta]'], {}), '([x, y, theta])\n', (959, 974), False, 'from sympy import symbols, Matrix\n'), ((1013, 1027), 'sympy.Matrix', 'Matrix', (['[v, a]'], {}), '([v, a])\n', (1019, 1027), False, 'from sympy import symbols, Matrix\n'), ((2032, 2051), 'math.tan', 'tan', (['steering_angle'], {}), '(steering_angle)\n', (2035, 2051), False, 'from math import sqrt, tan, cos, sin, atan2\n'), ((2085, 2104), 'math.tan', 'tan', (['steering_angle'], {}), '(steering_angle)\n', (2088, 2104), False, 'from math import sqrt, tan, cos, sin, atan2\n'), ((1749, 1766), 'numpy.dot', 'np.dot', (['F', 'self.P'], {}), '(F, self.P)\n', (1755, 1766), True, 'import numpy as np\n'), ((1778, 1790), 'numpy.dot', 'np.dot', (['V', 'M'], {}), '(V, M)\n', (1784, 1790), True, 'import numpy as np\n'), ((802, 825), 'sympy.sin', 'sympy.sin', (['(theta + beta)'], {}), '(theta + beta)\n', (811, 825), False, 'import sympy\n'), ((863, 886), 'sympy.cos', 'sympy.cos', (['(theta + beta)'], {}), '(theta + beta)\n', (872, 886), False, 'import sympy\n'), ((2349, 2357), 'math.cos', 'cos', (['hdg'], {}), '(hdg)\n', (2352, 2357), False, 'from math import sqrt, tan, 
cos, sin, atan2\n'), ((2394, 2402), 'math.sin', 'sin', (['hdg'], {}), '(hdg)\n', (2397, 2402), False, 'from math import sqrt, tan, cos, sin, atan2\n'), ((783, 799), 'sympy.sin', 'sympy.sin', (['theta'], {}), '(theta)\n', (792, 799), False, 'import sympy\n'), ((844, 860), 'sympy.cos', 'sympy.cos', (['theta'], {}), '(theta)\n', (853, 860), False, 'import sympy\n'), ((2146, 2154), 'math.sin', 'sin', (['hdg'], {}), '(hdg)\n', (2149, 2154), False, 'from math import sqrt, tan, cos, sin, atan2\n'), ((2159, 2174), 'math.sin', 'sin', (['(hdg + beta)'], {}), '(hdg + beta)\n', (2162, 2174), False, 'from math import sqrt, tan, cos, sin, atan2\n'), ((2208, 2216), 'math.cos', 'cos', (['hdg'], {}), '(hdg)\n', (2211, 2216), False, 'from math import sqrt, tan, cos, sin, atan2\n'), ((2221, 2236), 'math.cos', 'cos', (['(hdg + beta)'], {}), '(hdg + beta)\n', (2224, 2236), False, 'from math import sqrt, tan, cos, sin, atan2\n')] |
import numpy as np
import pandas as pd
import pyro
import pyro.distributions as dist
import torch
from pyro.nn import PyroModule
from scvi import _CONSTANTS
from scvi.data._anndata import get_from_registry
from scvi.nn import one_hot
# class NegativeBinomial(TorchDistributionMixin, ScVINegativeBinomial):
# pass
class LocationModelMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel(PyroModule):
"""
Cell2location models the elements of :math:`D` as Negative Binomial distributed,
given an unobserved gene expression level (rate) :math:`mu` and a gene- and batch-specific
over-dispersion parameter :math:`\alpha_{e,g}` which accounts for unexplained variance:
.. math::
D_{s,g} \sim \mathtt{NB}(\mu_{s,g}, \alpha_{e,g})
The expression level of genes :math:`\mu_{s,g}` in the mRNA count space is modelled
as a linear function of expression signatures of reference cell types :math:`g_{f,g}`:
.. math::
\mu_{s,g} = (m_{g} \left (\sum_{f} {w_{s,f} \: g_{f,g}} \right) + s_{e,g}) y_{s}
Here, :math:`w_{s,f}` denotes regression weight of each reference signature :math:`f` at location :math:`s`,
which can be interpreted as the expected number of cells at location :math:`s`
that express reference signature :math:`f`;
:math:`g_{f,g}` denotes the reference signatures of cell types :math:`f` of each gene :math:`g`,
`cell_state_df` input ;
:math:`m_{g}` denotes a gene-specific scaling parameter which adjusts for global differences in sensitivity
between technologies (platform effect);
:math:`y_{s}` denotes a location/observation-specific scaling parameter which adjusts for differences in sensitivity
between observations and batches;
:math:`s_{e,g}` is additive component that account for gene- and location-specific shift,
such as due to contaminating or free-floating RNA.
To account for the similarity of location patterns across cell types, :math:`w_{s,f}` is modelled using
another layer of decomposition (factorization) using :math:`r={1, .., R}` groups of cell types,
that can be interpreted as cellular compartments or tissue zones. Unless stated otherwise, R is set to 50.
Corresponding graphical model can be found in supplementary methods:
https://www.biorxiv.org/content/10.1101/2020.11.15.378125v1.supplementary-material
Approximate Variational Inference is used to estimate the posterior distribution of all model parameters.
Estimation of absolute cell abundance `w_{s,f}` is guided using informed prior on the number of cells
(argument called `N_cells_per_location`). It is a tissue-level global estimate, which can be derived from histology
images (H&E or DAPI), ideally paired to the spatial expression data or at least representing the same tissue type.
This parameter can be estimated by manually counting nuclei in a 10-20 locations in the histology image
(e.g. using 10X Loupe browser), and computing the average cell abundance.
An appropriate setting of this prior is essential to inform the estimation of absolute cell type abundance values,
however, the model is robust to a range of similar values.
In settings where suitable histology images are not available, the size of capture regions relative to
the expected size of cells can be used to estimate `N_cells_per_location`.
The prior on detection efficiency per location :math:`y_s` is selected to discourage over-normalisation, such that
unless data has evidence of strong technical effect, the effect is assumed to be small and close to
the mean sensitivity for each batch :math:`y_e`:
.. math::
y_s ~ Gamma(detection_alpha, detection_alpha / y_e)
where y_e is unknown/latent average detection efficiency in each batch/experiment:
.. math::
y_e ~ Gamma(10, 10 / detection_mean)
"""
def __init__(
self,
n_obs,
n_vars,
n_factors,
n_batch,
cell_state_mat,
n_groups: int = 50,
detection_mean=1 / 2,
detection_alpha=200.0,
m_g_gene_level_prior={"mean": 1, "mean_var_ratio": 1.0, "alpha_mean": 3.0},
N_cells_per_location=8.0,
A_factors_per_location=7.0,
N_cells_mean_var_ratio=1.0,
alpha_g_phi_hyp_prior={"alpha": 9.0, "beta": 3.0},
gene_add_alpha_hyp_prior={"alpha": 9.0, "beta": 3.0},
gene_add_mean_hyp_prior={
"alpha": 1.0,
"beta": 100.0,
},
detection_hyp_prior={"mean_alpha": 10.0},
w_sf_mean_var_ratio=5.0,
):
super().__init__()
self.n_obs = n_obs
self.n_vars = n_vars
self.n_factors = n_factors
self.n_batch = n_batch
self.n_groups = n_groups
self.m_g_gene_level_prior = m_g_gene_level_prior
self.alpha_g_phi_hyp_prior = alpha_g_phi_hyp_prior
self.w_sf_mean_var_ratio = w_sf_mean_var_ratio
self.gene_add_alpha_hyp_prior = gene_add_alpha_hyp_prior
self.gene_add_mean_hyp_prior = gene_add_mean_hyp_prior
detection_hyp_prior["mean"] = detection_mean
detection_hyp_prior["alpha"] = detection_alpha
self.detection_hyp_prior = detection_hyp_prior
self.register_buffer(
"detection_hyp_prior_alpha",
torch.tensor(self.detection_hyp_prior["alpha"]),
)
self.register_buffer(
"detection_mean_hyp_prior_alpha",
torch.tensor(self.detection_hyp_prior["mean_alpha"]),
)
self.register_buffer(
"detection_mean_hyp_prior_beta",
torch.tensor(self.detection_hyp_prior["mean_alpha"] / self.detection_hyp_prior["mean"]),
)
# compute hyperparameters from mean and sd
self.register_buffer("m_g_mu_hyp", torch.tensor(self.m_g_gene_level_prior["mean"]))
self.register_buffer(
"m_g_mu_mean_var_ratio_hyp",
torch.tensor(self.m_g_gene_level_prior["mean_var_ratio"]),
)
self.register_buffer("m_g_alpha_hyp_mean", torch.tensor(self.m_g_gene_level_prior["alpha_mean"]))
self.cell_state_mat = cell_state_mat
self.register_buffer("cell_state", torch.tensor(cell_state_mat.T))
self.register_buffer("N_cells_per_location", torch.tensor(N_cells_per_location))
self.register_buffer("A_factors_per_location", torch.tensor(A_factors_per_location))
self.register_buffer("N_cells_mean_var_ratio", torch.tensor(N_cells_mean_var_ratio))
self.register_buffer(
"alpha_g_phi_hyp_prior_alpha",
torch.tensor(self.alpha_g_phi_hyp_prior["alpha"]),
)
self.register_buffer(
"alpha_g_phi_hyp_prior_beta",
torch.tensor(self.alpha_g_phi_hyp_prior["beta"]),
)
self.register_buffer(
"gene_add_alpha_hyp_prior_alpha",
torch.tensor(self.gene_add_alpha_hyp_prior["alpha"]),
)
self.register_buffer(
"gene_add_alpha_hyp_prior_beta",
torch.tensor(self.gene_add_alpha_hyp_prior["beta"]),
)
self.register_buffer(
"gene_add_mean_hyp_prior_alpha",
torch.tensor(self.gene_add_mean_hyp_prior["alpha"]),
)
self.register_buffer(
"gene_add_mean_hyp_prior_beta",
torch.tensor(self.gene_add_mean_hyp_prior["beta"]),
)
self.register_buffer("w_sf_mean_var_ratio_tensor", torch.tensor(self.w_sf_mean_var_ratio))
self.register_buffer("n_factors_tensor", torch.tensor(self.n_factors))
self.register_buffer("n_groups_tensor", torch.tensor(self.n_groups))
self.register_buffer("ones", torch.ones((1, 1)))
self.register_buffer("ones_1_n_groups", torch.ones((1, self.n_groups)))
self.register_buffer("ones_1_n_factors", torch.ones((1, self.n_factors)))
self.register_buffer("ones_n_batch_1", torch.ones((self.n_batch, 1)))
self.register_buffer("eps", torch.tensor(1e-8))
@staticmethod
def _get_fn_args_from_batch(tensor_dict):
x_data = tensor_dict[_CONSTANTS.X_KEY]
ind_x = tensor_dict["ind_x"].long().squeeze()
batch_index = tensor_dict[_CONSTANTS.BATCH_KEY]
return (x_data, ind_x, batch_index), {}
def create_plates(self, x_data, idx, batch_index):
return pyro.plate("obs_plate", size=self.n_obs, dim=-2, subsample=idx)
def list_obs_plate_vars(self):
"""Create a dictionary with:
1. "name" - the name of observation/minibatch plate;
2. "input" - indexes of model args to provide to encoder network when using amortised inference;
3. "sites" - dictionary with
keys - names of variables that belong to the observation plate (used to recognise
and merge posterior samples for minibatch variables)
values - the dimensions in non-plate axis of each variable (used to construct output
layer of encoder network when using amortised inference)
"""
return {
"name": "obs_plate",
"input": [0, 2], # expression data + (optional) batch index
"input_transform": [
torch.log1p,
lambda x: x,
], # how to transform input data before passing to NN
"sites": {
"w_sf": self.n_factors,
"detection_y_s": 1,
},
}
def forward(self, x_data, idx, batch_index):
obs2sample = one_hot(batch_index, self.n_batch)
obs_plate = self.create_plates(x_data, idx, batch_index)
# =====================Gene expression level scaling m_g======================= #
# Explains difference in sensitivity for each gene between single cell and spatial technology
m_g_mean = pyro.sample(
"m_g_mean",
dist.Gamma(
self.m_g_mu_mean_var_ratio_hyp * self.m_g_mu_hyp,
self.m_g_mu_mean_var_ratio_hyp,
)
.expand([1, 1])
.to_event(2),
) # (1, 1)
m_g_alpha_e_inv = pyro.sample(
"m_g_alpha_e_inv",
dist.Exponential(self.m_g_alpha_hyp_mean).expand([1, 1]).to_event(2),
) # (1, 1)
m_g_alpha_e = self.ones / m_g_alpha_e_inv.pow(2)
m_g = pyro.sample(
"m_g",
dist.Gamma(m_g_alpha_e, m_g_alpha_e / m_g_mean).expand([1, self.n_vars]).to_event(2), # self.m_g_mu_hyp)
) # (1, n_vars)
# =====================Cell abundances w_sf======================= #
# factorisation prior on w_sf models similarity in locations
# across cell types f and reflects the absolute scale of w_sf
n_cells_per_location = pyro.sample(
"n_cells_per_location",
dist.Gamma(
self.N_cells_per_location * self.N_cells_mean_var_ratio,
self.N_cells_mean_var_ratio,
),
)
a_factors_per_location = pyro.sample(
"a_factors_per_location",
dist.Gamma(self.A_factors_per_location, self.ones),
)
# cell group loadings
shape = self.ones_1_n_factors * a_factors_per_location / self.n_factors_tensor
rate = self.ones_1_n_factors / (n_cells_per_location / a_factors_per_location)
with obs_plate:
w_sf = pyro.sample(
"w_sf",
dist.Gamma(
shape,
rate,
),
) # (self.n_obs, self.n_factors)
# =====================Location-specific detection efficiency ======================= #
# y_s with hierarchical mean prior
detection_mean_y_e = pyro.sample(
"detection_mean_y_e",
dist.Gamma(
self.ones * self.detection_mean_hyp_prior_alpha,
self.ones * self.detection_mean_hyp_prior_beta,
)
.expand([self.n_batch, 1])
.to_event(2),
)
detection_hyp_prior_alpha = pyro.deterministic(
"detection_hyp_prior_alpha",
self.ones_n_batch_1 * self.detection_hyp_prior_alpha,
)
beta = (obs2sample @ detection_hyp_prior_alpha) / (obs2sample @ detection_mean_y_e)
with obs_plate:
detection_y_s = pyro.sample(
"detection_y_s",
dist.Gamma(obs2sample @ detection_hyp_prior_alpha, beta),
) # (self.n_obs, 1)
# =====================Gene-specific additive component ======================= #
# per gene molecule contribution that cannot be explained by
# cell state signatures (e.g. background, free-floating RNA)
s_g_gene_add_alpha_hyp = pyro.sample(
"s_g_gene_add_alpha_hyp",
dist.Gamma(self.gene_add_alpha_hyp_prior_alpha, self.gene_add_alpha_hyp_prior_beta),
)
s_g_gene_add_mean = pyro.sample(
"s_g_gene_add_mean",
dist.Gamma(
self.gene_add_mean_hyp_prior_alpha,
self.gene_add_mean_hyp_prior_beta,
)
.expand([self.n_batch, 1])
.to_event(2),
) # (self.n_batch)
s_g_gene_add_alpha_e_inv = pyro.sample(
"s_g_gene_add_alpha_e_inv",
dist.Exponential(s_g_gene_add_alpha_hyp).expand([self.n_batch, 1]).to_event(2),
) # (self.n_batch)
s_g_gene_add_alpha_e = self.ones / s_g_gene_add_alpha_e_inv.pow(2)
s_g_gene_add = pyro.sample(
"s_g_gene_add",
dist.Gamma(s_g_gene_add_alpha_e, s_g_gene_add_alpha_e / s_g_gene_add_mean)
.expand([self.n_batch, self.n_vars])
.to_event(2),
) # (self.n_batch, n_vars)
# =====================Gene-specific overdispersion ======================= #
alpha_g_phi_hyp = pyro.sample(
"alpha_g_phi_hyp",
dist.Gamma(self.alpha_g_phi_hyp_prior_alpha, self.alpha_g_phi_hyp_prior_beta),
)
alpha_g_inverse = pyro.sample(
"alpha_g_inverse",
dist.Exponential(alpha_g_phi_hyp).expand([self.n_batch, self.n_vars]).to_event(2),
) # (self.n_batch, self.n_vars)
# =====================Expected expression ======================= #
# expected expression
mu = ((w_sf @ self.cell_state) * m_g + (obs2sample @ s_g_gene_add)) * detection_y_s
alpha = obs2sample @ (self.ones / alpha_g_inverse.pow(2))
# convert mean and overdispersion to total count and logits
# total_count, logits = _convert_mean_disp_to_counts_logits(
# mu, alpha, eps=self.eps
# )
# =====================DATA likelihood ======================= #
# Likelihood (sampling distribution) of data_target & add overdispersion via NegativeBinomial
with obs_plate:
pyro.sample(
"data_target",
dist.GammaPoisson(concentration=alpha, rate=alpha / mu),
# dist.NegativeBinomial(total_count=total_count, logits=logits),
obs=x_data,
)
# =====================Compute mRNA count from each factor in locations ======================= #
with obs_plate:
mRNA = w_sf * (self.cell_state * m_g).sum(-1)
pyro.deterministic("u_sf_mRNA_factors", mRNA)
def compute_expected(self, samples, adata, ind_x=None):
r"""Compute expected expression of each gene in each location. Useful for evaluating how well
the model learned expression pattern of all genes in the data.
"""
if ind_x is None:
ind_x = np.arange(adata.n_obs).astype(int)
else:
ind_x = ind_x.astype(int)
obs2sample = get_from_registry(adata, _CONSTANTS.BATCH_KEY)
obs2sample = pd.get_dummies(obs2sample.flatten()).values[ind_x, :]
mu = (
np.dot(samples["w_sf"][ind_x, :], self.cell_state_mat.T) * samples["m_g"]
+ np.dot(obs2sample, samples["s_g_gene_add"])
) * samples["detection_y_s"][ind_x, :]
alpha = np.dot(obs2sample, 1 / np.power(samples["alpha_g_inverse"], 2))
return {"mu": mu, "alpha": alpha, "ind_x": ind_x}
| [
"pyro.distributions.Exponential",
"torch.ones",
"scvi.nn.one_hot",
"pyro.distributions.GammaPoisson",
"numpy.power",
"pyro.deterministic",
"torch.tensor",
"scvi.data._anndata.get_from_registry",
"pyro.distributions.Gamma",
"numpy.dot",
"numpy.arange",
"pyro.plate"
] | [((8385, 8448), 'pyro.plate', 'pyro.plate', (['"""obs_plate"""'], {'size': 'self.n_obs', 'dim': '(-2)', 'subsample': 'idx'}), "('obs_plate', size=self.n_obs, dim=-2, subsample=idx)\n", (8395, 8448), False, 'import pyro\n'), ((9542, 9576), 'scvi.nn.one_hot', 'one_hot', (['batch_index', 'self.n_batch'], {}), '(batch_index, self.n_batch)\n', (9549, 9576), False, 'from scvi.nn import one_hot\n'), ((12087, 12193), 'pyro.deterministic', 'pyro.deterministic', (['"""detection_hyp_prior_alpha"""', '(self.ones_n_batch_1 * self.detection_hyp_prior_alpha)'], {}), "('detection_hyp_prior_alpha', self.ones_n_batch_1 * self.\n detection_hyp_prior_alpha)\n", (12105, 12193), False, 'import pyro\n'), ((15812, 15858), 'scvi.data._anndata.get_from_registry', 'get_from_registry', (['adata', '_CONSTANTS.BATCH_KEY'], {}), '(adata, _CONSTANTS.BATCH_KEY)\n', (15829, 15858), False, 'from scvi.data._anndata import get_from_registry\n'), ((5345, 5392), 'torch.tensor', 'torch.tensor', (["self.detection_hyp_prior['alpha']"], {}), "(self.detection_hyp_prior['alpha'])\n", (5357, 5392), False, 'import torch\n'), ((5492, 5544), 'torch.tensor', 'torch.tensor', (["self.detection_hyp_prior['mean_alpha']"], {}), "(self.detection_hyp_prior['mean_alpha'])\n", (5504, 5544), False, 'import torch\n'), ((5643, 5735), 'torch.tensor', 'torch.tensor', (["(self.detection_hyp_prior['mean_alpha'] / self.detection_hyp_prior['mean'])"], {}), "(self.detection_hyp_prior['mean_alpha'] / self.\n detection_hyp_prior['mean'])\n", (5655, 5735), False, 'import torch\n'), ((5837, 5884), 'torch.tensor', 'torch.tensor', (["self.m_g_gene_level_prior['mean']"], {}), "(self.m_g_gene_level_prior['mean'])\n", (5849, 5884), False, 'import torch\n'), ((5969, 6026), 'torch.tensor', 'torch.tensor', (["self.m_g_gene_level_prior['mean_var_ratio']"], {}), "(self.m_g_gene_level_prior['mean_var_ratio'])\n", (5981, 6026), False, 'import torch\n'), ((6090, 6143), 'torch.tensor', 'torch.tensor', (["self.m_g_gene_level_prior['alpha_mean']"], 
{}), "(self.m_g_gene_level_prior['alpha_mean'])\n", (6102, 6143), False, 'import torch\n'), ((6234, 6264), 'torch.tensor', 'torch.tensor', (['cell_state_mat.T'], {}), '(cell_state_mat.T)\n', (6246, 6264), False, 'import torch\n'), ((6320, 6354), 'torch.tensor', 'torch.tensor', (['N_cells_per_location'], {}), '(N_cells_per_location)\n', (6332, 6354), False, 'import torch\n'), ((6411, 6447), 'torch.tensor', 'torch.tensor', (['A_factors_per_location'], {}), '(A_factors_per_location)\n', (6423, 6447), False, 'import torch\n'), ((6504, 6540), 'torch.tensor', 'torch.tensor', (['N_cells_mean_var_ratio'], {}), '(N_cells_mean_var_ratio)\n', (6516, 6540), False, 'import torch\n'), ((6628, 6677), 'torch.tensor', 'torch.tensor', (["self.alpha_g_phi_hyp_prior['alpha']"], {}), "(self.alpha_g_phi_hyp_prior['alpha'])\n", (6640, 6677), False, 'import torch\n'), ((6773, 6821), 'torch.tensor', 'torch.tensor', (["self.alpha_g_phi_hyp_prior['beta']"], {}), "(self.alpha_g_phi_hyp_prior['beta'])\n", (6785, 6821), False, 'import torch\n'), ((6921, 6973), 'torch.tensor', 'torch.tensor', (["self.gene_add_alpha_hyp_prior['alpha']"], {}), "(self.gene_add_alpha_hyp_prior['alpha'])\n", (6933, 6973), False, 'import torch\n'), ((7072, 7123), 'torch.tensor', 'torch.tensor', (["self.gene_add_alpha_hyp_prior['beta']"], {}), "(self.gene_add_alpha_hyp_prior['beta'])\n", (7084, 7123), False, 'import torch\n'), ((7222, 7273), 'torch.tensor', 'torch.tensor', (["self.gene_add_mean_hyp_prior['alpha']"], {}), "(self.gene_add_mean_hyp_prior['alpha'])\n", (7234, 7273), False, 'import torch\n'), ((7371, 7421), 'torch.tensor', 'torch.tensor', (["self.gene_add_mean_hyp_prior['beta']"], {}), "(self.gene_add_mean_hyp_prior['beta'])\n", (7383, 7421), False, 'import torch\n'), ((7493, 7531), 'torch.tensor', 'torch.tensor', (['self.w_sf_mean_var_ratio'], {}), '(self.w_sf_mean_var_ratio)\n', (7505, 7531), False, 'import torch\n'), ((7583, 7611), 'torch.tensor', 'torch.tensor', (['self.n_factors'], {}), 
'(self.n_factors)\n', (7595, 7611), False, 'import torch\n'), ((7661, 7688), 'torch.tensor', 'torch.tensor', (['self.n_groups'], {}), '(self.n_groups)\n', (7673, 7688), False, 'import torch\n'), ((7728, 7746), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (7738, 7746), False, 'import torch\n'), ((7796, 7826), 'torch.ones', 'torch.ones', (['(1, self.n_groups)'], {}), '((1, self.n_groups))\n', (7806, 7826), False, 'import torch\n'), ((7877, 7908), 'torch.ones', 'torch.ones', (['(1, self.n_factors)'], {}), '((1, self.n_factors))\n', (7887, 7908), False, 'import torch\n'), ((7957, 7986), 'torch.ones', 'torch.ones', (['(self.n_batch, 1)'], {}), '((self.n_batch, 1))\n', (7967, 7986), False, 'import torch\n'), ((8024, 8043), 'torch.tensor', 'torch.tensor', (['(1e-08)'], {}), '(1e-08)\n', (8036, 8043), False, 'import torch\n'), ((10847, 10948), 'pyro.distributions.Gamma', 'dist.Gamma', (['(self.N_cells_per_location * self.N_cells_mean_var_ratio)', 'self.N_cells_mean_var_ratio'], {}), '(self.N_cells_per_location * self.N_cells_mean_var_ratio, self.\n N_cells_mean_var_ratio)\n', (10857, 10948), True, 'import pyro.distributions as dist\n'), ((11099, 11149), 'pyro.distributions.Gamma', 'dist.Gamma', (['self.A_factors_per_location', 'self.ones'], {}), '(self.A_factors_per_location, self.ones)\n', (11109, 11149), True, 'import pyro.distributions as dist\n'), ((12847, 12935), 'pyro.distributions.Gamma', 'dist.Gamma', (['self.gene_add_alpha_hyp_prior_alpha', 'self.gene_add_alpha_hyp_prior_beta'], {}), '(self.gene_add_alpha_hyp_prior_alpha, self.\n gene_add_alpha_hyp_prior_beta)\n', (12857, 12935), True, 'import pyro.distributions as dist\n'), ((13965, 14042), 'pyro.distributions.Gamma', 'dist.Gamma', (['self.alpha_g_phi_hyp_prior_alpha', 'self.alpha_g_phi_hyp_prior_beta'], {}), '(self.alpha_g_phi_hyp_prior_alpha, self.alpha_g_phi_hyp_prior_beta)\n', (13975, 14042), True, 'import pyro.distributions as dist\n'), ((15366, 15411), 'pyro.deterministic', 
'pyro.deterministic', (['"""u_sf_mRNA_factors"""', 'mRNA'], {}), "('u_sf_mRNA_factors', mRNA)\n", (15384, 15411), False, 'import pyro\n'), ((11463, 11486), 'pyro.distributions.Gamma', 'dist.Gamma', (['shape', 'rate'], {}), '(shape, rate)\n', (11473, 11486), True, 'import pyro.distributions as dist\n'), ((12431, 12487), 'pyro.distributions.Gamma', 'dist.Gamma', (['(obs2sample @ detection_hyp_prior_alpha)', 'beta'], {}), '(obs2sample @ detection_hyp_prior_alpha, beta)\n', (12441, 12487), True, 'import pyro.distributions as dist\n'), ((14984, 15039), 'pyro.distributions.GammaPoisson', 'dist.GammaPoisson', ([], {'concentration': 'alpha', 'rate': '(alpha / mu)'}), '(concentration=alpha, rate=alpha / mu)\n', (15001, 15039), True, 'import pyro.distributions as dist\n'), ((16049, 16092), 'numpy.dot', 'np.dot', (['obs2sample', "samples['s_g_gene_add']"], {}), "(obs2sample, samples['s_g_gene_add'])\n", (16055, 16092), True, 'import numpy as np\n'), ((16179, 16218), 'numpy.power', 'np.power', (["samples['alpha_g_inverse']", '(2)'], {}), "(samples['alpha_g_inverse'], 2)\n", (16187, 16218), True, 'import numpy as np\n'), ((15704, 15726), 'numpy.arange', 'np.arange', (['adata.n_obs'], {}), '(adata.n_obs)\n', (15713, 15726), True, 'import numpy as np\n'), ((15961, 16017), 'numpy.dot', 'np.dot', (["samples['w_sf'][ind_x, :]", 'self.cell_state_mat.T'], {}), "(samples['w_sf'][ind_x, :], self.cell_state_mat.T)\n", (15967, 16017), True, 'import numpy as np\n'), ((9904, 10001), 'pyro.distributions.Gamma', 'dist.Gamma', (['(self.m_g_mu_mean_var_ratio_hyp * self.m_g_mu_hyp)', 'self.m_g_mu_mean_var_ratio_hyp'], {}), '(self.m_g_mu_mean_var_ratio_hyp * self.m_g_mu_hyp, self.\n m_g_mu_mean_var_ratio_hyp)\n', (9914, 10001), True, 'import pyro.distributions as dist\n'), ((10201, 10242), 'pyro.distributions.Exponential', 'dist.Exponential', (['self.m_g_alpha_hyp_mean'], {}), '(self.m_g_alpha_hyp_mean)\n', (10217, 10242), True, 'import pyro.distributions as dist\n'), ((10407, 10454), 
'pyro.distributions.Gamma', 'dist.Gamma', (['m_g_alpha_e', '(m_g_alpha_e / m_g_mean)'], {}), '(m_g_alpha_e, m_g_alpha_e / m_g_mean)\n', (10417, 10454), True, 'import pyro.distributions as dist\n'), ((11821, 11932), 'pyro.distributions.Gamma', 'dist.Gamma', (['(self.ones * self.detection_mean_hyp_prior_alpha)', '(self.ones * self.detection_mean_hyp_prior_beta)'], {}), '(self.ones * self.detection_mean_hyp_prior_alpha, self.ones *\n self.detection_mean_hyp_prior_beta)\n', (11831, 11932), True, 'import pyro.distributions as dist\n'), ((13028, 13114), 'pyro.distributions.Gamma', 'dist.Gamma', (['self.gene_add_mean_hyp_prior_alpha', 'self.gene_add_mean_hyp_prior_beta'], {}), '(self.gene_add_mean_hyp_prior_alpha, self.\n gene_add_mean_hyp_prior_beta)\n', (13038, 13114), True, 'import pyro.distributions as dist\n'), ((13350, 13390), 'pyro.distributions.Exponential', 'dist.Exponential', (['s_g_gene_add_alpha_hyp'], {}), '(s_g_gene_add_alpha_hyp)\n', (13366, 13390), True, 'import pyro.distributions as dist\n'), ((13610, 13684), 'pyro.distributions.Gamma', 'dist.Gamma', (['s_g_gene_add_alpha_e', '(s_g_gene_add_alpha_e / s_g_gene_add_mean)'], {}), '(s_g_gene_add_alpha_e, s_g_gene_add_alpha_e / s_g_gene_add_mean)\n', (13620, 13684), True, 'import pyro.distributions as dist\n'), ((14136, 14169), 'pyro.distributions.Exponential', 'dist.Exponential', (['alpha_g_phi_hyp'], {}), '(alpha_g_phi_hyp)\n', (14152, 14169), True, 'import pyro.distributions as dist\n')] |
import numpy as np
import pytest
import taichi as ti
from tests import test_utils
def with_data_type(dt):
val = ti.field(ti.i32)
n = 4
ti.root.dense(ti.i, n).place(val)
@ti.kernel
def test_numpy(arr: ti.ext_arr()):
for i in range(n):
arr[i] = arr[i]**2
a = np.array([4, 8, 1, 24], dtype=dt)
for i in range(n):
a[i] = i * 2
test_numpy(a)
for i in range(n):
assert a[i] == i * i * 4
@test_utils.test()
def test_numpy_f32():
with_data_type(np.float32)
@test_utils.test(require=ti.extension.data64)
def test_numpy_f64():
with_data_type(np.float64)
@test_utils.test()
def test_numpy_i32():
with_data_type(np.int32)
@test_utils.test(require=ti.extension.data64)
def test_numpy_i64():
with_data_type(np.int64)
@test_utils.test()
def test_numpy_2d():
val = ti.field(ti.i32)
n = 4
m = 7
ti.root.dense(ti.i, n).dense(ti.j, m).place(val)
@ti.kernel
def test_numpy(arr: ti.ext_arr()):
for i in range(n):
for j in range(m):
arr[i, j] += i + j
a = np.empty(shape=(n, m), dtype=np.int32)
for i in range(n):
for j in range(m):
a[i, j] = i * j
test_numpy(a)
for i in range(n):
for j in range(m):
assert a[i, j] == i * j + i + j
@test_utils.test()
def test_numpy_2d_transpose():
val = ti.field(ti.i32)
n = 8
m = 8
ti.root.dense(ti.ij, (n, m)).place(val)
@ti.kernel
def test_numpy(arr: ti.ext_arr()):
for i in ti.grouped(val):
val[i] = arr[i]
a = np.empty(shape=(n, m), dtype=np.int32)
for i in range(n):
for j in range(m):
a[i, j] = i * j + i * 4
test_numpy(a.transpose())
for i in range(n):
for j in range(m):
assert val[i, j] == i * j + j * 4
@test_utils.test()
def test_numpy_3d():
val = ti.field(ti.i32)
n = 4
m = 7
p = 11
ti.root.dense(ti.i, n).dense(ti.j, m).dense(ti.k, p).place(val)
@ti.kernel
def test_numpy(arr: ti.ext_arr()):
for i in range(n):
for j in range(m):
for k in range(p):
arr[i, j, k] += i + j + k * 2
a = np.empty(shape=(n, m, p), dtype=np.int32)
for i in range(n):
for j in range(m):
for k in range(p):
a[i, j, k] = i * j * (k + 1)
test_numpy(a)
for i in range(n):
for j in range(m):
for k in range(p):
assert a[i, j, k] == i * j * (k + 1) + i + j + k * 2
@test_utils.test()
def test_numpy_3d_error():
val = ti.field(ti.i32)
n = 4
m = 7
p = 11
ti.root.dense(ti.i, n).dense(ti.j, m).dense(ti.k, p).place(val)
@ti.kernel
def test_numpy(arr: ti.ext_arr()):
for i in range(n):
for j in range(m):
for k in range(p):
arr[i, j] += i + j + k * 2
a = np.empty(shape=(n, m, p), dtype=np.int32)
with pytest.raises(ti.TaichiCompilationError):
test_numpy(a)
@test_utils.test()
def test_numpy_multiple_external_arrays():
n = 4
@ti.kernel
def test_numpy(a: ti.ext_arr(), b: ti.ext_arr()):
for i in range(n):
a[i] = a[i] * b[i]
b[i] = a[i] + b[i]
a = np.array([4, 8, 1, 24], dtype=np.int32)
b = np.array([5, 6, 12, 3], dtype=np.int32)
c = a * b
d = c + b
test_numpy(a, b)
for i in range(n):
assert a[i] == c[i]
assert b[i] == d[i]
@test_utils.test()
def test_index_mismatch():
with pytest.raises(AssertionError):
val = ti.field(ti.i32, shape=(1, 2, 3))
val[0, 0] = 1
@test_utils.test()
def test_numpy_zero():
@ti.kernel
def test_numpy(arr: ti.ext_arr()):
pass
test_numpy(np.empty(shape=(0), dtype=np.int32))
test_numpy(np.empty(shape=(0, 5), dtype=np.int32))
test_numpy(np.empty(shape=(5, 0), dtype=np.int32))
@test_utils.test()
def test_numpy_struct_for():
@ti.kernel
def func1(a: ti.any_arr()):
for i, j in a:
a[i, j] = i + j
m = np.zeros((123, 456), dtype=np.int32)
func1(m)
for i in range(123):
for j in range(456):
assert m[i, j] == i + j
@ti.kernel
def func2(a: ti.any_arr()):
for I in ti.grouped(a):
a[I] = I.sum()
n = np.zeros((98, 76, 54), dtype=np.int32)
func2(n)
for i, j, k in ti.ndrange(98, 76, 54):
assert n[i, j, k] == i + j + k
| [
"taichi.ndrange",
"taichi.field",
"tests.test_utils.test",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"taichi.grouped",
"pytest.raises",
"taichi.ext_arr",
"taichi.root.dense",
"taichi.any_arr"
] | [((466, 483), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (481, 483), False, 'from tests import test_utils\n'), ((540, 584), 'tests.test_utils.test', 'test_utils.test', ([], {'require': 'ti.extension.data64'}), '(require=ti.extension.data64)\n', (555, 584), False, 'from tests import test_utils\n'), ((641, 658), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (656, 658), False, 'from tests import test_utils\n'), ((713, 757), 'tests.test_utils.test', 'test_utils.test', ([], {'require': 'ti.extension.data64'}), '(require=ti.extension.data64)\n', (728, 757), False, 'from tests import test_utils\n'), ((812, 829), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (827, 829), False, 'from tests import test_utils\n'), ((1345, 1362), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (1360, 1362), False, 'from tests import test_utils\n'), ((1870, 1887), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (1885, 1887), False, 'from tests import test_utils\n'), ((2586, 2603), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (2601, 2603), False, 'from tests import test_utils\n'), ((3082, 3099), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (3097, 3099), False, 'from tests import test_utils\n'), ((3542, 3559), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (3557, 3559), False, 'from tests import test_utils\n'), ((3700, 3717), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (3715, 3717), False, 'from tests import test_utils\n'), ((3974, 3991), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (3989, 3991), False, 'from tests import test_utils\n'), ((119, 135), 'taichi.field', 'ti.field', (['ti.i32'], {}), '(ti.i32)\n', (127, 135), True, 'import taichi as ti\n'), ((308, 341), 'numpy.array', 'np.array', (['[4, 8, 1, 24]'], {'dtype': 'dt'}), '([4, 8, 1, 24], dtype=dt)\n', (316, 341), True, 'import numpy as np\n'), ((861, 877), 'taichi.field', 
'ti.field', (['ti.i32'], {}), '(ti.i32)\n', (869, 877), True, 'import taichi as ti\n'), ((1110, 1148), 'numpy.empty', 'np.empty', ([], {'shape': '(n, m)', 'dtype': 'np.int32'}), '(shape=(n, m), dtype=np.int32)\n', (1118, 1148), True, 'import numpy as np\n'), ((1404, 1420), 'taichi.field', 'ti.field', (['ti.i32'], {}), '(ti.i32)\n', (1412, 1420), True, 'import taichi as ti\n'), ((1613, 1651), 'numpy.empty', 'np.empty', ([], {'shape': '(n, m)', 'dtype': 'np.int32'}), '(shape=(n, m), dtype=np.int32)\n', (1621, 1651), True, 'import numpy as np\n'), ((1919, 1935), 'taichi.field', 'ti.field', (['ti.i32'], {}), '(ti.i32)\n', (1927, 1935), True, 'import taichi as ti\n'), ((2244, 2285), 'numpy.empty', 'np.empty', ([], {'shape': '(n, m, p)', 'dtype': 'np.int32'}), '(shape=(n, m, p), dtype=np.int32)\n', (2252, 2285), True, 'import numpy as np\n'), ((2641, 2657), 'taichi.field', 'ti.field', (['ti.i32'], {}), '(ti.i32)\n', (2649, 2657), True, 'import taichi as ti\n'), ((2963, 3004), 'numpy.empty', 'np.empty', ([], {'shape': '(n, m, p)', 'dtype': 'np.int32'}), '(shape=(n, m, p), dtype=np.int32)\n', (2971, 3004), True, 'import numpy as np\n'), ((3322, 3361), 'numpy.array', 'np.array', (['[4, 8, 1, 24]'], {'dtype': 'np.int32'}), '([4, 8, 1, 24], dtype=np.int32)\n', (3330, 3361), True, 'import numpy as np\n'), ((3370, 3409), 'numpy.array', 'np.array', (['[5, 6, 12, 3]'], {'dtype': 'np.int32'}), '([5, 6, 12, 3], dtype=np.int32)\n', (3378, 3409), True, 'import numpy as np\n'), ((4128, 4164), 'numpy.zeros', 'np.zeros', (['(123, 456)'], {'dtype': 'np.int32'}), '((123, 456), dtype=np.int32)\n', (4136, 4164), True, 'import numpy as np\n'), ((4384, 4422), 'numpy.zeros', 'np.zeros', (['(98, 76, 54)'], {'dtype': 'np.int32'}), '((98, 76, 54), dtype=np.int32)\n', (4392, 4422), True, 'import numpy as np\n'), ((4455, 4477), 'taichi.ndrange', 'ti.ndrange', (['(98)', '(76)', '(54)'], {}), '(98, 76, 54)\n', (4465, 4477), True, 'import taichi as ti\n'), ((1559, 1574), 'taichi.grouped', 
'ti.grouped', (['val'], {}), '(val)\n', (1569, 1574), True, 'import taichi as ti\n'), ((3015, 3055), 'pytest.raises', 'pytest.raises', (['ti.TaichiCompilationError'], {}), '(ti.TaichiCompilationError)\n', (3028, 3055), False, 'import pytest\n'), ((3596, 3625), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3609, 3625), False, 'import pytest\n'), ((3641, 3674), 'taichi.field', 'ti.field', (['ti.i32'], {'shape': '(1, 2, 3)'}), '(ti.i32, shape=(1, 2, 3))\n', (3649, 3674), True, 'import taichi as ti\n'), ((3824, 3857), 'numpy.empty', 'np.empty', ([], {'shape': '(0)', 'dtype': 'np.int32'}), '(shape=0, dtype=np.int32)\n', (3832, 3857), True, 'import numpy as np\n'), ((3876, 3914), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 5)', 'dtype': 'np.int32'}), '(shape=(0, 5), dtype=np.int32)\n', (3884, 3914), True, 'import numpy as np\n'), ((3931, 3969), 'numpy.empty', 'np.empty', ([], {'shape': '(5, 0)', 'dtype': 'np.int32'}), '(shape=(5, 0), dtype=np.int32)\n', (3939, 3969), True, 'import numpy as np\n'), ((4333, 4346), 'taichi.grouped', 'ti.grouped', (['a'], {}), '(a)\n', (4343, 4346), True, 'import taichi as ti\n'), ((152, 174), 'taichi.root.dense', 'ti.root.dense', (['ti.i', 'n'], {}), '(ti.i, n)\n', (165, 174), True, 'import taichi as ti\n'), ((226, 238), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (236, 238), True, 'import taichi as ti\n'), ((993, 1005), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (1003, 1005), True, 'import taichi as ti\n'), ((1447, 1475), 'taichi.root.dense', 'ti.root.dense', (['ti.ij', '(n, m)'], {}), '(ti.ij, (n, m))\n', (1460, 1475), True, 'import taichi as ti\n'), ((1527, 1539), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (1537, 1539), True, 'import taichi as ti\n'), ((2077, 2089), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (2087, 2089), True, 'import taichi as ti\n'), ((2799, 2811), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (2809, 2811), True, 'import taichi as ti\n'), ((3192, 
3204), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (3202, 3204), True, 'import taichi as ti\n'), ((3209, 3221), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (3219, 3221), True, 'import taichi as ti\n'), ((3780, 3792), 'taichi.ext_arr', 'ti.ext_arr', ([], {}), '()\n', (3790, 3792), True, 'import taichi as ti\n'), ((4053, 4065), 'taichi.any_arr', 'ti.any_arr', ([], {}), '()\n', (4063, 4065), True, 'import taichi as ti\n'), ((4301, 4313), 'taichi.any_arr', 'ti.any_arr', ([], {}), '()\n', (4311, 4313), True, 'import taichi as ti\n'), ((904, 926), 'taichi.root.dense', 'ti.root.dense', (['ti.i', 'n'], {}), '(ti.i, n)\n', (917, 926), True, 'import taichi as ti\n'), ((1973, 1995), 'taichi.root.dense', 'ti.root.dense', (['ti.i', 'n'], {}), '(ti.i, n)\n', (1986, 1995), True, 'import taichi as ti\n'), ((2695, 2717), 'taichi.root.dense', 'ti.root.dense', (['ti.i', 'n'], {}), '(ti.i, n)\n', (2708, 2717), True, 'import taichi as ti\n')] |
# Python imports
from collections.abc import Sequence
import numpy as np
''' StateClass.py: Contains the State Class. '''
class State(Sequence):
''' Abstract State class '''
def __init__(self, data=[], is_terminal=False):
self.data = data
self._is_terminal = is_terminal
def features(self):
'''
Summary
Used by function approximators to represent the state.
Override this method in State subclasses to have functiona
approximators use a different set of features.
Returns:
(iterable)
'''
return np.array(self.data).flatten()
def get_data(self):
return self.data
def get_num_feats(self):
return len(self.features())
def is_terminal(self):
return self._is_terminal
def set_terminal(self, is_term=True):
self._is_terminal = is_term
def __hash__(self):
if type(self.data).__module__ == np.__name__:
# Numpy arrays
return hash(str(self.data))
elif self.data.__hash__ is None:
return hash(tuple(self.data))
else:
return hash(self.data)
def __str__(self):
return "s." + str(self.data)
def __eq__(self, other):
if isinstance(other, State):
return self.data == other.data
return False
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
| [
"numpy.array"
] | [((616, 635), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (624, 635), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import argparse
import random
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
from torch.autograd import Variable
import utils
from utils import PointLoss
from utils import distance_squre
import data_utils as d_utils
import ModelNet40Loader
import shapenet_part_loader
from model_PFNet import _netlocalD,_netG
from test_debugged.test import pointnet2_cls_ssg as pointnet2
# Command-line configuration for the PF-Net completion/classification
# evaluation script: dataset location, loader sizing, cropping setup,
# multi-scale encoder parameters, and checkpoint paths.
parser = argparse.ArgumentParser()
#parser.add_argument('--dataset',  default='ModelNet40', help='ModelNet10|ModelNet40|ShapeNet')
parser.add_argument('--dataroot',  default='dataset/train', help='path to dataset')
parser.add_argument('--workers', type=int,default=0, help='number of data loading workers')
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--pnum', type=int, default=2048, help='the point number of a sample')
parser.add_argument('--crop_point_num',type=int,default=512,help='0 means do not use else use with this weight')
parser.add_argument('--nc', type=int, default=3)
parser.add_argument('--niter', type=int, default=300, help='number of epochs to train for')
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument('--learning_rate', default=0.0002, type=float, help='learning rate in training')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--cuda', type = bool, default = False, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=2, help='number of GPUs to use')
parser.add_argument('--netG', default='Checkpoint/point_netG.pth', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--drop',type=float,default=0.2)
parser.add_argument('--num_scales',type=int,default=3,help='number of scales')
parser.add_argument('--point_scales_list',type=list,default=[2048,1024,512],help='number of points in each scales')
parser.add_argument('--each_scales_size',type=int,default=1,help='each scales size')
parser.add_argument('--wtl2',type=float,default=0.9,help='0 means do not use else use with this weight')
parser.add_argument('--cropmethod', default = 'random_center', help = 'random|center|random_center')
# Parse once at import time; 'opt' is read by the script body below.
opt = parser.parse_args()
print(opt)
def farthest_point_sample(point, npoint):
    """Subsample a point cloud via farthest-point sampling (FPS).

    Starting from a random seed point, repeatedly pick the point that is
    farthest (in squared XYZ distance) from everything chosen so far.

    Input:
        point: pointcloud data, [N, D] (first three columns are XYZ)
        npoint: number of samples to keep
    Return:
        the selected rows of `point`, [npoint, D]
    """
    num_pts = point.shape[0]
    coords = point[:, :3]
    chosen = np.zeros((npoint,))
    # Running minimum squared distance from each point to the chosen set.
    nearest_sq = np.ones((num_pts,)) * 1e10
    current = np.random.randint(0, num_pts)
    for k in range(npoint):
        chosen[k] = current
        sq_dist = np.sum((coords - coords[current, :]) ** 2, -1)
        closer = sq_dist < nearest_sq
        nearest_sq[closer] = sq_dist[closer]
        # Next pick: the point farthest from the chosen set.
        current = np.argmax(nearest_sq, -1)
    return point[chosen.astype(np.int32)]
def rs(point, npoint):
    """Return `npoint` rows of `point` chosen uniformly without replacement."""
    picked = random.sample(range(len(point)), npoint)
    return point[picked]
def distance_squre1(p1, p2):
    """Squared Euclidean distance between the first three components of p1 and p2."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    dz = p1[2] - p2[2]
    return dx * dx + dy * dy + dz * dz
# ---------------------------------------------------------------------------
# Evaluation script body: loads ModelNet40, a pretrained PointNet++ classifier
# checkpoint, and per-sample completion results from disk, then reports
# classification accuracy on cropped vs. completed point clouds.
# NOTE(review): this rebinds the imported torchvision 'transforms' module name
# to a composed pipeline; any later use of the module would be shadowed.
# ---------------------------------------------------------------------------
transforms = transforms.Compose(
    [
        d_utils.PointcloudToTensor(),
    ]
)
test_dset = ModelNet40Loader.ModelNet40Cls(opt.pnum, train=False, transforms=transforms, download = False)
test_dataloader = torch.utils.data.DataLoader(test_dset, batch_size=opt.batchSize,
                                         shuffle=False,num_workers = int(opt.workers))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
experiment_dir = './test_debugged/test/log/classification/pointnet2_ssg_wo_normals'
classifier = pointnet2.get_model()
classifier = classifier.cuda()
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/ckpt.pt')
classifier.load_state_dict(checkpoint)
classifier = classifier.eval().eval()
# Per-class accuracy bookkeeping, keyed by human-readable class name.
acc_dict = {}
name_id_map = {}
# NOTE(review): this handle is never closed; harmless in a short script but a
# 'with' block would be cleaner.
f = open('./modelnet40_ply_hdf5_2048/shape_names.txt')
for i in range(40):
    cls_name = f.readline().strip()
    name_id_map[i] = cls_name
    acc_dict[cls_name] = {'crop_acc':[], 'complete_acc':[]}
n = 0
crop_acc = []
complete_acc = []
center_id = 9
for i, data in enumerate(test_dataloader, 0):
    n = n + 1
    print(i)
    real_point, target = data
    # NOTE(review): np_fake and np_real load the *same* 'crop_label' file as
    # np_crop -- presumably fake/real result files were intended; confirm the
    # filenames before trusting the 'complete' accuracy numbers.
    np_crop = np.loadtxt('test_example/'+'%02d/'%(center_id)+str(n)+'_'+'crop_label'+str(target.item())+'.txt', delimiter=';')
    np_fake = np.loadtxt('test_example/'+'%02d/'%(center_id)+str(n)+'_'+'crop_label'+str(target.item())+'.txt', delimiter=';')
    np_real = np.loadtxt('test_example/'+'%02d/'%(center_id)+str(n)+'_'+'crop_label'+str(target.item())+'.txt', delimiter=';')
    # np.savetxt('test_example/crop_txt_l'+str(target.item())+'_'+str(n)+'.txt', np_crop, fmt = "%f,%f,%f")
    # np.savetxt('test_example/fake_txt_l'+str(target.item())+'_'+str(n)+'.txt', np_fake, fmt = "%f,%f,%f")
    # np.savetxt('test_example/real_txt_l'+str(target.item())+'_'+str(n)+'.txt', np_real, fmt = "%f,%f,%f")
    np_crop = np.array(np_crop)
    # Completed cloud = retained (cropped) points plus generated points.
    np_completed = np.vstack((np_crop,np_fake))
    # points = farthest_point_sample(np.array(np_crop), 1024)
    # points = rs(np_crop, 1024)
    # Classify the cropped cloud: random-sample 1024 points, shape to 1x3xN.
    points = rs((np_crop), 1024)
    points = torch.Tensor(points).cuda().unsqueeze(0)
    points = points.transpose(2, 1)
    # print(points.shape) # 1x3xn
    pred, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    if target.item() == pred_choice.item():
        crop_acc.append(1)
        acc_dict[name_id_map[target.item()]]['crop_acc'].append(1)
    else:
        crop_acc.append(0)
        acc_dict[name_id_map[target.item()]]['crop_acc'].append(0)
    # Classify the completed cloud the same way.
    points = rs((np_completed), 1024)
    points = torch.Tensor(points).cuda().unsqueeze(0)
    points = points.transpose(2, 1)
    # print(points.shape) # 1x3xn
    pred, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    if target.item() == pred_choice.item():
        complete_acc.append(1)
        acc_dict[name_id_map[target.item()]]['complete_acc'].append(1)
    else:
        complete_acc.append(0)
        acc_dict[name_id_map[target.item()]]['complete_acc'].append(0)
    # print('target: ', target.item(), 'p++ prediction: ', pred_choice.item())
print('center id: ', center_id)
print('crop acc: ', sum(crop_acc)/len(crop_acc))
print('complete acc: ', sum(complete_acc)/len(complete_acc))
# Per-class breakdown of both accuracy measures.
for key in acc_dict:
    crop_acc = acc_dict[key]['crop_acc']
    complete_acc = acc_dict[key]['complete_acc']
    print(key,' crop_acc: ',sum(crop_acc)/len(crop_acc),' complete_acc: ', sum(complete_acc)/len(complete_acc))
"numpy.ones",
"argparse.ArgumentParser",
"torch.Tensor",
"numpy.argmax",
"ModelNet40Loader.ModelNet40Cls",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"numpy.sum",
"numpy.vstack",
"data_utils.PointcloudToTensor",
"torch.cuda.is_available",
"test_debugged.test.pointnet2_cls_ssg.get_... | [((567, 592), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (590, 592), False, 'import argparse\n'), ((3560, 3656), 'ModelNet40Loader.ModelNet40Cls', 'ModelNet40Loader.ModelNet40Cls', (['opt.pnum'], {'train': '(False)', 'transforms': 'transforms', 'download': '(False)'}), '(opt.pnum, train=False, transforms=transforms,\n download=False)\n', (3590, 3656), False, 'import ModelNet40Loader\n'), ((3997, 4018), 'test_debugged.test.pointnet2_cls_ssg.get_model', 'pointnet2.get_model', ([], {}), '()\n', (4016, 4018), True, 'from test_debugged.test import pointnet2_cls_ssg as pointnet2\n'), ((2820, 2839), 'numpy.zeros', 'np.zeros', (['(npoint,)'], {}), '((npoint,))\n', (2828, 2839), True, 'import numpy as np\n'), ((2891, 2914), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N'], {}), '(0, N)\n', (2908, 2914), True, 'import numpy as np\n'), ((5311, 5328), 'numpy.array', 'np.array', (['np_crop'], {}), '(np_crop)\n', (5319, 5328), True, 'import numpy as np\n'), ((5348, 5377), 'numpy.vstack', 'np.vstack', (['(np_crop, np_fake)'], {}), '((np_crop, np_fake))\n', (5357, 5377), True, 'import numpy as np\n'), ((2855, 2868), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (2862, 2868), True, 'import numpy as np\n'), ((3026, 3059), 'numpy.sum', 'np.sum', (['((xyz - centroid) ** 2)', '(-1)'], {}), '((xyz - centroid) ** 2, -1)\n', (3032, 3059), True, 'import numpy as np\n'), ((3146, 3169), 'numpy.argmax', 'np.argmax', (['distance', '(-1)'], {}), '(distance, -1)\n', (3155, 3169), True, 'import numpy as np\n'), ((3507, 3535), 'data_utils.PointcloudToTensor', 'd_utils.PointcloudToTensor', ([], {}), '()\n', (3533, 3535), True, 'import data_utils as d_utils\n'), ((3860, 3885), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3883, 3885), False, 'import torch\n'), ((5518, 5538), 'torch.Tensor', 'torch.Tensor', (['points'], {}), '(points)\n', (5530, 5538), False, 'import torch\n'), 
((5994, 6014), 'torch.Tensor', 'torch.Tensor', (['points'], {}), '(points)\n', (6006, 6014), False, 'import torch\n')] |
"""Module for Testing the InVEST Wave Energy module."""
import unittest
import tempfile
import shutil
import os
import re
import numpy
import numpy.testing
from osgeo import gdal
from osgeo import osr, ogr
from shapely.geometry import Polygon
from shapely.geometry import Point
import pygeoprocessing.testing
from pygeoprocessing.testing import sampledata
# Regression baselines are checked out beside this repo under
# ../data/invest-test-data/wave_energy; model inputs live in its 'input'
# subfolder.
REGRESSION_DATA = os.path.join(
    os.path.dirname(__file__), '..', 'data', 'invest-test-data', 'wave_energy')
SAMPLE_DATA = os.path.join(REGRESSION_DATA, 'input')
def _make_empty_files(workspace_dir):
"""Within workspace, make intermediate and output folders with dummy files.
Parameters:
workspace_dir: path to workspace for creating intermediate/output folder.
Returns:
None.
"""
intermediate_files = [
'WEM_InputOutput_Pts.shp', 'aoi_clipped_to_extract_path.shp'
]
raster_files = [
'wp_rc.tif', 'wp_kw.tif', 'capwe_rc.tif', 'capwe_mwh.tif',
'npv_rc.tif', 'npv_usd.tif'
]
vector_files = ['GridPts_prj.shp', 'LandPts_prj.shp']
table_files = ['capwe_rc.csv', 'wp_rc.csv', 'npv_rc.csv']
output_files = raster_files + vector_files + table_files
for folder, folder_files in zip(['intermediate', 'output'],
[intermediate_files, output_files]):
folder_path = os.path.join(workspace_dir, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file_name in folder_files:
with open(os.path.join(folder_path, file_name), 'wb') as open_file:
open_file.write('')
class WaveEnergyUnitTests(unittest.TestCase):
    """Unit tests for the Wave Energy module."""
    # NOTE(review): these tests require natcap.invest, pygeoprocessing, and
    # GDAL/OGR plus on-disk sample data (SAMPLE_DATA / REGRESSION_DATA); they
    # build small rasters/vectors in a temp workspace and exercise private
    # wave_energy helpers directly.
    def setUp(self):
        """Overriding setUp function to create temp workspace directory."""
        # this lets us delete the workspace after its done no matter the
        # the rest result
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Overriding tearDown function to remove temporary directory."""
        shutil.rmtree(self.workspace_dir)
    def test_pixel_size_based_on_coordinate_transform(self):
        """WaveEnergy: testing '_pixel_size_based_on_coordinate_transform' fn"""
        from natcap.invest import wave_energy
        srs = sampledata.SRS_WILLAMETTE
        srs_wkt = srs.projection
        spat_ref = osr.SpatialReference()
        spat_ref.ImportFromWkt(srs_wkt)
        # Define a Lat/Long WGS84 projection
        epsg_id = 4326
        reference = osr.SpatialReference()
        reference.ImportFromEPSG(epsg_id)
        # Get projection as WKT
        latlong_proj = reference.ExportToWkt()
        # Set origin to use for setting up geometries / geotransforms
        latlong_origin = (-70.5, 42.5)
        # Pixel size helper for defining lat/long pixel size
        def pixel_size(x):
            return (x, -1. * x)
        # Get a point from the clipped data object to use later in helping
        # determine proper pixel size
        matrix = numpy.array([[1, 1, 1, 1], [1, 1, 1, 1]])
        input_path = os.path.join(self.workspace_dir, 'input_raster.tif')
        # Create raster to use as testing input
        raster_path = pygeoprocessing.testing.create_raster_on_disk(
            [matrix],
            latlong_origin,
            latlong_proj,
            -1.0,
            pixel_size(0.033333),
            filename=input_path)
        raster_gt = pygeoprocessing.geoprocessing.get_raster_info(raster_path)[
            'geotransform']
        point = (raster_gt[0], raster_gt[3])
        raster_wkt = latlong_proj
        # Create a Spatial Reference from the rasters WKT
        raster_sr = osr.SpatialReference()
        raster_sr.ImportFromWkt(raster_wkt)
        # A coordinate transformation to help get the proper pixel size of
        # the reprojected raster
        coord_trans = osr.CoordinateTransformation(raster_sr, spat_ref)
        # Call the function to test
        result = wave_energy._pixel_size_based_on_coordinate_transform(
            raster_path, coord_trans, point)
        # Expected projected pixel size in meters (Willamette projection).
        expected_res = (5553.933063, -1187.370813)
        # Compare
        for res, exp in zip(result, expected_res):
            pygeoprocessing.testing.assert_close(res, exp)
    def test_count_pixels_groups(self):
        """WaveEnergy: testing '_count_pixels_groups' function."""
        from natcap.invest import wave_energy
        raster_path = os.path.join(self.workspace_dir, 'pixel_groups.tif')
        srs = sampledata.SRS_WILLAMETTE
        # Count occurrences of each of these values in the raster below.
        group_values = [1, 3, 5, 7]
        matrix = numpy.array([[1, 3, 5, 9], [3, 7, 1, 5], [2, 4, 5, 7]])
        # Create raster to use for testing input
        raster_path = pygeoprocessing.testing.create_raster_on_disk(
            [matrix],
            srs.origin,
            srs.projection,
            -1,
            srs.pixel_size(100),
            datatype=gdal.GDT_Int32,
            filename=raster_path)
        results = wave_energy._count_pixels_groups(raster_path, group_values)
        expected_results = [2, 2, 3, 2]
        for res, exp_res in zip(results, expected_results):
            pygeoprocessing.testing.assert_close(res, exp_res, 1e-6)
    def test_calculate_percentiles_from_raster(self):
        """WaveEnergy: testing '_calculate_percentiles_from_raster' function."""
        from natcap.invest import wave_energy
        raster_path = os.path.join(self.workspace_dir, 'percentile.tif')
        srs = sampledata.SRS_WILLAMETTE
        # Values 1..100 make the expected percentiles trivially readable.
        matrix = numpy.arange(1, 101)
        matrix = matrix.reshape(10, 10)
        raster_path = pygeoprocessing.testing.create_raster_on_disk(
            [matrix],
            srs.origin,
            srs.projection,
            -1,
            srs.pixel_size(100),
            datatype=gdal.GDT_Int32,
            filename=raster_path)
        percentiles = [1, 25, 50, 75]
        results = wave_energy._calculate_percentiles_from_raster(
            raster_path, percentiles)
        expected_results = [1, 25, 50, 75]
        for res, exp_res in zip(results, expected_results):
            self.assertEqual(res, exp_res)
    def test_calculate_min_distances(self):
        """WaveEnergy: testing '_calculate_min_distances' function."""
        from natcap.invest import wave_energy
        srs = sampledata.SRS_WILLAMETTE
        pos_x = srs.origin[0]
        pos_y = srs.origin[1]
        # Two vertical lines of points 100m apart: each point in set_one has
        # its nearest neighbour at the same index in set_two.
        set_one = numpy.array([[pos_x, pos_y], [pos_x, pos_y - 100],
                               [pos_x, pos_y - 200]])
        set_two = numpy.array([[pos_x + 100,
                                pos_y], [pos_x + 100, pos_y - 100],
                               [pos_x + 100, pos_y - 200]])
        result_dist, result_id = wave_energy._calculate_min_distances(
            set_one, set_two)
        expected_result_dist = [100, 100, 100]
        expected_result_id = [0, 1, 2]
        for res, exp_res in zip(result_dist, expected_result_dist):
            self.assertEqual(res, exp_res)
        for res, exp_res in zip(result_id, expected_result_id):
            self.assertEqual(res, exp_res)
    def test_clip_vector_by_vector_polygons(self):
        """WaveEnergy: testing clipping polygons from polygons."""
        from natcap.invest import wave_energy
        aoi_path = os.path.join(REGRESSION_DATA, 'aoi_proj_to_extract.shp')
        extract_path = os.path.join(
            SAMPLE_DATA, 'WaveData', 'Global_extract.shp')
        result_path = os.path.join(self.workspace_dir, 'aoi_proj_clipped.shp')
        target_projection = pygeoprocessing.get_vector_info(
            extract_path)['projection']
        wave_energy._clip_vector_by_vector(
            aoi_path, extract_path, result_path, target_projection,
            self.workspace_dir)
        expected_path = os.path.join(REGRESSION_DATA, 'aoi_proj_clipped.shp')
        WaveEnergyRegressionTests._assert_point_vectors_equal(
            result_path, expected_path)
    def test_clip_vector_by_vector_points(self):
        """WaveEnergy: testing clipping points from polygons."""
        from natcap.invest import wave_energy
        srs = sampledata.SRS_WILLAMETTE
        pos_x = srs.origin[0]
        pos_y = srs.origin[1]
        fields_pt = {'id': 'int', 'myattr': 'string'}
        attrs_one = [{
            'id': 1,
            'myattr': 'hello'
        }, {
            'id': 2,
            'myattr': 'bye'
        }, {
            'id': 3,
            'myattr': 'highbye'
        }]
        fields_poly = {'id': 'int'}
        attrs_poly = [{'id': 1}]
        # Create geometry for the points, which will get clipped
        geom_one = [
            Point(pos_x + 20, pos_y - 20),
            Point(pos_x + 40, pos_y - 20),
            Point(pos_x + 100, pos_y - 20)
        ]
        # Create geometry for the polygons, which will be used to clip
        geom_two = [
            Polygon([(pos_x, pos_y), (pos_x + 60, pos_y),
                     (pos_x + 60, pos_y - 60), (pos_x, pos_y - 60),
                     (pos_x, pos_y)])
        ]
        shape_to_clip_path = os.path.join(self.workspace_dir,
                                          'shape_to_clip.shp')
        # Create the point shapefile
        shape_to_clip_path = pygeoprocessing.testing.create_vector_on_disk(
            geom_one,
            srs.projection,
            fields_pt,
            attrs_one,
            vector_format='ESRI Shapefile',
            filename=shape_to_clip_path)
        binding_shape_path = os.path.join(self.workspace_dir,
                                          'binding_shape.shp')
        # Create the polygon shapefile
        binding_shape_path = pygeoprocessing.testing.create_vector_on_disk(
            geom_two,
            srs.projection,
            fields_poly,
            attrs_poly,
            vector_format='ESRI Shapefile',
            filename=binding_shape_path)
        output_path = os.path.join(self.workspace_dir, 'vector.shp')
        # Call the function to test
        wave_energy._clip_vector_by_vector(
            shape_to_clip_path, binding_shape_path, output_path,
            srs.projection, self.workspace_dir)
        # Create the expected point shapefile
        # (the third point lies outside the 60x60 polygon, so only two
        # points should survive the clip)
        fields_pt = {'id': 'int', 'myattr': 'string'}
        attrs_one = [{'id': 1, 'myattr': 'hello'}, {'id': 2, 'myattr': 'bye'}]
        geom_three = [
            Point(pos_x + 20, pos_y - 20),
            Point(pos_x + 40, pos_y - 20)
        ]
        # Need to save the expected shapefile in a sub folder since it must
        # have the same layer name / filename as what it will be compared
        # against.
        if not os.path.isdir(os.path.join(self.workspace_dir, 'exp_vector')):
            os.mkdir(os.path.join(self.workspace_dir, 'exp_vector'))
        expected_path = os.path.join(self.workspace_dir, 'exp_vector',
                                     'vector.shp')
        expected_shape = pygeoprocessing.testing.create_vector_on_disk(
            geom_three,
            srs.projection,
            fields_pt,
            attrs_one,
            vector_format='ESRI Shapefile',
            filename=expected_path)
        WaveEnergyRegressionTests._assert_point_vectors_equal(
            output_path, expected_shape)
    def test_clip_vector_by_vector_no_intersection(self):
        """WaveEnergy: testing '_clip_vector_by_vector' w/ no intersection."""
        from natcap.invest import wave_energy
        srs = sampledata.SRS_WILLAMETTE
        pos_x = srs.origin[0]
        pos_y = srs.origin[1]
        fields_pt = {'id': 'int', 'myattr': 'string'}
        attrs_one = [{'id': 1, 'myattr': 'hello'}]
        fields_poly = {'id': 'int'}
        attrs_poly = [{'id': 1}]
        # Create geometry for the points, which will get clipped
        # (deliberately placed outside the clip polygon below)
        geom_one = [Point(pos_x + 220, pos_y - 220)]
        # Create geometry for the polygons, which will be used to clip
        geom_two = [
            Polygon([(pos_x, pos_y), (pos_x + 60, pos_y),
                     (pos_x + 60, pos_y - 60), (pos_x, pos_y - 60),
                     (pos_x, pos_y)])
        ]
        shape_to_clip_path = os.path.join(self.workspace_dir,
                                          'shape_to_clip.shp')
        # Create the point shapefile
        shape_to_clip_path = pygeoprocessing.testing.create_vector_on_disk(
            geom_one,
            srs.projection,
            fields_pt,
            attrs_one,
            vector_format='ESRI Shapefile',
            filename=shape_to_clip_path)
        binding_shape_path = os.path.join(self.workspace_dir,
                                          'binding_shape.shp')
        # Create the polygon shapefile
        binding_shape_path = pygeoprocessing.testing.create_vector_on_disk(
            geom_two,
            srs.projection,
            fields_poly,
            attrs_poly,
            vector_format='ESRI Shapefile',
            filename=binding_shape_path)
        output_path = os.path.join(self.workspace_dir, 'vector.shp')
        # Call the function to test
        self.assertRaises(wave_energy.IntersectionError,
                          wave_energy._clip_vector_by_vector,
                          shape_to_clip_path,
                          binding_shape_path,
                          output_path,
                          srs.projection,
                          self.workspace_dir)
    def test_binary_wave_data_to_dict(self):
        """WaveEnergy: testing '_binary_wave_data_to_dict' function."""
        from natcap.invest import wave_energy
        wave_file_path = os.path.join(REGRESSION_DATA, 'example_ww3_binary.bin')
        result = wave_energy._binary_wave_data_to_dict(wave_file_path)
        # Expected structure: period/height bin edges plus a per-(I, J)
        # matrix of wave counts.
        exp_res = {
            'periods': numpy.array([.375, 1, 1.5, 2.0], dtype=numpy.float32),
            'heights': numpy.array([.375, 1], dtype=numpy.float32),
            'bin_matrix': {
                (102, 370):
                numpy.array(
                    [[0, 0, 0, 0], [0, 9, 3, 30]], dtype=numpy.float32),
                (102, 371):
                numpy.array(
                    [[0, 0, 0, 0], [0, 0, 3, 27]], dtype=numpy.float32)
            }
        }
        for key in ['periods', 'heights']:
            numpy.testing.assert_array_equal(result[key], exp_res[key])
        for key in [(102, 370), (102, 371)]:
            numpy.testing.assert_array_equal(result['bin_matrix'][key],
                                             exp_res['bin_matrix'][key])
class WaveEnergyRegressionTests(unittest.TestCase):
    """Regression tests for the Wave Energy module."""
    # NOTE(review): these tests run the full wave_energy model against
    # checked-in sample data and compare outputs to baselines under
    # REGRESSION_DATA; they need natcap.invest, pygeoprocessing, and GDAL/OGR.
    def setUp(self):
        """Overriding setUp function to create temp workspace directory."""
        # this lets us delete the workspace after its done no matter the
        # the rest result
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Overriding tearDown function to remove temporary directory."""
        shutil.rmtree(self.workspace_dir)
    @staticmethod
    def generate_base_args(workspace_dir):
        """Generate an args list that is consistent across regression tests."""
        args = {
            'workspace_dir':
            workspace_dir,
            'wave_base_data_path':
            os.path.join(SAMPLE_DATA, 'WaveData'),
            'analysis_area_path':
            'West Coast of North America and Hawaii',
            'machine_perf_path':
            os.path.join(SAMPLE_DATA, 'Machine_Pelamis_Performance.csv'),
            'machine_param_path':
            os.path.join(SAMPLE_DATA, 'Machine_Pelamis_Parameter.csv'),
            'dem_path':
            os.path.join(SAMPLE_DATA, 'resampled_global_dem.tif'),
            'n_workers': -1
        }
        return args
    def test_valuation(self):
        """WaveEnergy: testing valuation component."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        args['aoi_path'] = os.path.join(SAMPLE_DATA, 'AOI_WCVI.shp')
        args['valuation_container'] = True
        args['land_gridPts_path'] = os.path.join(
            SAMPLE_DATA, 'LandGridPts_WCVI.csv')
        args['machine_econ_path'] = os.path.join(
            SAMPLE_DATA, 'Machine_Pelamis_Economic.csv')
        args['number_of_machines'] = 28
        # Testing if intermediate/output were overwritten
        _make_empty_files(args['workspace_dir'])
        wave_energy.execute(args)
        raster_results = [
            'wp_rc.tif', 'wp_kw.tif', 'capwe_rc.tif', 'capwe_mwh.tif',
            'npv_rc.tif', 'npv_usd.tif'
        ]
        for raster_path in raster_results:
            pygeoprocessing.testing.assert_rasters_equal(
                os.path.join(args['workspace_dir'], 'output', raster_path),
                os.path.join(REGRESSION_DATA, 'valuation', raster_path), 1e-6)
        vector_results = ['GridPts_prj.shp', 'LandPts_prj.shp']
        for vector_path in vector_results:
            WaveEnergyRegressionTests._assert_point_vectors_equal(
                os.path.join(args['workspace_dir'], 'output', vector_path),
                os.path.join(REGRESSION_DATA, 'valuation', vector_path))
        table_results = ['capwe_rc.csv', 'wp_rc.csv', 'npv_rc.csv']
        for table_path in table_results:
            pygeoprocessing.testing.assert_csv_equal(
                os.path.join(args['workspace_dir'], 'output', table_path),
                os.path.join(REGRESSION_DATA, 'valuation', table_path))
    def test_aoi_no_val(self):
        """WaveEnergy: testing Biophysical component w AOI but w/o valuation."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        args['aoi_path'] = os.path.join(SAMPLE_DATA, 'AOI_WCVI.shp')
        wave_energy.execute(args)
        raster_results = [
            'wp_rc.tif', 'wp_kw.tif', 'capwe_rc.tif', 'capwe_mwh.tif'
        ]
        for raster_path in raster_results:
            pygeoprocessing.testing.assert_rasters_equal(
                os.path.join(args['workspace_dir'], 'output', raster_path),
                os.path.join(REGRESSION_DATA, 'aoi', raster_path), 1e-6)
        table_results = ['capwe_rc.csv', 'wp_rc.csv']
        for table_path in table_results:
            pygeoprocessing.testing.assert_csv_equal(
                os.path.join(args['workspace_dir'], 'output', table_path),
                os.path.join(REGRESSION_DATA, 'aoi', table_path), 1e-6)
    def test_no_aoi_or_val(self):
        """WaveEnergy: testing Biophysical component w/o AOI or valuation."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        wave_energy.execute(args)
        raster_results = [
            'wp_rc.tif', 'wp_kw.tif', 'capwe_rc.tif', 'capwe_mwh.tif'
        ]
        for raster_path in raster_results:
            pygeoprocessing.testing.assert_rasters_equal(
                os.path.join(args['workspace_dir'], 'output', raster_path),
                os.path.join(REGRESSION_DATA, 'noaoi', raster_path), 1e-6)
        table_results = ['capwe_rc.csv', 'wp_rc.csv']
        for table_path in table_results:
            pygeoprocessing.testing.assert_csv_equal(
                os.path.join(args['workspace_dir'], 'output', table_path),
                os.path.join(REGRESSION_DATA, 'noaoi', table_path), 1e-6)
    def test_valuation_suffix(self):
        """WaveEnergy: testing suffix through Valuation."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        args['aoi_path'] = os.path.join(SAMPLE_DATA, 'AOI_WCVI.shp')
        args['valuation_container'] = True
        args['land_gridPts_path'] = os.path.join(
            SAMPLE_DATA, 'LandGridPts_WCVI.csv')
        args['machine_econ_path'] = os.path.join(
            SAMPLE_DATA, 'Machine_Pelamis_Economic.csv')
        args['number_of_machines'] = 28
        args['suffix'] = 'val'
        wave_energy.execute(args)
        # Only existence is checked here; content is covered by
        # test_valuation above.
        raster_results = [
            'wp_rc_val.tif', 'wp_kw_val.tif', 'capwe_rc_val.tif',
            'capwe_mwh_val.tif', 'npv_rc_val.tif', 'npv_usd_val.tif'
        ]
        for raster_path in raster_results:
            self.assertTrue(
                os.path.exists(
                    os.path.join(args['workspace_dir'], 'output',
                                 raster_path)))
        vector_results = ['GridPts_prj_val.shp', 'LandPts_prj_val.shp']
        for vector_path in vector_results:
            self.assertTrue(
                os.path.exists(
                    os.path.join(args['workspace_dir'], 'output',
                                 vector_path)))
        table_results = ['capwe_rc_val.csv', 'wp_rc_val.csv', 'npv_rc_val.csv']
        for table_path in table_results:
            self.assertTrue(
                os.path.exists(
                    os.path.join(args['workspace_dir'], 'output', table_path)))
    def test_missing_required_keys(self):
        """WaveEnergy: testing missing required keys from args."""
        from natcap.invest import wave_energy
        args = {}
        with self.assertRaises(KeyError) as cm:
            wave_energy.execute(args)
        expected_message = (
            "Keys are missing from args: ['workspace_dir', " +
            "'wave_base_data_path', 'analysis_area_path', " +
            "'machine_perf_path', 'machine_param_path', 'dem_path']")
        actual_message = str(cm.exception)
        self.assertTrue(expected_message in actual_message, actual_message)
    def test_incorrect_analysis_area_path_value(self):
        """WaveEnergy: testing incorrect analysis_area_path value."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        args['analysis_area_path'] = 'Incorrect Analysis Area'
        with self.assertRaises(ValueError) as cm:
            wave_energy.execute(args)
        expected_message = (
            "'analysis_area_path'], 'Parameter must be a known analysis area.")
        actual_message = str(cm.exception)
        self.assertTrue(expected_message in actual_message, actual_message)
    def test_validate_keys_missing_values(self):
        """WaveEnergy: testing validate when keys are missing values."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        args['wave_base_data_path'] = None
        args['dem_path'] = None
        validation_error_list = wave_energy.validate(args)
        expected_errors = [
            (['wave_base_data_path'],
             'Parameter not found or is not a folder.'),
            (['dem_path'],
             'Parameter must be a filepath to a GDAL-compatible raster file.')]
        for expected_error in expected_errors:
            self.assertTrue(expected_error in validation_error_list)
    def test_validate_bad_aoi_format(self):
        """WaveEnergy: testing bad AOI vector format with validate."""
        from natcap.invest import wave_energy
        args = WaveEnergyRegressionTests.generate_base_args(self.workspace_dir)
        args['aoi_path'] = os.path.join(SAMPLE_DATA, 'bad_AOI_WCVI.shp')
        validation_error_list = wave_energy.validate(args)
        expected_errors = [
            (['aoi_path'], 'Vector must contain only polygons.'),
            (['aoi_path'], 'Vector must be projected in meters.'),
            (['aoi_path'], 'Vector must use the WGS_1984 datum.'),]
        for expected_error in expected_errors:
            self.assertTrue(expected_error in validation_error_list)
    @staticmethod
    def _assert_point_vectors_equal(a_path, b_path):
        """Assert that two point geometries in the vectors are equal.
        Parameters:
            a_path (str): a path to an OGR vector.
            b_path (str): a path to an OGR vector.
        Returns:
            None.
        Raises:
            AssertionError when the two point geometries are not equal up to
            desired precision (default is 6).
        """
        a_shape = ogr.Open(a_path)
        a_layer = a_shape.GetLayer(0)
        a_feat = a_layer.GetNextFeature()
        b_shape = ogr.Open(b_path)
        b_layer = b_shape.GetLayer(0)
        b_feat = b_layer.GetNextFeature()
        while a_feat is not None:
            # Get coordinates from point geometry and store them in a list
            a_geom = a_feat.GetGeometryRef()
            a_geom_list = re.findall(r'\d+\.\d+', a_geom.ExportToWkt())
            a_geom_list = [float(x) for x in a_geom_list]
            b_geom = b_feat.GetGeometryRef()
            b_geom_list = re.findall(r'\d+\.\d+', b_geom.ExportToWkt())
            b_geom_list = [float(x) for x in b_geom_list]
            try:
                numpy.testing.assert_array_almost_equal(
                    a_geom_list, b_geom_list)
            except AssertionError:
                a_feature_fid = a_feat.GetFID()
                b_feature_fid = b_feat.GetFID()
                raise AssertionError('Geometries are not equal in feature %s, '
                                     'regression feature %s in layer 0' %
                                     (a_feature_fid, b_feature_fid))
            # Release feature references before advancing (OGR resource
            # hygiene), then fetch the next pair.
            a_feat = None
            b_feat = None
            a_feat = a_layer.GetNextFeature()
            b_feat = b_layer.GetNextFeature()
        a_shape = None
        b_shape = None
| [
"natcap.invest.wave_energy._pixel_size_based_on_coordinate_transform",
"shapely.geometry.Point",
"numpy.array",
"shapely.geometry.Polygon",
"natcap.invest.wave_energy._count_pixels_groups",
"numpy.arange",
"osgeo.osr.CoordinateTransformation",
"os.path.exists",
"numpy.testing.assert_array_almost_equ... | [((504, 542), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""input"""'], {}), "(REGRESSION_DATA, 'input')\n", (516, 542), False, 'import os\n'), ((413, 438), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (428, 438), False, 'import os\n'), ((1400, 1435), 'os.path.join', 'os.path.join', (['workspace_dir', 'folder'], {}), '(workspace_dir, folder)\n', (1412, 1435), False, 'import os\n'), ((2012, 2030), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2028, 2030), False, 'import tempfile\n'), ((2142, 2175), 'shutil.rmtree', 'shutil.rmtree', (['self.workspace_dir'], {}), '(self.workspace_dir)\n', (2155, 2175), False, 'import shutil\n'), ((2466, 2488), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (2486, 2488), False, 'from osgeo import osr, ogr\n'), ((2623, 2645), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (2643, 2645), False, 'from osgeo import osr, ogr\n'), ((3141, 3182), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1], [1, 1, 1, 1]]'], {}), '([[1, 1, 1, 1], [1, 1, 1, 1]])\n', (3152, 3182), False, 'import numpy\n'), ((3205, 3257), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""input_raster.tif"""'], {}), "(self.workspace_dir, 'input_raster.tif')\n", (3217, 3257), False, 'import os\n'), ((3819, 3841), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (3839, 3841), False, 'from osgeo import osr, ogr\n'), ((4022, 4071), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['raster_sr', 'spat_ref'], {}), '(raster_sr, spat_ref)\n', (4050, 4071), False, 'from osgeo import osr, ogr\n'), ((4127, 4217), 'natcap.invest.wave_energy._pixel_size_based_on_coordinate_transform', 'wave_energy._pixel_size_based_on_coordinate_transform', (['raster_path', 'coord_trans', 'point'], {}), '(raster_path,\n coord_trans, point)\n', (4180, 4217), False, 'from natcap.invest import wave_energy\n'), 
((4598, 4650), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""pixel_groups.tif"""'], {}), "(self.workspace_dir, 'pixel_groups.tif')\n", (4610, 4650), False, 'import os\n'), ((4749, 4804), 'numpy.array', 'numpy.array', (['[[1, 3, 5, 9], [3, 7, 1, 5], [2, 4, 5, 7]]'], {}), '([[1, 3, 5, 9], [3, 7, 1, 5], [2, 4, 5, 7]])\n', (4760, 4804), False, 'import numpy\n'), ((5149, 5208), 'natcap.invest.wave_energy._count_pixels_groups', 'wave_energy._count_pixels_groups', (['raster_path', 'group_values'], {}), '(raster_path, group_values)\n', (5181, 5208), False, 'from natcap.invest import wave_energy\n'), ((5596, 5646), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""percentile.tif"""'], {}), "(self.workspace_dir, 'percentile.tif')\n", (5608, 5646), False, 'import os\n'), ((5708, 5728), 'numpy.arange', 'numpy.arange', (['(1)', '(101)'], {}), '(1, 101)\n', (5720, 5728), False, 'import numpy\n'), ((6103, 6175), 'natcap.invest.wave_energy._calculate_percentiles_from_raster', 'wave_energy._calculate_percentiles_from_raster', (['raster_path', 'percentiles'], {}), '(raster_path, percentiles)\n', (6149, 6175), False, 'from natcap.invest import wave_energy\n'), ((6635, 6708), 'numpy.array', 'numpy.array', (['[[pos_x, pos_y], [pos_x, pos_y - 100], [pos_x, pos_y - 200]]'], {}), '([[pos_x, pos_y], [pos_x, pos_y - 100], [pos_x, pos_y - 200]])\n', (6646, 6708), False, 'import numpy\n'), ((6760, 6855), 'numpy.array', 'numpy.array', (['[[pos_x + 100, pos_y], [pos_x + 100, pos_y - 100], [pos_x + 100, pos_y - 200]]'], {}), '([[pos_x + 100, pos_y], [pos_x + 100, pos_y - 100], [pos_x + 100,\n pos_y - 200]])\n', (6771, 6855), False, 'import numpy\n'), ((6953, 7007), 'natcap.invest.wave_energy._calculate_min_distances', 'wave_energy._calculate_min_distances', (['set_one', 'set_two'], {}), '(set_one, set_two)\n', (6989, 7007), False, 'from natcap.invest import wave_energy\n'), ((7527, 7583), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', 
'"""aoi_proj_to_extract.shp"""'], {}), "(REGRESSION_DATA, 'aoi_proj_to_extract.shp')\n", (7539, 7583), False, 'import os\n'), ((7608, 7667), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""WaveData"""', '"""Global_extract.shp"""'], {}), "(SAMPLE_DATA, 'WaveData', 'Global_extract.shp')\n", (7620, 7667), False, 'import os\n'), ((7707, 7763), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""aoi_proj_clipped.shp"""'], {}), "(self.workspace_dir, 'aoi_proj_clipped.shp')\n", (7719, 7763), False, 'import os\n'), ((7876, 7990), 'natcap.invest.wave_energy._clip_vector_by_vector', 'wave_energy._clip_vector_by_vector', (['aoi_path', 'extract_path', 'result_path', 'target_projection', 'self.workspace_dir'], {}), '(aoi_path, extract_path, result_path,\n target_projection, self.workspace_dir)\n', (7910, 7990), False, 'from natcap.invest import wave_energy\n'), ((8041, 8094), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""aoi_proj_clipped.shp"""'], {}), "(REGRESSION_DATA, 'aoi_proj_clipped.shp')\n", (8053, 8094), False, 'import os\n'), ((9358, 9411), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""shape_to_clip.shp"""'], {}), "(self.workspace_dir, 'shape_to_clip.shp')\n", (9370, 9411), False, 'import os\n'), ((9789, 9842), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""binding_shape.shp"""'], {}), "(self.workspace_dir, 'binding_shape.shp')\n", (9801, 9842), False, 'import os\n'), ((10218, 10264), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""vector.shp"""'], {}), "(self.workspace_dir, 'vector.shp')\n", (10230, 10264), False, 'import os\n'), ((10311, 10438), 'natcap.invest.wave_energy._clip_vector_by_vector', 'wave_energy._clip_vector_by_vector', (['shape_to_clip_path', 'binding_shape_path', 'output_path', 'srs.projection', 'self.workspace_dir'], {}), '(shape_to_clip_path, binding_shape_path,\n output_path, srs.projection, self.workspace_dir)\n', (10345, 10438), False, 'from natcap.invest import wave_energy\n'), 
((11116, 11176), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""exp_vector"""', '"""vector.shp"""'], {}), "(self.workspace_dir, 'exp_vector', 'vector.shp')\n", (11128, 11176), False, 'import os\n'), ((12479, 12532), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""shape_to_clip.shp"""'], {}), "(self.workspace_dir, 'shape_to_clip.shp')\n", (12491, 12532), False, 'import os\n'), ((12910, 12963), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""binding_shape.shp"""'], {}), "(self.workspace_dir, 'binding_shape.shp')\n", (12922, 12963), False, 'import os\n'), ((13339, 13385), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""vector.shp"""'], {}), "(self.workspace_dir, 'vector.shp')\n", (13351, 13385), False, 'import os\n'), ((13964, 14019), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""example_ww3_binary.bin"""'], {}), "(REGRESSION_DATA, 'example_ww3_binary.bin')\n", (13976, 14019), False, 'import os\n'), ((14040, 14093), 'natcap.invest.wave_energy._binary_wave_data_to_dict', 'wave_energy._binary_wave_data_to_dict', (['wave_file_path'], {}), '(wave_file_path)\n', (14077, 14093), False, 'from natcap.invest import wave_energy\n'), ((15244, 15262), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (15260, 15262), False, 'import tempfile\n'), ((15374, 15407), 'shutil.rmtree', 'shutil.rmtree', (['self.workspace_dir'], {}), '(self.workspace_dir)\n', (15387, 15407), False, 'import shutil\n'), ((16426, 16467), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""AOI_WCVI.shp"""'], {}), "(SAMPLE_DATA, 'AOI_WCVI.shp')\n", (16438, 16467), False, 'import os\n'), ((16549, 16598), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""LandGridPts_WCVI.csv"""'], {}), "(SAMPLE_DATA, 'LandGridPts_WCVI.csv')\n", (16561, 16598), False, 'import os\n'), ((16650, 16707), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""Machine_Pelamis_Economic.csv"""'], {}), "(SAMPLE_DATA, 'Machine_Pelamis_Economic.csv')\n", (16662, 16707), 
False, 'import os\n'), ((16885, 16910), 'natcap.invest.wave_energy.execute', 'wave_energy.execute', (['args'], {}), '(args)\n', (16904, 16910), False, 'from natcap.invest import wave_energy\n'), ((18252, 18293), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""AOI_WCVI.shp"""'], {}), "(SAMPLE_DATA, 'AOI_WCVI.shp')\n", (18264, 18293), False, 'import os\n'), ((18305, 18330), 'natcap.invest.wave_energy.execute', 'wave_energy.execute', (['args'], {}), '(args)\n', (18324, 18330), False, 'from natcap.invest import wave_energy\n'), ((19261, 19286), 'natcap.invest.wave_energy.execute', 'wave_energy.execute', (['args'], {}), '(args)\n', (19280, 19286), False, 'from natcap.invest import wave_energy\n'), ((20223, 20264), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""AOI_WCVI.shp"""'], {}), "(SAMPLE_DATA, 'AOI_WCVI.shp')\n", (20235, 20264), False, 'import os\n'), ((20346, 20395), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""LandGridPts_WCVI.csv"""'], {}), "(SAMPLE_DATA, 'LandGridPts_WCVI.csv')\n", (20358, 20395), False, 'import os\n'), ((20447, 20504), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""Machine_Pelamis_Economic.csv"""'], {}), "(SAMPLE_DATA, 'Machine_Pelamis_Economic.csv')\n", (20459, 20504), False, 'import os\n'), ((20603, 20628), 'natcap.invest.wave_energy.execute', 'wave_energy.execute', (['args'], {}), '(args)\n', (20622, 20628), False, 'from natcap.invest import wave_energy\n'), ((23234, 23260), 'natcap.invest.wave_energy.validate', 'wave_energy.validate', (['args'], {}), '(args)\n', (23254, 23260), False, 'from natcap.invest import wave_energy\n'), ((23891, 23936), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""bad_AOI_WCVI.shp"""'], {}), "(SAMPLE_DATA, 'bad_AOI_WCVI.shp')\n", (23903, 23936), False, 'import os\n'), ((23972, 23998), 'natcap.invest.wave_energy.validate', 'wave_energy.validate', (['args'], {}), '(args)\n', (23992, 23998), False, 'from natcap.invest import wave_energy\n'), ((24838, 24854), 'osgeo.ogr.Open', 
'ogr.Open', (['a_path'], {}), '(a_path)\n', (24846, 24854), False, 'from osgeo import osr, ogr\n'), ((24958, 24974), 'osgeo.ogr.Open', 'ogr.Open', (['b_path'], {}), '(b_path)\n', (24966, 24974), False, 'from osgeo import osr, ogr\n'), ((1452, 1479), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (1466, 1479), False, 'import os\n'), ((1494, 1518), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (1505, 1518), False, 'import os\n'), ((8924, 8953), 'shapely.geometry.Point', 'Point', (['(pos_x + 20)', '(pos_y - 20)'], {}), '(pos_x + 20, pos_y - 20)\n', (8929, 8953), False, 'from shapely.geometry import Point\n'), ((8968, 8997), 'shapely.geometry.Point', 'Point', (['(pos_x + 40)', '(pos_y - 20)'], {}), '(pos_x + 40, pos_y - 20)\n', (8973, 8997), False, 'from shapely.geometry import Point\n'), ((9012, 9042), 'shapely.geometry.Point', 'Point', (['(pos_x + 100)', '(pos_y - 20)'], {}), '(pos_x + 100, pos_y - 20)\n', (9017, 9042), False, 'from shapely.geometry import Point\n'), ((9161, 9275), 'shapely.geometry.Polygon', 'Polygon', (['[(pos_x, pos_y), (pos_x + 60, pos_y), (pos_x + 60, pos_y - 60), (pos_x, \n pos_y - 60), (pos_x, pos_y)]'], {}), '([(pos_x, pos_y), (pos_x + 60, pos_y), (pos_x + 60, pos_y - 60), (\n pos_x, pos_y - 60), (pos_x, pos_y)])\n', (9168, 9275), False, 'from shapely.geometry import Polygon\n'), ((10683, 10712), 'shapely.geometry.Point', 'Point', (['(pos_x + 20)', '(pos_y - 20)'], {}), '(pos_x + 20, pos_y - 20)\n', (10688, 10712), False, 'from shapely.geometry import Point\n'), ((10727, 10756), 'shapely.geometry.Point', 'Point', (['(pos_x + 40)', '(pos_y - 20)'], {}), '(pos_x + 40, pos_y - 20)\n', (10732, 10756), False, 'from shapely.geometry import Point\n'), ((12142, 12173), 'shapely.geometry.Point', 'Point', (['(pos_x + 220)', '(pos_y - 220)'], {}), '(pos_x + 220, pos_y - 220)\n', (12147, 12173), False, 'from shapely.geometry import Point\n'), ((12282, 12396), 'shapely.geometry.Polygon', 'Polygon', 
(['[(pos_x, pos_y), (pos_x + 60, pos_y), (pos_x + 60, pos_y - 60), (pos_x, \n pos_y - 60), (pos_x, pos_y)]'], {}), '([(pos_x, pos_y), (pos_x + 60, pos_y), (pos_x + 60, pos_y - 60), (\n pos_x, pos_y - 60), (pos_x, pos_y)])\n', (12289, 12396), False, 'from shapely.geometry import Polygon\n'), ((14141, 14195), 'numpy.array', 'numpy.array', (['[0.375, 1, 1.5, 2.0]'], {'dtype': 'numpy.float32'}), '([0.375, 1, 1.5, 2.0], dtype=numpy.float32)\n', (14152, 14195), False, 'import numpy\n'), ((14220, 14264), 'numpy.array', 'numpy.array', (['[0.375, 1]'], {'dtype': 'numpy.float32'}), '([0.375, 1], dtype=numpy.float32)\n', (14231, 14264), False, 'import numpy\n'), ((14644, 14703), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['result[key]', 'exp_res[key]'], {}), '(result[key], exp_res[key])\n', (14676, 14703), False, 'import numpy\n'), ((14765, 14857), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (["result['bin_matrix'][key]", "exp_res['bin_matrix'][key]"], {}), "(result['bin_matrix'][key], exp_res[\n 'bin_matrix'][key])\n", (14797, 14857), False, 'import numpy\n'), ((15679, 15716), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""WaveData"""'], {}), "(SAMPLE_DATA, 'WaveData')\n", (15691, 15716), False, 'import os\n'), ((15855, 15915), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""Machine_Pelamis_Performance.csv"""'], {}), "(SAMPLE_DATA, 'Machine_Pelamis_Performance.csv')\n", (15867, 15915), False, 'import os\n'), ((15965, 16023), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""Machine_Pelamis_Parameter.csv"""'], {}), "(SAMPLE_DATA, 'Machine_Pelamis_Parameter.csv')\n", (15977, 16023), False, 'import os\n'), ((16063, 16116), 'os.path.join', 'os.path.join', (['SAMPLE_DATA', '"""resampled_global_dem.tif"""'], {}), "(SAMPLE_DATA, 'resampled_global_dem.tif')\n", (16075, 16116), False, 'import os\n'), ((21846, 21871), 'natcap.invest.wave_energy.execute', 'wave_energy.execute', (['args'], {}), '(args)\n', (21865, 
21871), False, 'from natcap.invest import wave_energy\n'), ((22608, 22633), 'natcap.invest.wave_energy.execute', 'wave_energy.execute', (['args'], {}), '(args)\n', (22627, 22633), False, 'from natcap.invest import wave_energy\n'), ((10970, 11016), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""exp_vector"""'], {}), "(self.workspace_dir, 'exp_vector')\n", (10982, 11016), False, 'import os\n'), ((11041, 11087), 'os.path.join', 'os.path.join', (['self.workspace_dir', '"""exp_vector"""'], {}), "(self.workspace_dir, 'exp_vector')\n", (11053, 11087), False, 'import os\n'), ((14340, 14403), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0], [0, 9, 3, 30]]'], {'dtype': 'numpy.float32'}), '([[0, 0, 0, 0], [0, 9, 3, 30]], dtype=numpy.float32)\n', (14351, 14403), False, 'import numpy\n'), ((14473, 14536), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0], [0, 0, 3, 27]]'], {'dtype': 'numpy.float32'}), '([[0, 0, 0, 0], [0, 0, 3, 27]], dtype=numpy.float32)\n', (14484, 14536), False, 'import numpy\n'), ((17187, 17245), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'raster_path'], {}), "(args['workspace_dir'], 'output', raster_path)\n", (17199, 17245), False, 'import os\n'), ((17264, 17319), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""valuation"""', 'raster_path'], {}), "(REGRESSION_DATA, 'valuation', raster_path)\n", (17276, 17319), False, 'import os\n'), ((17525, 17583), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'vector_path'], {}), "(args['workspace_dir'], 'output', vector_path)\n", (17537, 17583), False, 'import os\n'), ((17602, 17657), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""valuation"""', 'vector_path'], {}), "(REGRESSION_DATA, 'valuation', vector_path)\n", (17614, 17657), False, 'import os\n'), ((17846, 17903), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'table_path'], {}), "(args['workspace_dir'], 'output', table_path)\n", (17858, 17903), 
False, 'import os\n'), ((17922, 17976), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""valuation"""', 'table_path'], {}), "(REGRESSION_DATA, 'valuation', table_path)\n", (17934, 17976), False, 'import os\n'), ((18565, 18623), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'raster_path'], {}), "(args['workspace_dir'], 'output', raster_path)\n", (18577, 18623), False, 'import os\n'), ((18642, 18691), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""aoi"""', 'raster_path'], {}), "(REGRESSION_DATA, 'aoi', raster_path)\n", (18654, 18691), False, 'import os\n'), ((18872, 18929), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'table_path'], {}), "(args['workspace_dir'], 'output', table_path)\n", (18884, 18929), False, 'import os\n'), ((18948, 18996), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""aoi"""', 'table_path'], {}), "(REGRESSION_DATA, 'aoi', table_path)\n", (18960, 18996), False, 'import os\n'), ((19521, 19579), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'raster_path'], {}), "(args['workspace_dir'], 'output', raster_path)\n", (19533, 19579), False, 'import os\n'), ((19598, 19649), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""noaoi"""', 'raster_path'], {}), "(REGRESSION_DATA, 'noaoi', raster_path)\n", (19610, 19649), False, 'import os\n'), ((19830, 19887), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'table_path'], {}), "(args['workspace_dir'], 'output', table_path)\n", (19842, 19887), False, 'import os\n'), ((19906, 19956), 'os.path.join', 'os.path.join', (['REGRESSION_DATA', '"""noaoi"""', 'table_path'], {}), "(REGRESSION_DATA, 'noaoi', table_path)\n", (19918, 19956), False, 'import os\n'), ((25565, 25630), 'numpy.testing.assert_array_almost_equal', 'numpy.testing.assert_array_almost_equal', (['a_geom_list', 'b_geom_list'], {}), '(a_geom_list, b_geom_list)\n', (25604, 25630), False, 'import numpy\n'), 
((1584, 1620), 'os.path.join', 'os.path.join', (['folder_path', 'file_name'], {}), '(folder_path, file_name)\n', (1596, 1620), False, 'import os\n'), ((20937, 20995), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'raster_path'], {}), "(args['workspace_dir'], 'output', raster_path)\n", (20949, 20995), False, 'import os\n'), ((21237, 21295), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'vector_path'], {}), "(args['workspace_dir'], 'output', vector_path)\n", (21249, 21295), False, 'import os\n'), ((21543, 21600), 'os.path.join', 'os.path.join', (["args['workspace_dir']", '"""output"""', 'table_path'], {}), "(args['workspace_dir'], 'output', table_path)\n", (21555, 21600), False, 'import os\n')] |
"""
Tools for Projected Entangled Pair States
Author: <NAME> <<EMAIL>>
Date: July 2019
.. Note, peps tensors are stored in order:
(5) top
|
(1) left ___|___ (4) right
|\
| \
(2) bottom (3) physical
"""
from cyclopeps.tools.gen_ten import rand,einsum,eye,ones,svd_ten,zeros
#from cyclopeps.tools.params import *
from cyclopeps.tools.utils import *
from cyclopeps.tools.mps_tools import MPS,identity_mps
from numpy import float_
import numpy as np
import copy
FLIP = {'+':'-','-':'+'}
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# PEPS ENVIRONMENT FUNCTIONS
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def copy_tensor_list(ten_list):
    """
    Create a copy of a list of tensors

    Args:
        ten_list : list
            A list of tensor objects, each of which provides a
            copy() method

    Returns:
        ten_list_cp : list
            A new list holding a copy of each tensor in ten_list
    """
    # List comprehension replaces the preallocate-and-index loop
    return [ten.copy() for ten in ten_list]
def init_left_bmpo_sl(bra, ket=None, chi=4, truncate=True,allow_normalize=False):
    """
    Create the initial (leftmost column) boundary mpo for a peps

    Args:
        bra : list
            A list containing the tensors for a single peps column

    Kwargs:
        ket : list
            A second peps column, to use as the ket.
            If None, then the bra col will be used
        chi : int
            The maximum bond dimension for the boundary mpo
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        allow_normalize : bool
            If True (and truncate is True), the first boundary-mps
            tensor is rescaled by the square root of its largest
            absolute entry to keep magnitudes under control

    Returns:
        bound_mps : MPS
            The initial boundary mpo, stored as an MPS object
    """
    mpiprint(3,'Initial Layer of left boundary mpo (sl)')
    # Find size of peps column and dims of tensors
    Ny = len(bra)
    # Work on copies so the caller's tensors are not mutated;
    # default the ket to a copy of the bra column
    bra = copy_tensor_list(bra)
    if ket is None:
        ket = copy_tensor_list(bra)
    # Make list to hold resulting mpo
    bound_mpo = []
    for row in range(Ny):
        # Remove the trivial left bond indices (this is the leftmost
        # column, so the left legs are size-1 placeholders)
        ket[row] = ket[row].remove_empty_ind(0)
        bra[row] = bra[row].remove_empty_ind(0)
        # Contract bra and ket over the shared physical index p
        res = einsum('dpru,DpRU->dDRurU',ket[row],bra[row])
        # Save the symmetry signs and leg groupings before merging,
        # so the identity tensors below can be oriented consistently
        ressgn = res.get_signs()
        resleg = res.legs
        # Merge inds to make it an MPO
        res.merge_inds([0,1])
        res.merge_inds([2,3,4])
        # Append to boundary_mpo
        bound_mpo.append(res)
        # Build identity tensors matching the ket's right (r) and up (u)
        # bond dimensions / quantum-number sectors
        Dr = ket[row].shape[ket[row].legs[2][0]]
        Du = ket[row].shape[ket[row].legs[3][0]]
        Zr = ket[row].qn_sectors[ket[row].legs[2][0]]
        Zu = ket[row].qn_sectors[ket[row].legs[3][0]]
        I1 = eye(Dr,
                 Zr,
                 is_symmetric=ket[row].is_symmetric,
                 backend=ket[row].backend)
        I2 = eye(Du,
                 Zu,
                 is_symmetric=ket[row].is_symmetric,
                 backend=ket[row].backend)
        I3 = eye(Du,
                 Zu,
                 is_symmetric=ket[row].is_symmetric,
                 backend=ket[row].backend)
        # Make sure signs are correct: each identity's first leg must be
        # oppositely signed to the res leg it will contract with
        # (only relevant for symmetric tensors)
        if ressgn is not None:
            if ''.join(ressgn[i] for i in resleg[4]) == ''.join(I1.get_signs()[i] for i in I1.legs[0]):
                I1.flip_signs()
            if ''.join(ressgn[i] for i in resleg[5]) == ''.join(I2.get_signs()[i] for i in I2.legs[0]):
                I2.flip_signs()
            if ''.join(ressgn[i] for i in resleg[3]) == ''.join(I3.get_signs()[i] for i in I3.legs[0]):
                I3.flip_signs()
        # Contract the three identities into a single MPO site tensor
        Itmp = einsum('du,DU->dDuU',I3,I2)
        I = einsum('dDuU,lr->dlDruU',Itmp,I1)
        # Merge inds to make it an MPO
        I.merge_inds([0,1,2])
        I.merge_inds([2,3])
        # Append to the boundary mpo
        bound_mpo.append(I)
    # Put result into an MPS -------------------------------------------
    bound_mps = MPS(bound_mpo)
    # Reduce bond dimension
    if truncate:
        mpiprint(5,'Truncating Boundary MPS')
        if DEBUG:
            mpiprint(6,'Computing initial bmpo norm')
            norm0 = bound_mps.norm()
        bound_mps = bound_mps.apply_svd(chi)
        if DEBUG:
            mpiprint(6,'Computing resulting bmpo norm')
            norm1 = bound_mps.norm()
            mpiprint(0,'Init BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
        # NOTE(review): normalization only happens when truncate is True --
        # confirm this nesting is intentional
        if allow_normalize:
            bound_mps[0] /= bound_mps[0].abs().max()**(1./2.)
    return bound_mps
def left_bmpo_sl_add_ket(ket,bound_mpo,Ny,chi=4,truncate=True,allow_normalize=False):
    """
    Add the ket layer to the boundary mpo

    Args:
        ket : list
            A list containing the tensors for a single peps column
        bound_mpo : MPS
            The current boundary mpo (two sites per peps row)
        Ny : int
            The number of rows in the peps column

    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        allow_normalize : bool
            If True (and truncate is True), rescale the first boundary
            mps tensor by the square root of its largest absolute entry

    Returns:
        bound_mps : MPS
            The boundary mpo with the ket layer absorbed
    """
    mpiprint(4,'Adding Ket')
    # Make list to hold resulting mpo
    bound_mpo_new = []
    for row in range(Ny):
        mpiprint(5,'Adding Site {} to Ket'.format(row))
        # Calculate ket contraction first (so we can use it to determine symmetry signs of identity)
        res = einsum('mln,ldpru->mdrpnu',bound_mpo[2*row+1],ket[row])
        ressgn = res.get_signs()
        resleg = res.legs
        # Reshape it into an MPO (the top row loses its trivial up index)
        if row == Ny-1:
            res = res.remove_empty_ind(len(res.legs)-1)
            res.merge_inds([0,1])
            res.merge_inds([1,2])
        else:
            res.merge_inds([0,1])
            res.merge_inds([1,2])
            res.merge_inds([2,3])
        # Create identity matching the ket's down bond dimension/sectors
        Dd = ket[row].shape[ket[row].legs[1][0]]
        Zd = ket[row].qn_sectors[ket[row].legs[1][0]]
        I1 = eye(Dd,
                 Zd,
                 is_symmetric=ket[row].is_symmetric,
                 backend=ket[row].backend)
        # Adjust symmetry signs so the identity's legs contract
        # consistently with the neighboring bmpo tensor (symmetric case only)
        if ressgn is not None:
            if ''.join(ressgn[i] for i in resleg[0]) == ''.join(ressgn[i] for i in resleg[1]):
                I1.update_signs(''.join(FLIP[bound_mpo[2*row].get_signs()[i]] for i in bound_mpo[2*row].legs[2]) +
                                ''.join(bound_mpo[2*row].get_signs()[i] for i in bound_mpo[2*row].legs[2]))
            else:
                I1.update_signs(''.join(bound_mpo[2*row].get_signs()[i] for i in bound_mpo[2*row].legs[2]) +
                                ''.join(FLIP[bound_mpo[2*row].get_signs()[i]] for i in bound_mpo[2*row].legs[2]))
        # Contract with previous bmpo
        I = einsum('mLn,du->mdLnu',bound_mpo[2*row],I1)
        # Reshape it into an MPO
        I.merge_inds([0,1])
        I.merge_inds([2,3])
        # Append identity to boundary MPO
        bound_mpo_new.append(I)
        # Append ket to boundary MPO
        bound_mpo_new.append(res)
    # Put result into an MPS -------------------------------------------
    bound_mps = MPS(bound_mpo_new)
    # Reduce bond dimension
    if truncate:
        mpiprint(5,'Truncating Boundary MPS')
        if DEBUG:
            mpiprint(6,'Computing initial bmpo norm')
            norm0 = bound_mps.norm()
        bound_mps = bound_mps.apply_svd(chi)
        if DEBUG:
            mpiprint(6,'Computing resulting bmpo norm')
            norm1 = bound_mps.norm()
            mpiprint(0,'Add ket BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
        # NOTE(review): normalization only happens when truncate is True --
        # confirm this nesting is intentional
        if allow_normalize:
            bound_mps[0] /= bound_mps[0].abs().max()**(1./2.)
    return bound_mps
def left_bmpo_sl_add_bra(bra,bound_mpo,Ny,chi=4,truncate=True,allow_normalize=False):
    """
    Add the bra layer to the boundary mpo

    Args:
        bra : list
            A list containing the tensors for a single peps column
        bound_mpo : MPS
            The current boundary mpo (two sites per peps row, with the
            ket layer already absorbed)
        Ny : int
            The number of rows in the peps column

    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        allow_normalize : bool
            If True (and truncate is True), rescale the first boundary
            mps tensor by the square root of its largest absolute entry

    Returns:
        bound_mps : MPS
            The boundary mpo with the bra layer absorbed
    """
    mpiprint(4,'Adding Bra')
    # Make list to hold resulting mpo
    bound_mpo_new = []
    for row in range(Ny):
        # Add bra contraction
        res = einsum('mLn,LDPRU->mDRnUP',bound_mpo[2*row],bra[row])
        # Save signs/legs before merging (used to orient the identity below)
        ressgn = res.get_signs()
        resleg = res.legs
        # Reshape it into an MPO (the bottom row loses its trivial down index)
        if row == 0:
            res = res.remove_empty_ind(0)
            res.merge_inds([2,3,4])
        else:
            res.merge_inds([0,1])
            res.merge_inds([2,3,4])
        # Append to new boundary MPO
        bound_mpo_new.append(res)
        # Add correct identity
        mpiprint(6,'Adding Identity to boundary mps')
        # Unmerge bmps tensor (NOTE: mutates the input bound_mpo tensor in place)
        bound_tens = bound_mpo[2*row+1]
        # A 3-component middle leg indicates a thermal (doubled physical) index
        thermal = (len(bound_tens.legs[1]) == 3)
        bound_tens.unmerge_ind(1)
        if thermal: bound_tens.merge_inds([2,3])
        # Create identity matching the bra's up bond dimension/sectors
        Du = bra[row].shape[bra[row].legs[4][0]]
        Zu = bra[row].qn_sectors[bra[row].legs[4][0]]
        I1 = eye(Du,
                 Zu,
                 is_symmetric=bra[row].is_symmetric,
                 backend=bra[row].backend)
        # Adjust symmetry signs so the identity contracts consistently
        # with the unmerged bmpo tensor (symmetric case only)
        if ressgn is not None:
            if ''.join(ressgn[i] for i in resleg[3]) == ''.join(ressgn[i] for i in resleg[4]):
                I1.update_signs(''.join(bound_tens.get_signs()[i] for i in bound_tens.legs[0]) +
                                ''.join(FLIP[bound_tens.get_signs()[i]] for i in bound_tens.legs[0]))
            else:
                I1.update_signs(''.join(FLIP[bound_tens.get_signs()[i]] for i in bound_tens.legs[0]) +
                                ''.join(bound_tens.get_signs()[i] for i in bound_tens.legs[0]))
        # Contract with previous bmpo
        I = einsum('mrPn,DU->mDPrnU',bound_tens,I1)
        # Reshape into an MPO (the top row loses its trivial up index)
        if row == Ny-1:
            I = I.remove_empty_ind(len(I.legs)-1)
            I.merge_inds([0,1,2])
        else:
            I.merge_inds([0,1,2])
            I.merge_inds([2,3])
        # Append to new boundary MPO
        bound_mpo_new.append(I)
    # Put result into an MPS -------------------------------------------
    bound_mps = MPS(bound_mpo_new)
    # Reduce bond dimension
    if truncate:
        mpiprint(5,'Truncating Boundary MPS')
        if DEBUG:
            mpiprint(6,'Computing initial bmpo norm')
            norm0 = bound_mps.norm()
        bound_mps = bound_mps.apply_svd(chi)
        if DEBUG:
            mpiprint(6,'Computing resulting bmpo norm')
            norm1 = bound_mps.norm()
            mpiprint(0,'Add bra BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
        # NOTE(review): normalization only happens when truncate is True --
        # confirm this nesting is intentional
        if allow_normalize:
            bound_mps[0] /= bound_mps[0].abs().max()**(1./2.)
    return bound_mps
def left_bmpo_sl(bra, bound_mpo, chi=4,truncate=True,ket=None,allow_normalize=False):
    """
    Add two layers to the single layer boundary mpo environment

    Args:
        bra : list
            A list containing the tensors for a single peps column
        bound_mpo : list
            A list containing the tensors for the left neighboring
            boundary mpo

    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        ket : PEPS Object
            A second peps column, to use as the ket
            If None, then the bra col will be used
        allow_normalize : bool
            If True, allow rescaling of the boundary mps tensors

    Returns:
        bound_mpo : list
            An updated boundary mpo
    """
    mpiprint(3,'Updating boundary mpo (sl)')
    # Number of rows in this peps column
    nrow = len(bra)
    # Work on a copy of the bra so the caller's column is untouched
    bra = copy_tensor_list(bra)
    # Default the ket to a copy of the bra column
    if ket is None:
        ket = copy_tensor_list(bra)
    # Absorb the ket layer first, then the bra layer
    for add_layer, column in ((left_bmpo_sl_add_ket, ket),
                              (left_bmpo_sl_add_bra, bra)):
        bound_mpo = add_layer(column,
                              bound_mpo,
                              nrow,
                              chi=chi,
                              truncate=truncate,
                              allow_normalize=allow_normalize)
    # Return result
    return bound_mpo
#@profile
def left_update_sl(peps_col, bound_mpo, chi=4,truncate=True,ket=None,allow_normalize=False):
    """
    Update the boundary mpo, from the left, moving right, using single layer

    Args:
        peps_col : list
            A list containing the tensors for a single peps column
        bound_mpo : list
            The neighboring boundary mpo, which will be updated

    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        ket : PEPS Object
            A second peps column, to use as the ket
        allow_normalize : bool
            If True, allow rescaling of the boundary mps tensors

    Returns:
        bound_mpo : list
            An updated boundary mpo
    """
    # At the left edge there is no previous environment, so build
    # the initial boundary mpo from scratch
    if bound_mpo is None:
        return init_left_bmpo_sl(peps_col,
                                 chi=chi,
                                 truncate=truncate,
                                 ket=ket,
                                 allow_normalize=allow_normalize)
    # Otherwise absorb this column into the existing boundary mpo
    return left_bmpo_sl(peps_col,
                        bound_mpo,
                        chi=chi,
                        truncate=truncate,
                        ket=ket,
                        allow_normalize=allow_normalize)
def left_update(peps_col,bound_mpo,chi=4,ket=None,truncate=True,allow_normalize=False):
    """
    Update the boundary mpo using a double layer environment.

    Not implemented: only the single layer environment is available.

    Args:
        peps_col : list
            A list containing the tensors for a single peps column
        bound_mpo : list
            The neighboring boundary mpo, which will be updated

    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        ket : PEPS Object
            A second peps column, to use as the ket
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo (accepted for interface compatibility with
            update_left_bound_mpo, which passes it as a keyword)
        allow_normalize : bool
            If True, allow rescaling of the boundary mps tensors

    Raises:
        NotImplementedError
            Always; only the single layer environment is implemented
    """
    mpiprint(0,'Only single layer environment implemented')
    # Fix: `raise NotImplemented` is invalid in Python 3 (NotImplemented is
    # a singleton, not an exception) -- raise the proper exception class.
    raise NotImplementedError('Only single layer environment implemented')
def update_left_bound_mpo(peps_col, bound_mpo, chi=4, singleLayer=True,truncate=True,ket_col=None,allow_normalize=False):
    """
    Update the boundary mpo, from the left, moving right

    Args:
        peps_col : list
            A list containing the tensors for a single peps column
        bound_mpo : list
            The neighboring boundary mpo, which will be updated

    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        singleLayer : bool
            Indicates whether to use a single layer environment
            (currently it is the only option...)
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        ket_col : PEPS Object
            A second peps column, to use as the ket
        allow_normalize : bool
            If True, allow rescaling of the boundary mps tensors
            (only used by the single layer routine)

    Returns:
        bound_mpo : list
            An updated boundary mpo
    """
    # Dispatch to the double layer routine when requested
    # (note: it does not take allow_normalize)
    if not singleLayer:
        return left_update(peps_col,
                           bound_mpo,
                           chi=chi,
                           truncate=truncate,
                           ket=ket_col)
    # Single layer contraction (the default)
    return left_update_sl(peps_col,
                          bound_mpo,
                          chi=chi,
                          truncate=truncate,
                          ket=ket_col,
                          allow_normalize=allow_normalize)
def calc_left_bound_mpo(peps,col,chi=4,singleLayer=True,truncate=True,return_all=False,ket=None,allow_normalize=False,in_mem=True):
    """
    Calculate the left boundary MPO

    Args:
        peps : List
            A list of lists containing the peps tensors
        col : int
            The last column for which you need the environment

    Kwargs:
        chi : int
            The maximum bond dimension of the boundary MPO
        singleLayer : bool
            Indicates whether to use a single layer environment
            (currently it is the only option...)
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        return_all : bool
            Whether to return a list of boundary mpos upto col or just
            return the boundary mpo for col.
        ket : PEPS Object
            A second peps, to use as the ket, in the operator contraction
        allow_normalize : bool
            If True, allow rescaling of the boundary mps tensors
        in_mem: bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory. If False,
            then the peps tensors will all be written to disk, then loaded as
            needed. All bmpo tensors will be written to disk. Default
            is True

    Returns:
        bound_mpo : list
            An mpo stored as a list, corresponding to the
            resulting boundary mpo.
            (NOTE(review): for col <= 1 the boundary mpo list is empty
            and indexing bound_mpo[-1] would fail -- confirm callers
            always pass col >= 2)
    """
    mpiprint(2,'Computing Left boundary MPO')
    # Ensure peps are in or out of memory
    if in_mem:
        peps.from_disk()
    else:
        peps.to_disk()
    # Determine the dimensions of the peps
    Nx = len(peps)
    Ny = len(peps[0])
    # Set up initial list to store boundary mpos
    bound_mpo = [None]*(col-1)
    # Loop through the columns, creating a boundary mpo for each
    for colind in range(col-1):
        mpiprint(4,'Updating left boundary mpo')
        # Load appropriate peps/ket column (only needed when stored on disk)
        if not in_mem:
            peps.col_from_disk(colind)
            if ket is not None:
                ket.col_from_disk(colind)
        # Specify ket column (if not None)
        if ket is not None:
            ket_col = ket[colind][:]
        else: ket_col = None
        # Update the bmpo
        if colind == 0:
            # Update for the initial column (use None as previous boundary mpo)
            bound_mpo[colind] = update_left_bound_mpo(peps[colind][:],
                                                      None,
                                                      chi=chi,
                                                      singleLayer=singleLayer,
                                                      truncate=truncate,
                                                      ket_col=ket_col,
                                                      allow_normalize=allow_normalize)
        else:
            # Update for remaining columns
            bound_mpo[colind] = update_left_bound_mpo(peps[colind][:],
                                                      bound_mpo[colind-1],
                                                      chi=chi,
                                                      singleLayer=singleLayer,
                                                      truncate=truncate,
                                                      ket_col=ket_col,
                                                      allow_normalize=allow_normalize)
            # Write previous bound_mpo to disk (if not in_mem)
            if not in_mem:
                bound_mpo[colind-1].to_disk()
        # Write the peps/ket column back to disk to free memory
        if not in_mem:
            peps.col_to_disk(colind)
            if ket is not None:
                ket.col_to_disk(colind)
    # Write final bmpo to disk
    if not in_mem:
        bound_mpo[-1].to_disk()
    # Return result
    if return_all:
        return bound_mpo
    else:
        return bound_mpo[-1]
#@profile
def calc_right_bound_mpo(peps,col,chi=4,singleLayer=True,truncate=True,return_all=False,ket=None,allow_normalize=False,in_mem=True):
    """
    Calculate the right boundary MPO

    Works by horizontally flipping the peps (and ket, if given),
    reusing the left boundary machinery, then flipping back.

    Args:
        peps : List
            A list of lists containing the peps tensors
        col : int
            The column for which you need the environment

    Kwargs:
        chi : int
            The maximum bond dimension of the boundary MPO
        singleLayer : bool
            Indicates whether to use a single layer environment
            (currently it is the only option...)
        truncate : bool
            Whether or not to do an svd and truncate the resulting
            boundary mpo
        return_all : bool
            Whether to return a list of boundary mpos upto col or just
            return the boundary mpo for col.
        ket : PEPS Object
            A second peps, to use as the ket, in the operator contraction
        allow_normalize : bool
            If True, allow rescaling of the boundary mps tensors
        in_mem: bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory. If False,
            then the peps tensors will all be written to disk, then loaded as
            needed. All bmpo tensors will be written to disk. Default
            is True

    Returns:
        bound_mpo : list
            An mpo stored as a list, corresponding to the
            resulting boundary mpo.
    """
    # Fix: message previously said 'Left' (copy-paste from calc_left_bound_mpo)
    mpiprint(2,'Computing Right boundary MPO')
    # Ensure peps are in or out of memory
    if in_mem:
        peps.from_disk()
    else:
        peps.to_disk()
    # Determine the dimensions of the peps
    Nx = len(peps)
    Ny = len(peps[0])
    # Flip the peps horizontally so the right environment becomes a
    # left environment of the flipped lattice
    peps.flip()
    if ket is not None:
        ket.flip()
    col = Nx-col
    # Set up initial list to store boundary mpos
    bound_mpo = [None]*(col-1)
    # Loop through the columns, creating a boundary mpo for each
    for colind in range(col-1):
        mpiprint(4,'Updating boundary mpo')
        # Load appropriate peps column (only needed when stored on disk)
        if not in_mem:
            peps.col_from_disk(colind)
            if ket is not None:
                ket.col_from_disk(colind)
        # Specify ket column (if not None)
        if ket is not None:
            ket_col = ket[colind][:]
        else: ket_col = None
        # Update the boundary MPO
        if colind == 0:
            # Update for the initial column (use None as previous boundary mpo)
            bound_mpo[colind] = update_left_bound_mpo(peps[colind][:],
                                                      None,
                                                      chi=chi,
                                                      singleLayer=singleLayer,
                                                      truncate=truncate,
                                                      ket_col=ket_col,
                                                      allow_normalize=allow_normalize)
        else:
            # Update for remaining columns
            bound_mpo[colind] = update_left_bound_mpo(peps[colind][:],
                                                      bound_mpo[colind-1],
                                                      chi=chi,
                                                      singleLayer=singleLayer,
                                                      truncate=truncate,
                                                      ket_col=ket_col,
                                                      allow_normalize=allow_normalize)
            # Write previous bound_mpo to disk (if not in_mem)
            if not in_mem:
                bound_mpo[colind-1].to_disk()
        # Write the peps/ket column back to disk to free memory
        if not in_mem:
            peps.col_to_disk(colind)
            if ket is not None:
                ket.col_to_disk(colind)
    # Write final bmpo to disk
    if not in_mem:
        bound_mpo[-1].to_disk()
    # Unflip the peps (restore caller's orientation)
    peps.flip()
    if ket is not None:
        ket.flip()
    # Return results (reversed so the list runs left-to-right again)
    if return_all:
        return bound_mpo[::-1]
    else:
        return bound_mpo[-1]
def rotate_peps(peps,clockwise=True):
    """
    Rotate a peps by 90 degrees

    Args:
        peps : a list of a list containing peps tensors
            The initial peps

    Kwargs:
        clockwise : bool
            If True (default), rotate clockwise; otherwise rotate
            counter-clockwise

    Returns:
        rpeps : a list of a list containing peps tensors
            The rotated peps; each entry is a copy of the
            corresponding original tensor with its legs permuted
    """
    # Original lattice dimensions
    Nx = len(peps)
    Ny = len(peps[0])
    # Allocate the rotated lattice (Ny columns of Nx sites)
    rpeps = [[None for _ in range(Nx)] for _ in range(Ny)]
    # Fill the rotated lattice site by site
    for xi in range(Nx):
        for yi in range(Ny):
            # Destination coordinates and leg permutation depend on direction
            if clockwise:
                xnew, ynew = yi, Nx-1-xi
                perm = [1,3,2,4,0]
            else:
                xnew, ynew = Ny-1-yi, xi
                perm = [4,0,2,1,3]
            # Copy the tensor into its rotated position
            rpeps[xnew][ynew] = peps[xi][yi].copy()
            # Pull the tensor into memory if it lives on disk
            was_in_mem = rpeps[xnew][ynew].in_mem
            if not was_in_mem:
                rpeps[xnew][ynew].from_disk()
            # Reorder the legs to account for the rotation
            rpeps[xnew][ynew] = rpeps[xnew][ynew].transpose(perm)
            # Push back to disk if that is where it started
            if not was_in_mem:
                rpeps[xnew][ynew].to_disk()
    # Return Rotated peps
    return rpeps
def rotate_lambda(Lambda,clockwise=True):
    """
    Rotate the Lambda tensors of a canonical (Gamma-Lambda) PEPS

    Args:
        Lambda : list
            The lambda tensors as [vertical, horizontal], or None

    Kwargs:
        clockwise : bool
            If True (default), rotate clockwise; otherwise rotate
            counter-clockwise

    Returns:
        rLambda : list
            The rotated lambda tensors, or None if Lambda is None
    """
    if Lambda is None:
        return None
    # Dimensions of the rotated lattice
    Ny = len(Lambda[0])
    Nx = len(Lambda[1][0])
    # Rotated vertical-bond lambdas come from the old horizontal ones
    vert = []
    for xi in range(Nx):
        col = []
        for yi in range(Ny-1):
            if clockwise:
                col.append(Lambda[1][Ny-2-yi][xi].copy())
            else:
                col.append(Lambda[1][yi][Nx-1-xi].copy())
        vert.append(col)
    # Rotated horizontal-bond lambdas come from the old vertical ones
    horz = []
    for xi in range(Nx-1):
        col = []
        for yi in range(Ny):
            if clockwise:
                col.append(Lambda[0][Ny-1-yi][xi].copy())
            else:
                col.append(Lambda[0][yi][Nx-2-xi].copy())
        horz.append(col)
    # Combine vertical and horizontal lambdas
    return [vert,horz]
def flip_peps(peps,mk_copy=True):
    """
    Flip a peps horizontally (left-right mirror)

    Args:
        peps : a list of a list containing peps tensors
            The initial peps

    Kwargs:
        mk_copy : bool
            Whether to make this a copy of the original peps.
            NOTE(review): this flag is currently not consulted; the
            returned peps always holds copies of the original tensors.

    Returns:
        fpeps : a list of a list containing peps tensors
            The horizontally flipped (copied) peps
    """
    # Lattice dimensions
    Nx = len(peps)
    Ny = len(peps[0])
    # Allocate the flipped lattice
    fpeps = [[None for _ in range(Ny)] for _ in range(Nx)]
    # Fill the flipped lattice site by site
    for xi in range(Nx):
        for yi in range(Ny):
            # Mirror the column index and copy the tensor
            fpeps[xi][yi] = peps[(Nx-1)-xi][yi].copy()
            # Pull into memory if stored on disk
            was_in_mem = fpeps[xi][yi].in_mem
            if not was_in_mem:
                fpeps[xi][yi].from_disk()
            # Swap the left and right legs
            fpeps[xi][yi] = fpeps[xi][yi].transpose([3,1,2,0,4])
            # Push back to disk if that is where it started
            if not was_in_mem:
                fpeps[xi][yi].to_disk()
    # Return Flipped peps
    return fpeps
def flip_lambda(Lambda):
    """
    Flip the lambda tensors (of a canonical peps) horizontally

    Args:
        Lambda : list
            The lambda tensors as [vertical, horizontal], or None

    Returns:
        fLambda : list
            The horizontally flipped lambda tensors, or None if
            Lambda is None
    """
    if Lambda is None:
        return None
    # Lattice dimensions
    Nx = len(Lambda[0])
    Ny = len(Lambda[1][0])
    # Mirror the vertical-bond lambdas across the vertical center line
    vert = [[Lambda[0][(Nx-1)-xi][yi].copy() for yi in range(Ny-1)]
            for xi in range(Nx)]
    # Mirror the horizontal-bond lambdas (one fewer column of bonds)
    horz = [[Lambda[1][(Nx-2)-xi][yi].copy() for yi in range(Ny)]
            for xi in range(Nx-1)]
    # Return Flipped lambdas
    return [vert,horz]
def peps_col_to_mps(peps_col):
    """
    Convert a single PEPS column into an MPS.

    The physical index of each MPS site lumps together the PEPS
    tensor's left, physical and right legs:

          left/phys/right
                |
                |
      bottom ---+--- top

    Args:
        peps_col : 1D Array
            A list containing the tensors for each site in a peps column

    Returns:
        mps : MPS
            The resulting MPS built from copies of the column's tensors

    Raises:
        ValueError : if any tensor in the column is not in memory
    """
    # All column tensors must be in memory before reshaping
    for site, ten in enumerate(peps_col):
        if not ten.in_mem:
            raise ValueError('PEPS column tensor {} not in memory for calc_peps_col_norm'.format(site))
    # Work on copies so the caller's column is untouched
    col = [ten.copy() for ten in peps_col]
    for site in range(len(col)):
        # Move the left, physical, and right legs to the middle ...
        col[site] = col[site].transpose([1,0,2,3,4])
        # ... then lump them into a single physical index
        col[site].merge_inds([1,2,3])
    # Wrap the reshaped column in an MPS object
    return MPS(col)
def calc_peps_col_norm(peps_col):
    """
    Compute the norm of a single PEPS column.

    The column is reshaped into an MPS (via peps_col_to_mps) and that
    MPS's norm is evaluated, then scaled by 0.5.

    .. Note : Used to keep PEPS norm near 1.

    Args:
        peps_col : 1D Array
            A list containing the tensors for each site in a peps column

    Returns:
        norm : float
            The norm of the peps column (reshaped as an MPS)

    Raises:
        ValueError : if any tensor in the column is not in memory
    """
    # All column tensors must live in memory for the contraction
    for site, ten in enumerate(peps_col):
        if not ten.in_mem:
            raise ValueError('PEPS column tensor {} not in memory for calc_peps_col_norm'.format(site))
    # Reshape the column into an MPS by lumping indices, then contract
    # NOTE(review): the 0.5 prefactor is preserved from the original code
    return 0.5*peps_col_to_mps(peps_col).norm()
def thermal_peps_tensor(Nx,Ny,x,y,d,D,Zn=None,dZn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Create a thermal (beta=0) tensor for a PEPS

    Args:
        Nx : int
            The PEPS lattice size in the x-direction
        Ny : int
            The PEPS lattice size in the y-direction
        x : int
            The x-coordinate of the tensor
        y : int
            The y-coordinate of the tensor
        d : int
            The local (physical) bond dimension
        D : int
            The auxiliary (virtual) bond dimension
    Kwargs:
        Zn : int
            Create a PEPS which preserves this Zn symmetry,
            i.e. if Zn=2, then Z2 symmetry is preserved.
        dZn : int
            The number of symmetry sectors for the physical bond dimension
            if None, then Zn will be used
        backend : str
            This specifies the backend to be used for the calculation.
            Options are currently 'numpy' or 'ctf'. If using symmetries,
            this will be adapted to using symtensors with numpy or ctf as
            the backend.
        dtype : dtype
            The data type of the tensor
            Default : np.float_
        in_mem : bool
            Whether the peps tensors should be stored in memory or on disk
    Returns:
        ten : ndarray
            A thermal-state tensor with the correct dimensions
            for the given site; nonzero only on the diagonal of the
            doubled physical index, normalized by 1/sqrt(d)
    """
    # Determine the correct bond dimensions
    # Legs are ordered (left, down, phys-ket, phys-bra, right, up)
    Dl = D
    Dr = D
    Du = D
    Dd = D
    # Set to one if at an edge (no bond beyond the lattice boundary)
    if x == 0:    Dl = 1
    if x == Nx-1: Dr = 1
    if y == 0:    Dd = 1
    if y == Ny-1: Du = 1
    # Set default value of sym
    sym = None
    # Deal with Zn symmetry (if needed)
    if Zn is not None:
        # And correct symmetries
        Znl= Zn
        Znr= Zn
        Znu= Zn
        Znd= Zn
        # Set to one if at an edge
        if x == 0:    Znl = 1
        if x == Nx-1: Znr = 1
        if y == 0:    Znd = 1
        if y == Ny-1: Znu = 1
        # Resize D->Dnew so Dnew*Zn = D
        Dl = int(Dl/Znl)
        Dr = int(Dr/Znr)
        Dd = int(Dd/Znd)
        Du = int(Du/Znu)
        d  = int(d/dZn)
        # Create sym argument (two physical legs -> two dZn entries)
        sym = ['+++---',
               [range(Znl),range(Znd),range(dZn),range(dZn),range(Znr),range(Znu)],
               0,
               Zn]
    # Create an empty tensor; the two physical legs (2,3) are grouped
    dims = (Dl,Dd,d,d,Dr,Du)
    ten = zeros(dims,sym,backend=backend,dtype=dtype,legs=[[0],[1],[2,3],[4],[5]])
    # Fill tensor entries where needed:
    # identity on the doubled physical index, scaled by 1/sqrt(d),
    # with all virtual indices fixed to their first entry
    if sym is None:
        for i in range(d):
            for j in range(d):
                if i == j:
                    ten.ten[0,0,i,j,0,0] = 1./ten.backend.sqrt(float(d))
    else:
        # Symmetric case: also require matching symmetry sectors (k == l)
        for i in range(d):
            for j in range(d):
                for k in range(dZn):
                    for l in range(dZn):
                        if (i == j) and (k == l):
                            ten.ten.array[0,0,k,l,0,0,0,i,j,0,0] = 1./ten.backend.sqrt(float(d))
    # Store on disk (if wanted)
    if not in_mem: ten.to_disk()
    # Return result
    return ten
def rand_peps_tensor(Nx,Ny,x,y,d,D,Zn=None,dZn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Create a random tensor for a PEPS

    Args:
        Nx : int
            The PEPS lattice size in the x-direction
        Ny : int
            The PEPS lattice size in the y-direction
        x : int
            The x-coordinate of the tensor
        y : int
            The y-coordinate of the tensor
        d : int
            The local (physical) bond dimension
        D : int
            The auxiliary (virtual) bond dimension

    Kwargs:
        Zn : int
            Create a PEPS which preserves this Zn symmetry,
            i.e. if Zn=2, then Z2 symmetry is preserved.
        dZn : int
            The number of symmetry sectors for the physical bond dimension
            if None, then Zn will be used
        backend : str
            This specifies the backend to be used for the calculation.
            Options are currently 'numpy' or 'ctf'. If using symmetries,
            this will be adapted to using symtensors with numpy or ctf as
            the backend.
        dtype : dtype
            The data type of the tensor
            Default : np.float_
        in_mem : bool
            Whether the PEPS tensor should be stored in memory or on disk.
            Default is True (i.e. in memory)

    Returns:
        ten : ndarray
            A random tensor with the correct dimensions
            for the given site
    """
    # Start with full bond dimension on every leg, then trim the edges
    (Dl,Dd,Dr,Du) = (D,D,D,D)
    if x == 0:    Dl = 1
    if x == Nx-1: Dr = 1
    if y == 0:    Dd = 1
    if y == Ny-1: Du = 1
    # By default no symmetry is imposed
    sym = None
    if Zn is not None:
        # Symmetry sector counts mirror the bond-dimension edge handling
        (Znl,Znd,Znr,Znu) = (Zn,Zn,Zn,Zn)
        if x == 0:    Znl = 1
        if x == Nx-1: Znr = 1
        if y == 0:    Znd = 1
        if y == Ny-1: Znu = 1
        # Shrink each dimension so that (sectors * per-sector dim) = D
        Dl = int(Dl/Znl)
        Dr = int(Dr/Znr)
        Dd = int(Dd/Znd)
        Du = int(Du/Znu)
        d  = int(d/dZn)
        # Assemble the symmetry specification
        sym = ['+++--',
               [range(Znl),range(Znd),range(dZn),range(Znr),range(Znu)],
               0,
               Zn]
    # Draw the random tensor with legs (left, down, phys, right, up)
    ten = rand((Dl,Dd,d,Dr,Du),sym,backend=backend,dtype=dtype)
    # Optionally store the tensor on disk rather than in memory
    if not in_mem:
        ten.to_disk()
    # Return result
    return ten
def normalize_peps_col(peps_col):
    """
    Rescale a PEPS column so its norm stays near 1.

    Args:
        peps_col : 1D Array
            A list containing the tensors for each site in a peps column

    Returns:
        peps_col : 1D Array
            The same column with each tensor rescaled in place

    Raises:
        ValueError : if any tensor in the column is not in memory
    """
    # All column tensors must be available in memory
    for site, ten in enumerate(peps_col):
        if not ten.in_mem:
            raise ValueError('PEPS column tensor {} not in memory for normalize peps column'.format(site))
    # Current norm of the column
    norm = calc_peps_col_norm(peps_col)
    # Spread the rescaling evenly over all tensors in the column
    nrows = len(peps_col)
    for site in range(nrows):
        peps_col[site] *= 1. / (norm ** (0.5 / nrows))
    # Return the normalized peps column
    return peps_col
def multiply_peps_elements(peps,const):
    """
    Multiply every tensor in a peps by a constant

    Args:
        peps : A PEPS object or a list of lists containing the peps tensors
        const : float
            The constant with which to multiply each peps tensor

    Returns:
        peps : a PEPS object, or list of lists, depending on input
            The same object, scaled in place

    Raises:
        ValueError : if const is not finite (inf or nan)
    """
    # Guard against propagating inf/nan into every tensor
    if not np.isfinite(const):
        raise ValueError('Multiplying PEPS by {} is not valid'.format(const))
    # Scale each site tensor in place
    ncols = len(peps)
    nrows = len(peps[0])
    for col in range(ncols):
        for row in range(nrows):
            peps[col][row] *= const
    return peps
def normalize_peps(peps,max_iter=100,norm_tol=1e-2,exact_norm_tol=3,chi=10,up=5.0,
                    down=0.0,singleLayer=True,ket=None,pf=False,in_mem=True):
    """
    Normalize the full PEPS by doing a binary search on the
    interval [down, up] for the factor which, when multiplying
    every element of the PEPS tensors, yields a rescaled PEPS
    with norm equal to 1.0.

    Args:
        peps : A PEPS object
            The PEPS to be normalized, given as a PEPS object

    Kwargs:
        max_iter : int
            The maximum number of iterations of the normalization
            procedure. Default is 100.
        norm_tol : float
            How close the norm must be to 1. to consider the norm
            to be sufficiently well converged. Also used as the
            exponent bound 10**(+-norm_tol) deciding whether the
            norm is "far" from 1.
        exact_norm_tol : float
            Currently unused; kept for interface compatibility.
        chi : int
            Boundary MPO maximum bond dimension
        up : float
            The upper bound for the binary search factor.
        down : float
            The lower bound for the binary search factor. Default is 0.0.
            The intial guess for the scale factor is the midpoint
            between up and down. It's not recommended to adjust the
            up and down parameters unless you really understand what
            they are doing.
        singleLayer : bool
            Indicates whether to use a single layer environment
            (currently it is the only option...)
        ket : peps object
            If you would like the ket to be 'normalized', such that
            when contracted with another peps, the contraction is equal
            to one. Only the peps (not ket) will be altered to attempt
            the normalization
        pf : bool
            If True, then we will normalize as though this is a partition
            function instead of a contraction between two peps
        in_mem : bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory, default
            is True

    Returns:
        norm : float
            The approximate norm of the PEPS after the normalization
            procedure
        peps : PEPS object
            The normalized version of the PEPS
    """
    # Make sure PEPS tensors are in or out of mem
    if in_mem:
        peps.from_disk()
    else:
        peps.to_disk()
    # Figure out peps size
    Nx = peps.Nx
    Ny = peps.Ny
    be = peps[0][0].backend
    # Power changes if partition function or norm
    # (a norm contraction has twice as many tensors: bra and ket)
    if pf: pwr = -1./(Nx*Ny)
    else:  pwr = -1./(2*Nx*Ny)
    # Make sure PEPS entries are not really huge or miniscule
    maxval = peps.max_entry()
    if (maxval > 10**4) or (maxval < 10**-4):
        peps = multiply_peps_elements(peps.copy(),2/maxval)
    if ket is not None:
        # BUGFIX: the first condition previously read `maxval > 10**-4`,
        # which made this rescale fire for almost any value; corrected to
        # the same huge/miniscule window used above.
        # NOTE(review): this branch re-checks peps (not ket) -- confirm
        # whether it was meant to rescale the ket instead.
        maxval = peps.max_entry()
        if (maxval > 10**4) or (maxval < 10**-4):
            peps = multiply_peps_elements(peps.copy(),2/maxval)
    # Check if state is already easily normalized
    try:
        z = calc_peps_norm(peps,chi=chi,singleLayer=singleLayer,ket=ket,in_mem=in_mem)
    except Exception:
        z = None
    if (z is None) or (not (z < 10.**(-1*norm_tol) or z > 10.**(norm_tol))):
        if z is not None:
            # Norm is in a moderate range; try a single exact rescale first
            sfac = z**pwr
            peps_try = multiply_peps_elements(peps.copy(),sfac)
            z = calc_peps_norm(peps_try,chi=chi,singleLayer=singleLayer,ket=ket,in_mem=in_mem)
            if abs(z-1.) < norm_tol:
                return z, peps_try
            else:
                z = None
                peps_try = peps.copy()
    # Begin binary search --------
    niter = 0
    scale = (up+down)/2.
    z = None
    converged = False
    while not converged:
        # Update Iteration count
        niter += 1
        # Calculate norm of the rescaled candidate
        peps_try = multiply_peps_elements(peps.copy(),scale)
        zprev = z
        z = None
        try:
            z = calc_peps_norm(peps_try,chi=chi,singleLayer=singleLayer,ket=ket,in_mem=in_mem)
            z = abs(z)
        except Exception:
            # Treat a failed contraction like an infinite/invalid norm below
            pass
        # Determine next scale (for infinite or failed norm result)
        if (z is None) or (not np.isfinite(z)):
            # Replace None with nan (so it can be compared using '<')
            if z is None: z = np.nan
            # Set up -> scale (if previous norm was infinite/None/nan/<1)
            if ((zprev is None) or (not np.isfinite(zprev))) or (zprev < 1.):
                up = scale if (scale is not None) else up
                scale = (up+down)/2.
            # Set down -> scale (if previous norm was > 1)
            else:
                down = scale if (scale is not None) else down
                scale = (up+down)/2.
        # adjust scale to make norm in target region
        else:
            # Check if sufficiently well converged
            if abs(z-1.0) < norm_tol:
                mpiprint(2, 'converged scale = {}, norm = {}'.format(scale,z))
                converged = True
            # Check if we are still far away from convergence
            elif z < 10.0**(-1*norm_tol) or z > 10.0**(norm_tol) or be.isnan(z):
                if z > 1.0 or be.isnan(z):
                    up = scale if (scale is not None) else up
                    scale = (up+down)/2.0
                else:
                    down = scale if (scale is not None) else down
                    scale = (up+down)/2.0
            # Close to convergence, apply "exact" scale
            else:
                sfac = z**pwr
                scale = sfac*scale if (scale is not None) else sfac
                mpiprint(2, 'apply exact scale: {}'.format(scale))
        # Print Results of current step
        mpiprint(2, 'step={}, (down,up)=({},{}), scale={}, norm={}'.format(
                                            niter,down,up,scale,z))
        # Check if we have exceeded the maximum number of iterations
        if niter == max_iter:
            mpiprint(4, 'binarySearch normalization exceeds max_iter... terminating')
            converged=True
    # Return normalized PEPS and norm
    return z, peps_try
def calc_peps_norm(_peps,chi=4,singleLayer=True,ket=None,allow_normalize=False,in_mem=True):
    """
    Calculate the norm of the PEPS

    Args:
        _peps : A PEPS object
            The PEPS whose norm will be computed

    Kwargs:
        chi : int
            The boundary MPO bond dimension
        singleLayer : bool
            Indicates whether to use a single layer environment
            (currently it is the only option...)
        ket : PEPS object or None
            An optional ket to contract against (the bra is _peps)
        allow_normalize : bool
            Passed through to the boundary MPO routines
        in_mem : bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory, default
            is True

    Returns:
        norm : float
            The (approximate, absolute) norm of the PEPS
    """
    # Work on a copy; absorb Lambda tensors if in canonical form
    peps = _peps.copy()
    if _peps.ltensors is not None:
        peps.absorb_lambdas()
    if ket is not None:
        ket = ket.copy()
        if ket.ltensors is not None:
            ket.absorb_lambdas()
    # Load tensors into memory or push them to disk
    if in_mem:
        peps.from_disk()
    else:
        peps.to_disk()
    # Get PEPS width
    Nx = len(peps)
    # Boundary MPO from the left (for the furthest right column)
    left_bound_mpo = calc_left_bound_mpo(peps,Nx,chi=chi,singleLayer=singleLayer,ket=ket,allow_normalize=allow_normalize,in_mem=in_mem)
    # Boundary MPO from the right (for the furthest right column)
    right_bound_mpo = calc_right_bound_mpo(peps,Nx-2,chi=chi,singleLayer=singleLayer,ket=ket,allow_normalize=allow_normalize,in_mem=in_mem)
    # Load boundary MPOs if they were written to disk
    if not in_mem:
        left_bound_mpo.from_disk()
        right_bound_mpo.from_disk()
    # Contract the two MPOs to obtain the norm
    return abs(left_bound_mpo.contract(right_bound_mpo))
def make_thermal_peps(Nx,Ny,d,D,Zn=None,dZn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Make a thermal (beta=0) PEPS

    Args:
        Nx : int
            The PEPS lattice size in the x-direction
        Ny : int
            The PEPS lattice size in the y-direction
        d : int
            The local bond dimension
        D : int
            The auxilliary bond dimension

    Kwargs:
        Zn : int
            Create a PEPS which preserves this Zn symmetry,
            i.e. if Zn=2, then Z2 symmetry is preserved.
        dZn : int
            The number of symmetry sectors for the physical bond dimension
            If None, then will be the same as Zn
        backend : str
            This specifies the backend to be used for the calculation.
            Options are currently 'numpy' or 'ctf'. If using symmetries,
            this will be adapted to using symtensors with numpy or ctf as
            the backend.
        dtype : dtype
            The data type of the tensor
            Default : np.float_
        in_mem : bool
            Whether the peps tensors should be stored in memory or on disk

    Returns:
        tensors : array of arrays
            A thermal peps held as an array of arrays
    """
    # Build each column of thermal (infinite temperature) site tensors
    tensors = []
    for xi in range(Nx):
        col = []
        for yi in range(Ny):
            col.append(thermal_peps_tensor(Nx,Ny,xi,yi,d,D,
                                           Zn=Zn,
                                           dZn=dZn,
                                           backend=backend,
                                           dtype=dtype,
                                           in_mem=in_mem))
        tensors.append(col)
    return tensors
def make_rand_peps(Nx,Ny,d,D,Zn=None,dZn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Make a random PEPS

    Args:
        Nx : int
            The PEPS lattice size in the x-direction
        Ny : int
            The PEPS lattice size in the y-direction
        d : int
            The local bond dimension
        D : int
            The auxilliary bond dimension

    Kwargs:
        Zn : int
            Create a PEPS which preserves this Zn symmetry,
            i.e. if Zn=2, then Z2 symmetry is preserved.
        dZn : int
            The number of symmetry sectors for the physical bond dimension
            If None, then will be the same as Zn
        backend : str
            This specifies the backend to be used for the calculation.
            Options are currently 'numpy' or 'ctf'. If using symmetries,
            this will be adapted to using symtensors with numpy or ctf as
            the backend.
        dtype : dtype
            The data type of the tensor
            Default : np.float_
        in_mem : bool
            Whether the peps tensors should be stored in memory or on disk.
            Default is True

    Returns:
        tensors : array of arrays
            A random peps held as an array of arrays
    """
    tensors = []
    for xi in range(Nx):
        # Build one column of random site tensors (kept in memory for now)
        col = []
        for yi in range(Ny):
            col.append(rand_peps_tensor(Nx,Ny,xi,yi,d,D,
                                        Zn=Zn,
                                        dZn=dZn,
                                        backend=backend,
                                        dtype=dtype,
                                        in_mem=True))
        # Rescale the column so the overall norm stays manageable
        col[:] = normalize_peps_col(col)
        # Only push the tensors to disk after normalization
        if not in_mem:
            for yi in range(Ny):
                col[yi].to_disk()
        tensors.append(col)
    return tensors
def thermal_lambda_tensor(D,Zn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Create a thermal (currently identity) lambda tensor for a canonical PEPS

    Args:
        D : int
            The PEPS Bond Dimension

    Kwargs:
        Zn : int
            Create a PEPS which preserves this Zn symmetry,
            i.e. if Zn=2, then Z2 symmetry is preserved.
        backend : str
            This specifies the backend to be used for the calculation.
            Options are currently 'numpy' or 'ctf'. If using symmetries,
            this will be adapted to using symtensors with numpy or ctf as
            the backend.
        dtype : dtype
            The data type of the tensor
            Default : np.float_
        in_mem : bool
            If True, then the tensor will be stored in local memory.
            Otherwise, it will be written to disk

    Returns:
        l : ndarray
            An identity lambda tensor
    """
    # Work out the symmetry specification (and per-sector dimension)
    if Zn is None:
        sym = None
    else:
        sym = ['+-',[range(Zn)]*2,0,Zn]
        D = int(D/Zn)
    # Start from an all-zero D x D matrix
    l = zeros((D,D),
              sym=sym,
              backend=backend,
              dtype=dtype)
    # Place ones on the diagonal (of each symmetry block if symmetric)
    if l.sym is None:
        l.ten = l.backend.diag(l.backend.ones(D))
    else:
        for blk in range(Zn):
            l.ten.array[blk,:,:] = l.backend.diag(l.backend.ones(D))
    # Optionally store on disk
    if not in_mem:
        l.to_disk()
    return l
def rand_lambda_tensor(D,Zn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Create a random (diagonal) lambda tensor for a canonical PEPS

    Args:
        D : int
            The PEPS Bond Dimension

    Kwargs:
        Zn : int
            Create a PEPS which preserves this Zn symmetry,
            i.e. if Zn=2, then Z2 symmetry is preserved.
        backend : str
            This specifies the backend to be used for the calculation.
            Options are currently 'numpy' or 'ctf'. If using symmetries,
            this will be adapted to using symtensors with numpy or ctf as
            the backend.
        dtype : dtype
            The data type of the tensor
            Default : np.float_
        in_mem : bool
            If True (default), then the tensor will be stored in local
            memory. Otherwise it will be written to disk

    Returns:
        l : ndarray
            A diagonal tensor with random entries
    """
    # Work out the symmetry specification (and per-sector dimension)
    if Zn is None:
        sym = None
    else:
        sym = ['+-',[range(Zn)]*2,0,Zn]
        D = int(D/Zn)
    # Start from an all-zero D x D matrix
    l = zeros((D,D),
              sym=sym,
              backend=backend,
              dtype=dtype)
    # Fill the diagonal (of each symmetry block if symmetric) with randoms
    if l.sym is None:
        l.ten = l.backend.diag(l.backend.random(D))
    else:
        for blk in range(Zn):
            l.ten.array[blk,:,:] = l.backend.diag(l.backend.random(D))
    # Optionally store on disk
    if not in_mem:
        l.to_disk()
    return l
def make_thermal_lambdas(Nx,Ny,D,Zn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Make identites as diagonal matrices to serve as the
    singular values for the Gamma-Lambda canonical
    form of the thermal PEPS

    Used primarily for the simple update contraction scheme

    Args:
        Nx : int
            The PEPS lattice size in the x-direction
        Ny : int
            The PEPS lattice size in the y-direction
        D : int
            The PEPS bond dimension

    Kwargs:
        Zn : int
            The Zn symmetry to preserve (or None)
        backend : str
            'numpy' or 'ctf'
        dtype : dtype
            The data type of the tensors
        in_mem : bool
            Whether the lambda tensors are kept in memory or on disk

    Returns:
        tensors : list
            [vert, horz] lists of identity lambda tensors for the
            vertical and horizontal bonds
    """
    # Lambda tensors along vertical bonds (Ny-1 bonds per column)
    vert = []
    for x in range(Nx):
        tmp = []
        for y in range(Ny-1):
            tmp += [thermal_lambda_tensor(D,Zn=Zn,backend=backend,dtype=dtype,in_mem=in_mem)]
        vert += [tmp]
    # Lambda tensors along horizontal bonds (Ny bonds per bond-column)
    # BUGFIX: the inner loop previously reused `x`, shadowing the outer
    # loop variable; renamed to `y` for consistency with the vert loop
    horz = []
    for x in range(Nx-1):
        tmp = []
        for y in range(Ny):
            tmp += [thermal_lambda_tensor(D,Zn=Zn,backend=backend,dtype=dtype,in_mem=in_mem)]
        horz += [tmp]
    # Add horizontal and vertical lambdas to tensor list
    tensors = [vert,horz]
    return tensors
def make_rand_lambdas(Nx,Ny,D,Zn=None,backend='numpy',dtype=float_,in_mem=True):
    """
    Make random diagonal matrices to serve as the
    singular values for the Gamma-Lambda canonical
    form of the PEPS

    Used primarily for the simple update contraction scheme

    Args:
        Nx : int
            The PEPS lattice size in the x-direction
        Ny : int
            The PEPS lattice size in the y-direction
        D : int
            The PEPS bond dimension

    Kwargs:
        Zn : int
            The Zn symmetry to preserve (or None)
        backend : str
            'numpy' or 'ctf'
        dtype : dtype
            The data type of the tensors
        in_mem : bool
            Whether the lambda tensors are kept in memory or on disk

    Returns:
        tensors : list
            [vert, horz] lists of random diagonal lambda tensors for
            the vertical and horizontal bonds
    """
    # Lambda tensors along vertical bonds (Ny-1 bonds per column)
    vert = []
    for x in range(Nx):
        tmp = []
        for y in range(Ny-1):
            tmp += [rand_lambda_tensor(D,Zn=Zn,backend=backend,dtype=dtype,in_mem=in_mem)]
        vert += [tmp]
    # Lambda tensors along horizontal bonds (Ny bonds per bond-column)
    # BUGFIX: the inner loop previously reused `x`, shadowing the outer
    # loop variable; renamed to `y` for consistency with the vert loop
    horz = []
    for x in range(Nx-1):
        tmp = []
        for y in range(Ny):
            tmp += [rand_lambda_tensor(D,Zn=Zn,backend=backend,dtype=dtype,in_mem=in_mem)]
        horz += [tmp]
    # Add horizontal and vertical lambdas to tensor list
    tensors = [vert,horz]
    return tensors
def update_top_env_gen(row,bra,ket,left1,left2,right1,right2,prev_env,chi=10,truncate=True):
    """
    Grow the top boundary environment downward by one row.

    Doing the following contraction:

    +----+     +----+     +----+         +----+
    | p1 |-- ... -| p2 |-----| p3 |- ... ---| p6 |
    +----+     +----+     +----+         +----+
      |           |          |              |
      a           b          c              f
      |           |          |              |
    +----+     +----+        |           +----+
    | l2 |-g ... -| k1 |-----H--^--h ... ---| r2 |
    +----+     +----+\       |           +----+
      |           |   \      |              |
      |           |    \     |              |
      j           |     l    |              q
      |           |      \   |              |
      |           |       \  |              |
    +----+        |       +----+         +----+
    | l1 |-- ... -R--^--r----| b1 |-s ... ---| r1 |
    +----+        |       +----+         +----+
      |           |          |              |
      |           |          |              |
      u           k          v              x

    Args:
        row : int
            The row whose bra/ket tensors are being absorbed
        bra : list of lists
            The bra PEPS tensors, indexed [col][row]
        ket : list of lists
            The ket PEPS tensors, indexed [col][row]
        left1, left2 : tensors
            The two left boundary-MPO tensors for this row
        right1, right2 : tensors
            The two right boundary-MPO tensors for this row
        prev_env : MPS or None
            The environment from the row above; None signals this is
            the top row and a fresh environment is built

    Kwargs:
        chi : int
            Maximum bond dimension kept when truncating the boundary MPS
        truncate : bool
            Whether to compress the resulting boundary MPS via SVD

    Returns:
        top_env : MPS
            The updated top boundary environment
    """
    # Determine if it is a thermal state
    # (thermal tensors carry a doubled physical index, i.e. two legs)
    thermal = len(bra[0][row].legs[2]) == 2
    # Figure out number of columns
    ncol = len(bra)
    # Create the new top environment
    if prev_env is None:
        # Create the first top environment (no row above to absorb)
        top_env = []
        # First site is the current left bound_mpo
        res = einsum('urj,jga->augr',left1,left2)
        # Merge needed inds
        res.merge_inds([2,3])
        top_env.append(res)
        # Loop through and add bras and kets
        for col in range(ncol):
            # Copy the needed tensors
            ketten = ket[col][row].copy()
            braten = bra[col][row].copy()
            # Add ket -----------------------------------
            # Remove top ind (dimension-1 on the top row)
            ketten = ketten.remove_empty_ind(4)
            # Create correct identity
            # (identity carries the bra's left bond past the ket site)
            # TODO - Make sure signs are correct (will give error in symmetric case)
            Dl = braten.shape[braten.legs[0][0]]
            Zl = braten.qn_sectors[braten.legs[0][0]]
            I = eye(Dl,
                    Zl,
                    is_symmetric=braten.is_symmetric,
                    backend=braten.backend)
            if len(braten.legs[0]) > 1:
                # Build the identity over all sub-legs of the lumped left leg
                for legind in range(1,len(braten.legs[0])):
                    Dli = braten.shape[braten.legs[0][legind]]
                    Zli = braten.qn_sectors[braten.legs[0][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=braten.is_symmetric,
                             backend=braten.backend)
                    I = einsum('ij,IJ->iIjJ',I,Ii)
                    I.merge_inds([0,1])
                    I.merge_inds([1,2])
            # Contract identity with the ket
            res = einsum('gklh,Rr->gRkhlr',ketten,I)
            # Merge Correct inds
            res.merge_inds([0,1])
            res.merge_inds([2,3,4])
            # Add to top_env
            top_env.append(res)
            # Add bra ----------------------------------
            # Remove top ind (dimension-1 on the top row)
            braten = braten.remove_empty_ind(4)
            # Create correct identity
            # (identity carries the ket's right bond past the bra site)
            # TODO - Make sure signs are correct (will give error in symmetric case)
            Dl = ketten.shape[ketten.legs[3][0]]
            Zl = ketten.qn_sectors[ketten.legs[3][0]]
            I = eye(Dl,
                    Zl,
                    is_symmetric=ketten.is_symmetric,
                    backend=ketten.backend)
            if len(ketten.legs[3]) > 1:
                # Build the identity over all sub-legs of the lumped right leg
                for legind in range(1,len(ketten.legs[3])):
                    Dli = ketten.shape[ketten.legs[3][legind]]
                    Zli = ketten.qn_sectors[ketten.legs[3][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=ketten.is_symmetric,
                             backend=ketten.backend)
                    I = einsum('ij,IJ->iIjJ',I,Ii)
                    I.merge_inds([0,1])
                    I.merge_inds([1,2])
            # Contract identity with the ket
            res = einsum('rvls,Hh->Hlrvhs',braten,I)
            # Merge Correct inds
            res.merge_inds([0,1,2])
            res.merge_inds([2,3])
            # Add to top_env
            top_env.append(res)
        # Last site is the current right bound_mpo
        res = einsum('xsq,qhf->hsxf',right1,right2)
        # Merge needed inds
        res.merge_inds([0,1])
        top_env.append(res)
        # Put result into an MPS -------------------------------------------
        top_env = MPS(top_env)
        # Reduce bond dimension
        if truncate:
            mpiprint(5,'Truncating Boundary MPS')
            if DEBUG:
                mpiprint(6,'Computing initial bmpo norm')
                norm0 = top_env.norm()
            top_env = top_env.apply_svd(chi)
            if DEBUG:
                mpiprint(6,'Computing resulting bmpo norm')
                norm1 = top_env.norm()
                mpiprint(0,'Init top BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
    else:
        # Add the ket layer --------------------------------------------------
        """
        Doing the following contraction:

           +----+   +----+     +----+        +----+   +----+   +----+
        z--| p1 |-y-| p2 |--x--| p3 |-w ... -| p4 |-v--| p5 |-u-| p6 |--t
           +----+   +----+     +----+        +----+   +----+   +----+
              |        |          |             |        |        |
              a        b          c             d        e        f
              |        |          |             |        |        |
           +----+   +----+        |          +----+      |     +----+
           | l2 |-g-| k1 |-----h--^--- ... -| k2 |----i--^-----| r2 |
           +----+   +----+-------+|          +----+------+|    +----+
              |        |         ||             |         ||      |
              |        |         ||             |         ||      |
              j        k         lc             n         ow      q
        """
        # Create the next top environment
        top_env = []
        # First absorb left boundary mpo
        res = einsum('jga,zay->zjyg',left2,prev_env[0])
        # Merge correct inds
        res.merge_inds([2,3])
        # Add to top_env
        top_env.append(res)
        # Loop through and add kets
        for col in range(ncol):
            # Add ket --------------------------
            ketten = ket[col][row].copy()
            # Contract with previous top env
            res = einsum('gklhb,ybx->ygkxhl',ketten,prev_env[2*col+1])
            # Merge correct indices
            res.merge_inds([0,1])
            res.merge_inds([2,3,4])
            # Add to top_env
            top_env.append(res)
            # Add identity ---------------------
            # (I1 passes the ket's right bond, I2 its physical bond,
            #  through to where the bra layer will be added)
            # TODO - Make sure signs are correct (will give error in symmetric case)
            D1 = ketten.shape[ketten.legs[3][0]]
            Z1 = ketten.qn_sectors[ketten.legs[3][0]]
            I1 = eye(D1,
                     Z1,
                     is_symmetric=ketten.is_symmetric,
                     backend=ketten.backend)
            if len(ketten.legs[3]) > 1:
                for legind in range(1,len(ketten.legs[3])):
                    Dli = ketten.shape[ketten.legs[3][legind]]
                    Zli = ketten.qn_sectors[ketten.legs[3][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=ketten.is_symmetric,
                             backend=ketten.backend)
                    I1 = einsum('ij,IJ->iIjJ',I1,Ii)
                    I1.merge_inds([0,1])
                    I1.merge_inds([1,2])
            D2 = ketten.shape[ketten.legs[2][0]]
            Z2 = ketten.qn_sectors[ketten.legs[2][0]]
            I2 = eye(D2,
                     Z2,
                     is_symmetric=ketten.is_symmetric,
                     backend=ketten.backend)
            if len(ketten.legs[2]) > 1:
                for legind in range(1,len(ketten.legs[2])):
                    Dli = ketten.shape[ketten.legs[2][legind]]
                    Zli = ketten.qn_sectors[ketten.legs[2][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=ketten.is_symmetric,
                             backend=ketten.backend)
                    I2 = einsum('ij,IJ->iIjJ',I2,Ii)
                    I2.merge_inds([0,1])
                    I2.merge_inds([1,2])
            # Contract with previous environment
            res = einsum('xcw,Hh->xHcwh',prev_env[2*col+2],I1)
            res = einsum('xHcwh,Ll->xHLlcwh',res,I2)
            # Merge correct indices
            res.merge_inds([0,1,2])
            res.merge_inds([1,2])
            res.merge_inds([2,3])
            # Add to top_env
            top_env.append(res)
        # Last, absorb right boundary mpo
        res = einsum('qif,uft->uiqt',right2,prev_env[2*ncol+1])
        # Merge needed inds
        res.merge_inds([0,1])
        # Add to top_env
        top_env.append(res)
        # Put result into an MPS ------------------
        top_env = MPS(top_env)
        # Reduce bond dimension
        if truncate:
            mpiprint(5,'Truncating Boundary MPS')
            if DEBUG:
                mpiprint(6,'Computing initial bmpo norm')
                norm0 = top_env.norm()
            top_env = top_env.apply_svd(chi)
            if DEBUG:
                mpiprint(6,'Computing resulting bmpo norm')
                norm1 = top_env.norm()
                mpiprint(0,'Add ket top BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
        # Update prev_env
        prev_env = top_env
        # Add the bra layer --------------------------------------------------
        """
        Doing the following contraction:

           +----+   +----+     +----+        +----+   +----+   +----+
        z--| p1 |-y-| p2 |--x--| p3 |-w ... -| p4 |-v--| p5 |-u-| p6 |--g
           +----+   +----+     +----+        +----+   +----+   +----+
              |        |         ||             |       ||        |
              a        |         lc             d       oe        f
              |        |         ||             |       ||        |
           +----+      |       +----+           |     +----+   +----+
           | l1 |---r--^-------| b1 |- ... ---^-s-----| b2 |-t-| r1 |
           +----+      |       +----+         |       +----+   +----+
              |        |          |           |          |        |
              |        |          |           |          |        |
              u        b          v           n          w        x
        """
        # Create the next top environment
        top_env = []
        # First absorb left boundary mpo
        res = einsum('zay,ura->zuyr',prev_env[0],left1)
        # Merge correct inds
        res.merge_inds([2,3])
        top_env.append(res)
        # Loop through and add bras
        for col in range(ncol):
            # Get the bra tensor
            braten = bra[col][row].copy()
            # Add identity ---------------------
            # (passes the bra's left bond through the ket-site position)
            # TODO - Make sure signs are correct (will give error in symmetric case)
            D1 = braten.shape[braten.legs[0][0]]
            Z1 = braten.qn_sectors[braten.legs[0][0]]
            I1 = eye(D1,
                     Z1,
                     is_symmetric=braten.is_symmetric,
                     backend=braten.backend)
            if len(braten.legs[0]) > 1:
                for legind in range(1,len(braten.legs[0])):
                    Dli = braten.shape[braten.legs[0][legind]]
                    Zli = braten.qn_sectors[braten.legs[0][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=braten.is_symmetric,
                             backend=braten.backend)
                    I1 = einsum('ij,IJ->iIjJ',I1,Ii)
                    I1.merge_inds([0,1])
                    I1.merge_inds([1,2])
            # Contract with previous environment
            res = einsum('ybx,Rr->yRbxr',prev_env[2*col+1],I1)
            # Merge correct indices
            res.merge_inds([0,1])
            res.merge_inds([2,3])
            # Add to top_env
            top_env.append(res)
            # Add bra --------------------------
            envten = prev_env[2*col+2].copy()
            # Unmerge physical index
            # (thermal states need the doubled physical index re-lumped)
            if thermal:
                envten.unmerge_ind(1)
                envten.merge_inds([1,2])
            else:
                envten.unmerge_ind(1)
            # Contract with bra
            res = einsum('xlcw,rvlsc->xrvws',envten,braten)
            # Merge correct inds
            res.merge_inds([0,1])
            res.merge_inds([2,3])
            # Add to top_env
            top_env.append(res)
        # Last, absorb right boundary mpo
        res = einsum('ufz,xtf->utxz',prev_env[2*ncol+1],right1)
        # Merge needed inds
        res.merge_inds([0,1])
        # Add to top_env
        top_env.append(res)
        # Put result into an MPS ------------------
        top_env = MPS(top_env)
        # Reduce bond dimension
        if truncate:
            mpiprint(5,'Truncating Boundary MPS')
            if DEBUG:
                mpiprint(6,'Computing initial bmpo norm')
                norm0 = top_env.norm()
            top_env = top_env.apply_svd(chi)
            if DEBUG:
                mpiprint(6,'Computing resulting bmpo norm')
                norm1 = top_env.norm()
                mpiprint(0,'Add bra top BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
    return top_env
def calc_top_envs_gen(bra,left_bmpo,right_bmpo,ket=None,chi=10):
    """
    Sweep downward from the top row, building the top boundary MPS
    environment above every row of a double peps column.

    Args:
        bra: list of peps columns (each a list of peps tensors)
        left_bmpo: left boundary MPO tensors (two per row)
        right_bmpo: right boundary MPO tensors (two per row)

    Kwargs:
        ket: ket peps columns; when None (or containing None entries)
             a copy of the bra is used instead
        chi: maximum boundary MPS bond dimension

    Returns:
        list of boundary MPSs, one per row (length Ny)
    """
    # Height of the peps column
    Ny = len(bra[0])
    # Decide whether the ket must be copied from the bra
    need_copy = ket is None or (hasattr(ket,'__len__') and ket[0] is None)
    if need_copy:
        # TODO - Conjugate this ket col?
        ket = [[ten.copy() for ten in col] for col in bra]
    # Build the environments row by row, starting at the top
    top_env = [None]*Ny
    for row in range(Ny-1,-1,-1):
        # The environment already built for the row above (None at the top)
        prev_env = None if row == Ny-1 else top_env[row+1]
        top_env[row] = update_top_env_gen(row,
                                          bra,
                                          ket,
                                          left_bmpo[2*row],
                                          left_bmpo[2*row+1],
                                          right_bmpo[2*row],
                                          right_bmpo[2*row+1],
                                          prev_env,
                                          chi=chi)
    return top_env
def update_bot_env_gen(row,bra,ket,left1,left2,right1,right2,prev_env,chi=10,truncate=True):
    """
    Absorb one row of a double-layer peps column into the bottom
    boundary MPS environment.

    Doing the following contraction:
         s    t            l    v            n         x
         |    |            |    |            |         |
         |    |            |    |            |         |
       +----+ +----+       |  +----+         |       +----+
       | l2 |-p-| k1 |----q---^---- ... -| k2 |---r----^-----| r2 |
       +----+ +----+       |  +----+         |       +----+
         |    | \          |    | \          |         |
         |    |  \         |    |  \         |         |
         j    |   k        |    |   m        |         o
         |    |    \       |    |    \       |         |
         |    |     \      |    |     \      |         |
       +----+ |     +----+ |    +----+     +----+
       | l1 |---g--^-------| b1 |-h ... ----^-------| b2 |-i-| r1 |
       +----+ |     +----+ |    +----+     +----+
         |    |      |     |      |          |
         a    b      c     d      e          f
         |    |      |     |      |          |
       +----+ +----+ +----+ +----+ +----+ +----+
    z--| p1 |-y-| p2 |--x--| p3 |-w ... --| p4 |--v-| p5 |-y-| p6 |--t
       +----+ +----+ +----+ +----+ +----+ +----+

    Args:
        row : int
            The row being absorbed into the environment
        bra : list of lists of peps tensors
            The bra layer (ncol columns)
        ket : list of lists of peps tensors
            The ket layer (ncol columns)
        left1,left2 : boundary MPO tensors
            The two left boundary MPO tensors adjacent to this row
        right1,right2 : boundary MPO tensors
            The two right boundary MPO tensors adjacent to this row
        prev_env : MPS or None
            The bottom environment from the row below
            (None when this is the bottom row)

    Kwargs:
        chi : int
            Maximum bond dimension of the boundary MPS
        truncate : bool
            If True, compress the resulting boundary MPS via SVD

    Returns:
        MPS : the updated bottom boundary MPS
    """
    # Figure out number of columns
    ncol = len(bra)
    # Determine if it is a thermal state (physical leg carries an ancilla index)
    thermal = len(bra[0][row].legs[2]) == 2
    # Create the new bottom environment
    if prev_env is None:
        # Create the first bottom environment
        bot_env = []
        # First site is the current left bound_mpo
        res = einsum('agj,jps->asgp',left1,left2)
        # Merge correct inds
        res.merge_inds([2,3])
        # Add to bot env
        bot_env.append(res)
        for col in range(ncol):
            # Copy the needed tensors
            ketten = ket[col][row].copy()
            braten = bra[col][row].copy()
            # Add ket -----------------------------------
            # Remove bottom index
            ketten = ketten.remove_empty_ind(1)
            # Create needed identity
            # TODO - Make sure signs are correct (will give error in symmetric case)
            Dl = braten.shape[braten.legs[0][0]]
            Zl = braten.qn_sectors[braten.legs[0][0]]
            I = eye(Dl,
                    Zl,
                    is_symmetric=braten.is_symmetric,
                    backend=braten.backend)
            if len(braten.legs[0]) > 1:
                for legind in range(1,len(braten.legs[0])):
                    Dli = braten.shape[braten.legs[0][legind]]
                    Zli = braten.qn_sectors[braten.legs[0][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=braten.is_symmetric,
                             backend=braten.backend)
                    I = einsum('ij,IJ->iIjJ',I,Ii)
                    I.merge_inds([0,1])
                    I.merge_inds([1,2])
            # Contract identity with the ket
            res = einsum('pkqt,Gg->Gptgkq',ketten,I)
            # Merge correct inds
            res.merge_inds([0,1])
            res.merge_inds([2,3,4])
            # Add to bot_env
            bot_env.append(res)
            # Add bra ----------------------------------
            # Remove bottom index
            braten = braten.remove_empty_ind(1)
            # Create correct identity
            # TODO - Make sure signs are correct (will give error in symmetric case)
            Dl = ketten.shape[ketten.legs[2][0]]
            Zl = ketten.qn_sectors[ketten.legs[2][0]]
            I = eye(Dl,
                    Zl,
                    is_symmetric=ketten.is_symmetric,
                    backend=ketten.backend)
            if len(ketten.legs[2]) > 1:
                for legind in range(1,len(ketten.legs[2])):
                    Dli = ketten.shape[ketten.legs[2][legind]]
                    Zli = ketten.qn_sectors[ketten.legs[2][legind]]
                    # Fixed: identity extension now uses ketten's properties
                    # (previously used braten, leaked from the identity built above)
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=ketten.is_symmetric,
                             backend=ketten.backend)
                    I = einsum('ij,IJ->iIjJ',I,Ii)
                    I.merge_inds([0,1])
                    I.merge_inds([1,2])
            # Contract identity with the bra
            res = einsum('gkhl,Qq->gkQlhq',braten,I)
            # Merge correct inds
            res.merge_inds([0,1,2])
            res.merge_inds([2,3])
            # Add to bot_env
            bot_env.append(res)
        # Last site is the current right bound_mpo
        res = einsum('fio,orx->irxf',right1,right2)
        # Merge correct inds
        res.merge_inds([0,1])
        # Add to bot env
        bot_env.append(res)
        # Put result into an MPS -------------------------------------------
        bot_env = MPS(bot_env)
        # Reduce bond dimension
        if truncate:
            mpiprint(5,'Truncating Boundary MPS')
            if DEBUG:
                mpiprint(6,'Computing initial bmpo norm')
                norm0 = bot_env.norm()
            bot_env = bot_env.apply_svd(chi)
            if DEBUG:
                mpiprint(6,'Computing resulting bmpo norm')
                norm1 = bot_env.norm()
                mpiprint(0,'Init bot BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
    else:
        # Add the bra layer --------------------------------------------------
        """
        Doing the following contraction:
          j   bk     l    vm     n         x
          |   ||     |    ||     |         |
          |   ||     |    ||     |         |
        +----+ |+------+----+ |+------+----+      +----+
        | l1 |---g--^-------| b1 |-h ... ----^-------| b2 |-i-| r1 |
        +----+ |     +----+ |    +----+     +----+
          |    |      |     |      |          |
          a    b      c     d      e          f
          |    |      |     |      |          |
        +----+ +----+ +----+ +----+ +----+ +----+
     z--| p1 |-y-| p2 |--x--| p3 |-w ... --| p4 |--v-| p5 |-u-| p6 |--t
        +----+ +----+ +----+ +----+ +----+ +----+
        """
        # Create the next bot environment
        bot_env = []
        # First, absorb left boundary mps
        res = einsum('agj,zay->zjyg',left1,prev_env[0])
        # Merge correct inds
        res.merge_inds([2,3])
        # Add to bottom env
        bot_env.append(res)
        # Loop through to add bras
        for col in range(ncol):
            braten = bra[col][row].copy()
            # Add identity ---------------------
            # TODO - Make sure signs are correct (will give error in symmetric case)
            D1 = braten.shape[braten.legs[0][0]]
            Z1 = braten.qn_sectors[braten.legs[0][0]]
            I1 = eye(D1,
                     Z1,
                     is_symmetric=braten.is_symmetric,
                     backend=braten.backend)
            if len(braten.legs[0]) > 1:
                for legind in range(1,len(braten.legs[0])):
                    Dli = braten.shape[braten.legs[0][legind]]
                    Zli = braten.qn_sectors[braten.legs[0][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=braten.is_symmetric,
                             backend=braten.backend)
                    I1 = einsum('ij,IJ->iIjJ',I1,Ii)
                    I1.merge_inds([0,1])
                    I1.merge_inds([1,2])
            D2 = braten.shape[braten.legs[2][0]]
            Z2 = braten.qn_sectors[braten.legs[2][0]]
            I2 = eye(D2,
                     Z2,
                     is_symmetric=braten.is_symmetric,
                     backend=braten.backend)
            if len(braten.legs[2]) > 1:
                for legind in range(1,len(braten.legs[2])):
                    Dli = braten.shape[braten.legs[2][legind]]
                    Zli = braten.qn_sectors[braten.legs[2][legind]]
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=braten.is_symmetric,
                             backend=braten.backend)
                    I2 = einsum('ij,IJ->iIjJ',I2,Ii)
                    I2.merge_inds([0,1])
                    I2.merge_inds([1,2])
            # Contract with previous environment
            res = einsum('ybx,Gg->yGbxg',prev_env[2*col+1],I1)
            res = einsum('yGbxg,Kk->yGbKxgk',res,I2)
            # Merge correct indices
            res.merge_inds([0,1])
            res.merge_inds([1,2])
            res.merge_inds([2,3,4])
            # Add to bot_env
            bot_env.append(res)
            # Add bra --------------------------
            # Contract with previous bot_env
            res = einsum('gckhl,xcw->xgklwh',braten,prev_env[2*col+2])
            # Merge correct indices
            res.merge_inds([0,1,2])
            res.merge_inds([2,3])
            # Add to bot_env
            bot_env.append(res)
        # Last, absorb right boundary mpo
        res = einsum('fix,uft->uixt',right1,prev_env[2*ncol+1])
        # Merge needed inds
        res.merge_inds([0,1])
        # Add to bot_env
        bot_env.append(res)
        # Put result into an MPS ------------------
        bot_env = MPS(bot_env)
        # Reduce bond dimension
        if truncate:
            mpiprint(5,'Truncating Boundary MPS')
            if DEBUG:
                mpiprint(6,'Computing initial bmpo norm')
                norm0 = bot_env.norm()
            bot_env = bot_env.apply_svd(chi)
            if DEBUG:
                mpiprint(6,'Computing resulting bmpo norm')
                norm1 = bot_env.norm()
                # Fixed log label: this section absorbs the bra layer
                # (message previously said 'Add ket bot BMPO')
                mpiprint(0,'Add bra bot BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
        # Update prev_env
        prev_env = bot_env
        # Add the ket layer --------------------------------------------------
        """
        Doing the following contraction:
          s     t           v            x
          |     |      |    |      |     |
          |     |      |    |      |     |
        +----+ +----+  |  +----+   |   +----+
        | l2 |-p-| k1 |----q---^---- ... --| k2 |---r---^-----| r2 |
        +----+ +----+  |  +----+   |   +----+
          |     ||     |    ||     |     |
          a     bk     c    dm     e     f
          |     ||     |    ||     |     |
        +----+ +----+ +----+ +----+ +----+ +----+
     z--| p1 |-y-| p2 |--x--| p3 |-w ... --| p4 |--v-| p5 |-y-| p6 |--t
        +----+ +----+ +----+ +----+ +----+ +----+
        """
        # Create the next bottom environment
        bot_env = []
        # First, absorb left boundary mpo
        res = einsum('zay,aps->zsyp',prev_env[0],left2)
        # Merge correct inds
        res.merge_inds([2,3])
        # Add to bot_env
        bot_env.append(res)
        # Loop through and add ket tensors
        for col in range(ncol):
            # Get the ket tensor
            ketten = ket[col][row].copy()
            # Add ket --------------------------
            envten = prev_env[2*col+1].copy()
            # Unmerge physical index
            if thermal:
                envten.unmerge_ind(1)
                envten.merge_inds([2,3])
            else:
                envten.unmerge_ind(1)
            # Contract with ket
            res = einsum('ybkx,pbkqt->yptxq',envten,ketten)
            # Merge correct indices
            res.merge_inds([0,1])
            res.merge_inds([2,3])
            # Add to bot_env
            bot_env.append(res)
            # Add identity ---------------------
            # TODO - Make sure signs are correct (will give error in symmetric case)
            D1 = ketten.shape[ketten.legs[3][0]]
            Z1 = ketten.qn_sectors[ketten.legs[3][0]]
            I1 = eye(D1,
                     Z1,
                     is_symmetric=ketten.is_symmetric,
                     backend=ketten.backend)
            if len(ketten.legs[3]) > 1:
                for legind in range(1,len(ketten.legs[3])):
                    Dli = ketten.shape[ketten.legs[3][legind]]
                    Zli = ketten.qn_sectors[ketten.legs[3][legind]]
                    # Fixed: identity extension now uses ketten's properties
                    # (previously used braten, a stale variable from the bra loop above)
                    Ii = eye(Dli,
                             Zli,
                             is_symmetric=ketten.is_symmetric,
                             backend=ketten.backend)
                    I1 = einsum('ij,IJ->iIjJ',I1,Ii)
                    I1.merge_inds([0,1])
                    I1.merge_inds([1,2])
            # Contract with previous environment
            res = einsum('xcw,Qq->xQcwq',prev_env[2*col+2],I1)
            # Merge correct indices
            res.merge_inds([0,1])
            res.merge_inds([2,3])
            # Add to bot_env
            bot_env.append(res)
        # Last, absorb right boundary mpo
        res = einsum('yft,frx->yrxt',prev_env[2*ncol+1],right2)
        # Merge needed inds
        res.merge_inds([0,1])
        # Add to bot_env
        bot_env.append(res)
        # Put result into an MPS ------------------
        bot_env = MPS(bot_env)
        # Reduce bond dimension
        if truncate:
            mpiprint(5,'Truncating Boundary MPS')
            if DEBUG:
                mpiprint(6,'Computing initial bmpo norm')
                norm0 = bot_env.norm()
            bot_env = bot_env.apply_svd(chi)
            if DEBUG:
                mpiprint(6,'Computing resulting bmpo norm')
                norm1 = bot_env.norm()
                # Fixed log label: this section absorbs the ket layer
                # (message previously said 'Add bra bot BMPO')
                mpiprint(0,'Add ket bot BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
    # return result
    return bot_env
def calc_bot_envs_gen(bra,left_bmpo,right_bmpo,ket=None,chi=10):
    """
    Sweep upward from the bottom row, building the bottom boundary MPS
    environment below every row of a double peps column.

    Args:
        bra: list of peps columns (each a list of peps tensors)
        left_bmpo: left boundary MPO tensors (two per row)
        right_bmpo: right boundary MPO tensors (two per row)

    Kwargs:
        ket: ket peps columns; when None (or containing None entries)
             a copy of the bra is used instead
        chi: maximum boundary MPS bond dimension

    Returns:
        list of boundary MPSs, one per row (length Ny)
    """
    # Height of the peps column
    Ny = len(bra[0])
    # Decide whether the ket must be copied from the bra
    need_copy = ket is None or (hasattr(ket,'__len__') and ket[0] is None)
    if need_copy:
        # TODO - Conjugate this ket col?
        ket = [[ten.copy() for ten in col] for col in bra]
    # Build the environments row by row, starting at the bottom
    bot_env = [None]*Ny
    for row in range(Ny):
        # The environment already built for the row below (None at the bottom)
        prev_env = None if row == 0 else bot_env[row-1]
        bot_env[row] = update_bot_env_gen(row,
                                          bra,
                                          ket,
                                          left_bmpo[2*row],
                                          left_bmpo[2*row+1],
                                          right_bmpo[2*row],
                                          right_bmpo[2*row+1],
                                          prev_env,
                                          chi=chi)
    return bot_env
def update_top_env2(row,bra,ket,left1,left2,right1,right2,prev_env,chi=10,truncate=True,contracted_env=False):
    """
    Absorb one row (bra + ket, two columns) into the top environment.

    When contracted_env is False this delegates to update_top_env_gen,
    producing a boundary MPS. When True, the environment is kept as a
    single fully-contracted tensor and the row is absorbed via a chain
    of einsums.

    Doing the following contraction:
       +-----------------------------------------------+
       |                   prev_env                    |
       +-----------------------------------------------+
          |     |      |    |      |     |
          a     b      c    d      e     f
          |     |      |    |      |     |
       +----+ +----+   |  +----+   |   +----+
       | l2 |-g-| k1 |-----h--^----| k2 |-----i--^-----| r2 |
       +----+ +----+   |  +----+   |   +----+
          |     | \    |    | \    |     |
          |     |  \   |    |  \   |     |
          j     |   l  |    |   o  |     q
          |     |    \ |    |    \ |     |
          |     |     \|    |     \|     |
       +----+   |    +----+ |    +----+ +----+
       | l1 |---r--^-------| b1 |-----^-s-----| b2 |-t-| r1 |
       +----+   |    +----+ |    +----+ +----+
          |     |      |    |      |     |
          |     |      |    |      |     |
          u     k      v    n      w     x

    Args:
        row : int
            The row being absorbed
        bra,ket : lists of two peps columns
            The bra and ket tensors
        left1,left2,right1,right2 : boundary MPO tensors
            The left/right boundary MPO tensors adjacent to this row
        prev_env : boundary MPS, tensor, or None
            The environment from the row above (None for the top row)

    Kwargs:
        chi : int
            Max boundary MPS bond dimension (MPS path only)
        truncate : bool
            Whether to truncate the boundary MPS (MPS path only)
        contracted_env : bool
            If True, treat the environment as one contracted tensor

    Returns:
        The updated top environment (MPS or single tensor)
    """
    if not contracted_env:
        # Boundary-MPS representation: delegate to the general updater
        top_env = update_top_env_gen(row,
                                     bra,
                                     ket,
                                     left1,
                                     left2,
                                     right1,
                                     right2,
                                     prev_env,
                                     chi=chi,
                                     truncate=truncate)
    else:
        # Fully-contracted representation: grab the two bra/ket tensors of this row
        bra1 = bra[0][row]
        bra2 = bra[1][row]
        ket1 = ket[0][row]
        ket2 = ket[1][row]
        if prev_env is None:
            # Create first top env (empty boundary legs removed as we go)
            tmp = einsum('jga,gklhb->abjklh',left2,ket1).remove_empty_ind(0).remove_empty_ind(0)
            tmp = einsum('jklh,hnoid->djklnoi',tmp,ket2).remove_empty_ind(0)
            tmp = einsum('jklnoi,qif->fjklnoq',tmp,right2).remove_empty_ind(0)
            tmp = einsum('jklnoq,urj->urklnoq',tmp,left1)
            tmp = einsum('urklnoq,rvlsc->cukvsnoq',tmp,bra1).remove_empty_ind(0)
            tmp = einsum('ukvsnoq,swote->eukvnwtq',tmp,bra2).remove_empty_ind(0)
            top_env = einsum('ukvnwtq,xtq->ukvnwx',tmp,right1)
        else:
            # Absorb the row into the existing contracted environment
            tmp = einsum('jga,abcdef->jgbcdef',left2,prev_env)
            tmp = einsum('jgbcdef,gklhb->jklhcdef',tmp,ket1)
            tmp = einsum('jklhcdef,hnoid->jklcnoief',tmp,ket2)
            tmp = einsum('jklcnoief,qif->jklcnoeq',tmp,right2)
            tmp = einsum('jklcnoeq,urj->urklcnoeq',tmp,left1)
            tmp = einsum('urklcnoeq,rvlsc->ukvnsoeq',tmp,bra1)
            tmp = einsum('ukvnsoeq,swote->ukvnwtq',tmp,bra2)
            top_env = einsum('ukvnwtq,xtq->ukvnwx',tmp,right1)
    return top_env
def calc_top_envs2(bra,left_bmpo,right_bmpo,ket=None,chi=10,truncate=True,contracted_env=False):
    """
    Sweep downward from the top row, building the top environment above
    every row of a two-column peps section.

    Args:
        bra: list of peps columns (each a list of peps tensors)
        left_bmpo: left boundary MPO tensors (two per row)
        right_bmpo: right boundary MPO tensors (two per row)

    Kwargs:
        ket: ket peps columns; when None (or containing None entries)
             a copy of the bra is used instead
        chi: maximum boundary MPS bond dimension
        truncate: whether to truncate the boundary MPS
        contracted_env: if True, keep the environment fully contracted

    Returns:
        list of top environments, one per row (length Ny)
    """
    # Height of the peps column
    Ny = len(bra[0])
    # Decide whether the ket must be copied from the bra
    need_copy = ket is None or (hasattr(ket,'__len__') and ket[0] is None)
    if need_copy:
        # TODO - Conjugate this ket col?
        ket = [[ten.copy() for ten in col] for col in bra]
    # Build the environments row by row, starting at the top
    top_env = [None]*Ny
    for row in range(Ny-1,-1,-1):
        # The environment already built for the row above (None at the top)
        prev_env = None if row == Ny-1 else top_env[row+1]
        top_env[row] = update_top_env2(row,
                                       bra,
                                       ket,
                                       left_bmpo[2*row],
                                       left_bmpo[2*row+1],
                                       right_bmpo[2*row],
                                       right_bmpo[2*row+1],
                                       prev_env,
                                       chi=chi,
                                       truncate=truncate,
                                       contracted_env=contracted_env)
    return top_env
def update_bot_env2(row,bra,ket,left1,left2,right1,right2,prev_env,chi=10,truncate=True,contracted_env=False):
    """
    Absorb one row (bra + ket, two columns) into the bottom environment.

    When contracted_env is False this delegates to update_bot_env_gen,
    producing a boundary MPS. When True, the environment is kept as a
    single fully-contracted tensor and the row is absorbed via a chain
    of einsums.

    Doing the following contraction:
          s     t      l    v      n     x
          |     |      |    |      |     |
          |     |      |    |      |     |
       +----+ +----+   |  +----+   |   +----+
       | l2 |-p-| k1 |----q---^----| k2 |---r----^-----| r2 |
       +----+ +----+   |  +----+   |   +----+
          |     | \    |    | \    |     |
          |     |  \   |    |  \   |     |
          j     |   k  |    |   m  |     o
          |     |    \ |    |    \ |     |
          |     |     \|    |     \|     |
       +----+   |    +----+ |    +----+ +----+
       | l1 |---g--^-------| b1 |--h--^-------| b2 |-i-| r1 |
       +----+   |    +----+ |    +----+ +----+
          |     |      |    |      |     |
          a     b      c    d      e     f
          |     |      |    |      |     |
       +-----------------------------------------------+
       |                   prev_env                    |
       +-----------------------------------------------+

    Args:
        row : int
            The row being absorbed
        bra,ket : lists of two peps columns
            The bra and ket tensors
        left1,left2,right1,right2 : boundary MPO tensors
            The left/right boundary MPO tensors adjacent to this row
        prev_env : boundary MPS, tensor, or None
            The environment from the row below (None for the bottom row)

    Kwargs:
        chi : int
            Max boundary MPS bond dimension (MPS path only)
        truncate : bool
            Whether to truncate the boundary MPS (MPS path only)
        contracted_env : bool
            If True, treat the environment as one contracted tensor

    Returns:
        The updated bottom environment (MPS or single tensor)
    """
    if not contracted_env:
        # Boundary-MPS representation: delegate to the general updater
        bot_env = update_bot_env_gen(row,
                                     bra,
                                     ket,
                                     left1,
                                     left2,
                                     right1,
                                     right2,
                                     prev_env,
                                     chi=chi,
                                     truncate=truncate)
    else:
        # Fully-contracted representation: grab the two bra/ket tensors of this row
        bra1 = bra[0][row]
        bra2 = bra[1][row]
        ket1 = ket[0][row]
        ket2 = ket[1][row]
        if prev_env is None:
            # Create first bottom env (empty boundary legs removed as we go)
            tmp = einsum('agj,gckhl->acjklh',left1,bra1).remove_empty_ind(0).remove_empty_ind(0)
            tmp = einsum('jklh,hemin->ejklmni',tmp,bra2).remove_empty_ind(0)
            tmp = einsum('jklmni,fio->fjklmno',tmp,right1).remove_empty_ind(0)
            tmp = einsum('jklmno,jps->spklmno',tmp,left2)
            tmp = einsum('spklmno,pbkqt->bstqlmno',tmp,ket1).remove_empty_ind(0)
            tmp = einsum('stqlmno,qdmrv->dstlvrno',tmp,ket2).remove_empty_ind(0)
            bot_env = einsum('stlvrno,orx->stlvnx',tmp,right2)
        else:
            # Absorb the row into the existing contracted environment
            tmp = einsum('agj,abcdef->jgbcdef',left1,prev_env)
            tmp = einsum('jgbcdef,gckhl->jbklhdef',tmp,bra1)
            tmp = einsum('jbklhdef,hemin->jbkldmnif',tmp,bra2)
            tmp = einsum('jbkldmnif,fio->jbkldmno',tmp,right1)
            tmp = einsum('jbkldmno,jps->spbkldmno',tmp,left2)
            tmp = einsum('spbkldmno,pbkqt->stqldmno',tmp,ket1)
            tmp = einsum('stqldmno,qdmrv->stlvrno',tmp,ket2)
            bot_env = einsum('stlvrno,orx->stlvnx',tmp,right2)
    return bot_env
def calc_bot_envs2(bra,left_bmpo,right_bmpo,ket=None,chi=10,truncate=True,contracted_env=False):
    """
    Sweep upward from the bottom row, building the bottom environment
    below every row of a two-column peps section.

    Args:
        bra: list of peps columns (each a list of peps tensors)
        left_bmpo: left boundary MPO tensors (two per row)
        right_bmpo: right boundary MPO tensors (two per row)

    Kwargs:
        ket: ket peps columns; when None (or containing None entries)
             a copy of the bra is used instead
        chi: maximum boundary MPS bond dimension
        truncate: whether to truncate the boundary MPS
        contracted_env: if True, keep the environment fully contracted

    Returns:
        list of bottom environments, one per row (length Ny)
    """
    # Height of the peps column
    Ny = len(bra[0])
    # Decide whether the ket must be copied from the bra
    need_copy = ket is None or (hasattr(ket,'__len__') and ket[0] is None)
    if need_copy:
        # TODO - Conjugate this ket col?
        ket = [[ten.copy() for ten in col] for col in bra]
    # Build the environments row by row, starting at the bottom
    bot_env = [None]*Ny
    for row in range(Ny):
        # The environment already built for the row below (None at the bottom)
        prev_env = None if row == 0 else bot_env[row-1]
        bot_env[row] = update_bot_env2(row,
                                       bra,
                                       ket,
                                       left_bmpo[2*row],
                                       left_bmpo[2*row+1],
                                       right_bmpo[2*row],
                                       right_bmpo[2*row+1],
                                       prev_env,
                                       chi=chi,
                                       truncate=truncate,
                                       contracted_env=contracted_env)
    return bot_env
def update_top_env(bra,ket,left1,left2,right1,right2,prev_env):
    """
    Absorb one row (single bra and ket tensor plus the adjacent
    boundary MPO tensors) into the top environment tensor.

    Doing the following contraction:
       +-------+-------+-------+
       |       |       |       |
       O       u       |       o
       |       |       |       |
       +---l---+---r---^-------+
       |       |\      |       |
       |       | \     |       |
       N       |  p    U       n
       |       |   \   |       |
       |       |    \| |       |
       +-------^---L---+---R---+
       |       |       |       |
       M       d       D       m

    Tensors not already in memory are loaded from disk for the
    contraction and written back afterwards, so callers may pass
    disk-backed tensors freely.

    Args:
        bra,ket : peps tensors for this row
        left1,left2 : the two left boundary MPO tensors for this row
        right1,right2 : the two right boundary MPO tensors for this row
        prev_env : top environment tensor from the row above, or None

    Returns:
        The updated top environment tensor (in memory)
    """
    # Check if stuff is in memory (or needs loading)
    in_mem_bra = bra.in_mem
    in_mem_ket = ket.in_mem
    in_mem_left1 = left1.in_mem
    in_mem_left2 = left2.in_mem
    in_mem_right1 = right1.in_mem
    in_mem_right2 = right2.in_mem
    if prev_env is not None:
        in_mem_prev_env = prev_env.in_mem
    else:
        # No previous environment -> nothing to load/cache for it
        in_mem_prev_env = True
    # Load stuff that is not in memory
    if not in_mem_bra: bra.from_disk()
    if not in_mem_ket: ket.from_disk()
    if not in_mem_left1: left1.from_disk()
    if not in_mem_left2: left2.from_disk()
    if not in_mem_right1: right1.from_disk()
    if not in_mem_right2: right2.from_disk()
    if not in_mem_prev_env: prev_env.from_disk()
    # Compute first top environment (no prev_env; empty legs removed)
    if prev_env is None:
        tmp = einsum('ldpru,NlO->uONdpr',ket,left2).remove_empty_ind(0).remove_empty_ind(0)
        tmp = einsum('Ndpr,nro->oNdpn',tmp,right2).remove_empty_ind(0)
        tmp = einsum('Ndpn,LDpRU->UNdLDRn',tmp,bra).remove_empty_ind(0)
        tmp = einsum('NdLDRn,MLN->MdDRn',tmp,left1)
        top_env = einsum('MdDRn,mRn->MdDm',tmp,right1)
    # Add on to top env
    else:
        tmp = einsum('ldpru,OuUo->OldprUo',ket,prev_env)
        tmp = einsum('OldprUo,NlO->NdprUo',tmp,left2)
        tmp = einsum('NdprUo,nro->NdpUn',tmp,right2)
        tmp = einsum('NdpUn,LDpRU->NdLDRn',tmp,bra)
        tmp = einsum('NdLDRn,MLN->MdDRn',tmp,left1)
        top_env = einsum('MdDRn,mRn->MdDm',tmp,right1)
    # Cache stuff that is not in memory (restore original disk residency)
    if not in_mem_bra: bra.to_disk()
    if not in_mem_ket: ket.to_disk()
    if not in_mem_left1: left1.to_disk()
    if not in_mem_left2: left2.to_disk()
    if not in_mem_right1: right1.to_disk()
    if not in_mem_right2: right2.to_disk()
    if not in_mem_prev_env: prev_env.to_disk()
    # Return result
    return top_env
#@profile
def calc_top_envs(bra_col,left_bmpo,right_bmpo,ket_col=None,in_mem=True):
    """
    Compute the top environment tensor above every row of a peps column.

    Doing the following contraction:
       +-------+-------+-------+
       |       |       |       |
       O       U       |       o
       |       |       |       |
       +---L---+---R---^-------+
       |       |\      |       |
       |       | \     |       |
       N       D  P    u       n
       |          \    |       |
       |           \|  |       |
       +-------l-------+---r---+
       |       |               |
       M       d               m

    Args:
        bra_col : list of peps tensors
            The bra tensors of the column, bottom to top
        left_bmpo : left boundary MPO tensors (two per row)
        right_bmpo : right boundary MPO tensors (two per row)

    Kwargs:
        ket_col : list of peps tensors
            The ket column; if None, a copy of the bra column is used
        in_mem : bool
            If False, tensors are disk-backed: each row's tensors are
            loaded before the update and written back afterwards.

    Returns:
        list of top environment tensors, one per row (length Ny)
    """
    # Figure out height of peps column
    Ny = len(bra_col)
    # Copy bra if needed
    if ket_col is None:
        ket_col = [None]*len(bra_col)
        for i in range(len(ket_col)):
            ket_col[i] = bra_col[i].copy()
    # Compute top environment
    top_env = [None]*Ny
    for row in reversed(range(Ny)):
        # Get previous Environemnt
        if row == Ny-1: prev_env = None
        else: prev_env = top_env[row+1]
        # Make sure everything we need is loaded
        if not in_mem:
            bra_col[row].from_disk()
            ket_col[row].from_disk()
            left_bmpo[2*row].from_disk()
            left_bmpo[2*row+1].from_disk()
            right_bmpo[2*row].from_disk()
            right_bmpo[2*row+1].from_disk()
            if prev_env is not None:
                # NOTE(review): prev_env was computed in the previous iteration
                # and has not yet been written to disk at this point; this
                # presumably relies on from_disk() being a no-op for
                # in-memory tensors — confirm.
                prev_env.from_disk()
        # Update the top environments
        top_env[row] = update_top_env(bra_col[row],
                                      ket_col[row],
                                      left_bmpo[2*row],
                                      left_bmpo[2*row+1],
                                      right_bmpo[2*row],
                                      right_bmpo[2*row+1],
                                      prev_env)
        # Write tensors back to disk (if needed)
        if not in_mem:
            bra_col[row].to_disk()
            ket_col[row].to_disk()
            left_bmpo[2*row].to_disk()
            left_bmpo[2*row+1].to_disk()
            right_bmpo[2*row].to_disk()
            right_bmpo[2*row+1].to_disk()
            # Cache the environment consumed this iteration
            if row != Ny-1:
                top_env[row+1].to_disk()
    # Write final top env to disk
    if not in_mem:
        top_env[row].to_disk()
    # Return Result
    return top_env
def update_bot_env(bra,ket,left1,left2,right1,right2,prev_env):
    """
    Absorb one row (single bra and ket tensor plus the adjacent
    boundary MPO tensors) into the bottom environment tensor.

    Doing the following contraction:
       O       u       |       o
       |       |       |       |
       |       |       |       |
       +---l---+---r---^-------+
       |       |\      |       |
       |       | \     |       |
       N       d  P    U       n
       |       |   \   |       |
       |       |    \  |       |
       +-------^---L---+---R---+
       |       |       |       |
       |       |       |       |
       M       |       D       m
       |       |       |       |
       +-------+-------+-------+

    Tensors not already in memory are loaded from disk for the
    contraction and written back afterwards, so callers may pass
    disk-backed tensors freely.

    Args:
        bra,ket : peps tensors for this row
        left1,left2 : the two left boundary MPO tensors for this row
        right1,right2 : the two right boundary MPO tensors for this row
        prev_env : bottom environment tensor from the row below, or None

    Returns:
        The updated bottom environment tensor (in memory)
    """
    # Check if stuff is in memory (or needs loading)
    in_mem_bra = bra.in_mem
    in_mem_ket = ket.in_mem
    in_mem_left1 = left1.in_mem
    in_mem_left2 = left2.in_mem
    in_mem_right1 = right1.in_mem
    in_mem_right2 = right2.in_mem
    if prev_env is not None:
        in_mem_prev_env = prev_env.in_mem
    else:
        # No previous environment -> nothing to load/cache for it
        in_mem_prev_env = True
    # Load stuff that is not in memory
    if not in_mem_bra: bra.from_disk()
    if not in_mem_ket: ket.from_disk()
    if not in_mem_left1: left1.from_disk()
    if not in_mem_left2: left2.from_disk()
    if not in_mem_right1: right1.from_disk()
    if not in_mem_right2: right2.from_disk()
    if not in_mem_prev_env: prev_env.from_disk()
    # Compute first bottom environment (no prev_env; empty legs removed)
    if prev_env is None:
        tmp = einsum('LDPRU,MLN->DMNPUR',bra,left1).remove_empty_ind(0).remove_empty_ind(0)
        tmp = einsum('NPUR,mRn->mNPUn',tmp,right1).remove_empty_ind(0)
        tmp = einsum('NPUn,ldPru->dNlurUn',tmp,ket).remove_empty_ind(0)
        tmp = einsum('NlurUn,NlO->OurUn',tmp,left2)
        bot_env = einsum('OurUn,nro->OuUo',tmp,right2)
    # Update bottom environemnt
    else:
        tmp = einsum('LDPRU,MdDm->MdLPURm',bra,prev_env)
        tmp = einsum('MdLPURm,MLN->NdPURm',tmp,left1)
        tmp = einsum('NdPURm,mRn->NdPUn',tmp,right1)
        tmp = einsum('NdPUn,ldPru->NlurUn',tmp,ket)
        tmp = einsum('NlurUn,NlO->OurUn',tmp,left2)
        bot_env = einsum('OurUn,nro->OuUo',tmp,right2)
    # Cache stuff that is not in memory (restore original disk residency)
    if not in_mem_bra: bra.to_disk()
    if not in_mem_ket: ket.to_disk()
    if not in_mem_left1: left1.to_disk()
    if not in_mem_left2: left2.to_disk()
    if not in_mem_right1: right1.to_disk()
    if not in_mem_right2: right2.to_disk()
    if not in_mem_prev_env: prev_env.to_disk()
    # Return result
    return bot_env
#@profile
def calc_bot_envs(bra_col,left_bmpo,right_bmpo,ket_col=None,in_mem=True):
    """
    Compute the bottom environment tensor below every row of a peps column.

    Doing the following contraction:
       O       u       |       o
       |       |       |       |
       |       |       |       |
       +---l---+---r---^-------+
       |       |\      |       |
       |       | \     |       |
       N       d  P    U       n
       |       |   \   |       |
       |       |    \| |       |
       +-------^---L---+---R---+
       |       |       |       |
       |       |       |       |
       M       |       D       m
       |       |       |       |
       +-------+-------+-------+

    Args:
        bra_col : list of peps tensors
            The bra tensors of the column, bottom to top
        left_bmpo : left boundary MPO tensors (two per row)
        right_bmpo : right boundary MPO tensors (two per row)

    Kwargs:
        ket_col : list of peps tensors
            The ket column; if None, a copy of the bra column is used
        in_mem : bool
            If False, tensors are disk-backed: each row's tensors are
            loaded before the update and written back afterwards.

    Returns:
        list of bottom environment tensors, one per row (length Ny)
    """
    # Figure out height of peps column
    Ny = len(bra_col)
    # Copy bra if needed
    if ket_col is None:
        ket_col = [None]*len(bra_col)
        for i in range(len(ket_col)):
            ket_col[i] = bra_col[i].copy()
    # Compute the bottom environment
    bot_env = [None]*Ny
    for row in range(Ny):
        # Get previous environment
        if row == 0: prev_env = None
        else: prev_env = bot_env[row-1]
        # Make sure everything we need is loaded
        if not in_mem:
            bra_col[row].from_disk()
            ket_col[row].from_disk()
            left_bmpo[2*row].from_disk()
            left_bmpo[2*row+1].from_disk()
            right_bmpo[2*row].from_disk()
            right_bmpo[2*row+1].from_disk()
            if prev_env is not None:
                prev_env.from_disk()
        # Update the bottom environments
        bot_env[row] = update_bot_env(bra_col[row],
                                      ket_col[row],
                                      left_bmpo[2*row],
                                      left_bmpo[2*row+1],
                                      right_bmpo[2*row],
                                      right_bmpo[2*row+1],
                                      prev_env)
        # Write tensors back to disk (if needed)
        if not in_mem:
            bra_col[row].to_disk()
            ket_col[row].to_disk()
            left_bmpo[2*row].to_disk()
            left_bmpo[2*row+1].to_disk()
            right_bmpo[2*row].to_disk()
            right_bmpo[2*row+1].to_disk()
            # Cache the environment consumed this iteration.
            # (Fixed off-by-one: the condition was `row-1 > 0`, which
            # skipped bot_env[0] and left it in memory; the mirrored
            # calc_top_envs caches every consumed environment.)
            if row > 0:
                bot_env[row-1].to_disk()
    # Write final bottom env to disk
    if not in_mem:
        bot_env[row].to_disk()
    # Return result
    return bot_env
def reduce_tensors(peps1,peps2):
    """
    Reduce the two peps tensors, i.e. pull off physical index

    Args:
        peps1 : peps tensor
            The bottom peps tensor (legs LDPRU)
        peps2 : peps tensor
            The top peps tensor (legs lUpru)

    Returns:
        ub : reduced bottom peps tensor (physical leg split off)
        phys_b : bottom physical tensor
        phys_t : top physical tensor
        vt : reduced top peps tensor (physical leg split off)
    """
    if DEBUG:
        # Figure out combined tensor (for check)
        original = einsum('LDPRU,lUpru->lLDPRpru',peps1,peps2)
    # Reduce bottom tensor: move the physical leg next to the up leg,
    # then split them off with a single SVD.
    # (Fixed: the SVD was previously computed twice, with the first
    # result assigned to an unused variable and discarded.)
    peps1 = peps1.transpose([0,1,3,2,4])
    (ub,sb,vb) = peps1.svd(3,return_ent=False,return_wgt=False)
    phys_b = einsum('ab,bPU->aPU',sb,vb)
    # Reduce top tensor: move the down and physical legs first,
    # then split them off with an SVD.
    peps2 = peps2.transpose([1,2,0,3,4])
    (ut,st,vt) = peps2.svd(2,return_ent=False,return_wgt=False)
    phys_t = einsum('DPa,ab->DPb',ut,st)
    vt = vt.transpose([1,0,2,3])
    if DEBUG:
        # Check to make sure initial and reduced peps tensors are identical
        final = einsum('LDRa,aPb->LDRPb',ub,phys_b)
        final = einsum('LDRPb,bpc->LDRPpc',final,phys_t)
        final = einsum('LDRPpc,lcru->lLDPRpru',final,vt)
        mpiprint(0,'Reduced Difference = {}'.format((original-final).abs().sum()))
    # Return result
    return ub,phys_b,phys_t,vt
def pos_sqrt_vec(vec):
    """
    Take the element-wise square root of a vector, clamping
    negative entries to zero.

    The vector is modified in place and also returned.
    """
    # Element-wise loop so any backend supporting item assignment works
    for idx in range(vec.shape[0]):
        val = vec[idx]
        vec[idx] = val**0.5 if val > 0. else 0.
    return vec
#@profile
def make_N_positive(N,hermitian=True,positive=True,reduced=True):
"""
"""
hermitian,positive=False,False
# Get a hermitian approximation of the environment
if hermitian:
if reduced:
N1 = N.copy()
N1 = N1.transpose([0,2,1,3])
N = N.transpose([1,3,0,2])
N = (N+N1)/2.
N1 = N.copy()
N = einsum('UDab,abud->UuDd',N,N1)
else:
N1 = N.copy()
N1 = N1.transpose([0,2,4,6,8,10,1,3,5,7,9,11])
N = N.transpose([1,3,5,7,9,11,0,2,4,6,8,10])
N = (N+N1)/2.
N1 = N.copy()
N = einsum('ldrkustvwxyz,tvwxyzLDRKUS->lLdDrRkKuUsS',N,N1)
# Get a positive approximation of the environment
if positive:
try:
if reduced:
if N.sym is None:
N = N.transpose([0,2,1,3])
n1 = np.prod([N.ten.shape[i] for i in N.legs[0]])
n2 = np.prod([N.ten.shape[i] for i in N.legs[1]])
n3 = np.prod([N.ten.shape[i] for i in N.legs[2]])
n4 = np.prod([N.ten.shape[i] for i in N.legs[3]])
Nmat = N.backend.reshape(N.ten,(n1*n2,n3*n4))
u,v = N.backend.eigh(Nmat)
u = pos_sqrt_vec(u)
Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v)
N.ten = Nmat.reshape(N.shape)
N = N.transpose([0,2,1,3])
else:
N = N.copy().transpose([0,2,1,3])
Nmat = N.ten.make_sparse()
(N1,N2,N3,N4,n1,n2,n3,n4) = Nmat.shape
Nmat = Nmat.transpose([0,4,1,5,2,6,3,7])
Nmat = Nmat.reshape((N1*n1*N2*n2,N3*n3*N4*n4))
u,v = N.backend.eigh(Nmat)
u = pos_sqrt_vec(u)
Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v)
Nmat = Nmat.reshape((N1,n1,N2,n2,N3,n3,N4,n4))
Nmat = Nmat.transpose([0,2,4,6,1,3,5,7])
# Cast back into a symtensor
delta = N.ten.get_irrep_map()
Nmat = N.backend.einsum('ABCDabcd,ABCD->ABCabcd',Nmat,delta)
N.ten.array = Nmat
# Retranspose
N = N.transpose([0,2,1,3])
else:
if N.sym is None:
N = N.transpose([0,2,4,6,8,10,1,3,5,7,9,11])
n0 = np.prod([N.ten.shape[i] for i in N.legs[0]])
n1 = np.prod([N.ten.shape[i] for i in N.legs[1]])
n2 = np.prod([N.ten.shape[i] for i in N.legs[2]])
n3 = np.prod([N.ten.shape[i] for i in N.legs[3]])
n4 = np.prod([N.ten.shape[i] for i in N.legs[4]])
n5 = np.prod([N.ten.shape[i] for i in N.legs[5]])
n6 = np.prod([N.ten.shape[i] for i in N.legs[6]])
n7 = np.prod([N.ten.shape[i] for i in N.legs[7]])
n8 = np.prod([N.ten.shape[i] for i in N.legs[8]])
n9 = np.prod([N.ten.shape[i] for i in N.legs[9]])
n10 = np.prod([N.ten.shape[i] for i in N.legs[10]])
n11 = np.prod([N.ten.shape[i] for i in N.legs[11]])
Nmat = N.backend.reshape(N.ten,(n0*n1*n2*n3*n4*n5,n6*n7*n8*n9*n10*n11))
u,v = N.backend.eigh(Nmat)
u = pos_sqrt_vec(u)
Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v)
N.ten = Nmat.reshape(N.shape)
N = N.transpose([0,6,1,7,2,8,3,9,4,10,5,11])
else:
N = N.copy().transpose([0,2,4,6,8,10,1,3,5,7,9,11])
Nmat = N.ten.make_sparse()
(N0,N1,N2,N3,N4,N5,N6,N7,N8,N9,N10,N11,n0,n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11) = Nmat.shape
Nmat = Nmat.transpose([0,12,1,13,2,14,3,15,4,16,5,17,6,18,7,19,8,20,9,21,10,22,11,23])
Nmat = Nmat.reshape((N0*n0*N1*n1*N2*n2*N3*n3*N4*n4*N5*n5,N6*n6*N7*n7*N8*n8*N9*n9*N10*n10*N11*n11))
u,v = N.backend.eigh(Nmat)
u = pos_sqrt_vec(u)
Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v)
Nmat = Nmat.reshape((N0,n0,N1,n1,N2,n2,N3,n3,N4,n4,N5,n5,N6,n6,N7,n7,N8,n8,N9,n9,N10,n10,N11,n11))
Nmat = Nmat.transpose([0,2,4,6,8,10,12,14,16,18,20,22,1,3,5,7,9,11,13,15,17,19,21,23])
delta = N.ten.get_irrep_map()
Nmat = N.backend.einsum('ABCDEFGHIJKLabcdefghijkl,ABCDEFGHIJKL->ABCDEFGHIJKabcdefghijkl',Nmat,delta)
N.ten.array = Nmat
N = N.transpose([0,6,1,7,2,8,3,9,4,10,5,11])
except Exception as e:
mpiprint(0,'Failed to make N positive:\n\t{}'.format(e))
return N
#@profile
def calc_local_env(bra1,bra2,ket1,ket2,env_top,env_bot,lbmpo,rbmpo,
                   reduced=True,hermitian=True,positive=True,in_mem=True):
    """
    Calculate the local environment around two peps tensors
    Args:
        bra1 : peps tensor
            The peps tensor for the bottom site
        bra2 : peps tensor
            The peps tensor for the top site
        ket1 : peps tensor
            The peps tensor for the bottom site
        ket2 : peps tensor
            The peps tensor for the top site
        env_top : env tensor
            The top environment for the given sites
        env_bot : env tensor
            The bottom environment for the given sites
        lbmpo : list of left boundary mpo tensors
            The four left boundary mpo tensors surrounding
            the two peps tensors
        rbmpo : list of right boundary mpo tensors
            The four right boundary mpo tensors surrounding
            the two peps tensors
    Kwargs:
        reduced : bool
            If true, then this function returns the reduced
            environment. Currently, this is the only option
            available.
        hermitian : bool
            Approximate the environment with its nearest
            hermitian approximate
        positive : bool
            Approximate the environment with its nearest
            possible positive approximate
        in_mem : bool
            Whether the tensors input to this function are in
            memory. If not, tensors should be loaded first (and rewritten
            to disk afterwards). The output of this funciton, i.e.
            the local env, will always be in memory.

    Returns:
        If reduced: the tuple
        (peps_b, phys_b, phys_t, peps_t, ket_b, phys_bk, phys_tk, ket_t, N)
        of reduced tensors plus the environment N.
        Otherwise: the environment tensor N alone.
    """
    # Load tensors (as needed)
    if not in_mem:
        bra1.from_disk()
        bra2.from_disk()
        ket1.from_disk()
        ket2.from_disk()
        if env_top is not None: env_top.from_disk()
        if env_bot is not None: env_bot.from_disk()
        for i in range(len(lbmpo)):
            lbmpo[i].from_disk()
        for i in range(len(rbmpo)):
            rbmpo[i].from_disk()
    if reduced:
        # Get reduced tensors (physical legs pulled off via SVD)
        peps_b,phys_b,phys_t,peps_t = reduce_tensors(bra1,bra2)
        ket_b,phys_bk,phys_tk,ket_t = reduce_tensors(ket1,ket2)
        # Compute bottom half of environment
        if env_bot is None:
            # Bottom row: empty boundary legs are removed as we contract
            tmp = einsum('CLB,LDRU->CDBUR',lbmpo[0],peps_b).remove_empty_ind(0).remove_empty_ind(0)
            tmp = einsum('BUR,cRb->cBUb',tmp,rbmpo[0]).remove_empty_ind(0)
            tmp = einsum('BUb,BlA->AlUb',tmp,lbmpo[1])
            tmp = einsum('AlUb,ldru->dAurUb',tmp,ket_b).remove_empty_ind(0)
            envb= einsum('AurUb,bra->AuUa',tmp,rbmpo[1])
        else:
            tmp = einsum('CdDc,CLB->BLdDc',env_bot,lbmpo[0])
            tmp = einsum('BLdDc,LDRU->BdURc',tmp,peps_b)
            tmp = einsum('BdURc,cRb->BdUb',tmp,rbmpo[0])
            tmp = einsum('BdUb,BlA->AldUb',tmp,lbmpo[1])
            tmp = einsum('AldUb,ldru->AurUb',tmp,ket_b)
            envb= einsum('AurUb,bra->AuUa',tmp,rbmpo[1])
        # Compute top half of environment
        if env_top is None:
            # Top row: empty boundary legs are removed as we contract
            tmp = einsum('BlC,ldru->CuBdr',lbmpo[3],ket_t).remove_empty_ind(0).remove_empty_ind(0)
            tmp = einsum('Bdr,brc->cBdb',tmp,rbmpo[3]).remove_empty_ind(0)
            tmp = einsum('Bdb,ALB->ALdb',tmp,lbmpo[2])
            tmp = einsum('ALdb,LDRU->UAdDRb',tmp,peps_t).remove_empty_ind(0)
            envt= einsum('AdDRb,aRb->AdDa',tmp,rbmpo[2])
        else:
            tmp = einsum('CuUc,BlC->BluUc',env_top,lbmpo[3])
            tmp = einsum('BluUc,ldru->BdrUc',tmp,ket_t)
            tmp = einsum('BdrUc,brc->BdUb',tmp,rbmpo[3])
            tmp = einsum('BdUb,ALB->ALdUb',tmp,lbmpo[2])
            tmp = einsum('ALdUb,LDRU->AdDRb',tmp,peps_t)
            envt= einsum('AdDRb,aRb->AdDa',tmp,rbmpo[2])
        # Compute Environment
        N = einsum('AdDa,AuUa->uUdD',envt,envb)
        N = make_N_positive(N,hermitian=hermitian,positive=positive)
        # write tensors to disk (as needed)
        if not in_mem:
            bra1.to_disk()
            bra2.to_disk()
            ket1.to_disk()
            ket2.to_disk()
            if env_top is not None: env_top.to_disk()
            if env_bot is not None: env_bot.to_disk()
            for i in range(len(lbmpo)):
                lbmpo[i].to_disk()
            for i in range(len(rbmpo)):
                rbmpo[i].to_disk()
        # Return Results
        return peps_b, phys_b, phys_t, peps_t, ket_b, phys_bk, phys_tk, ket_t, N
    else:
        # Get the PEPS tensors
        peps_b, peps_t = bra1, bra2
        ket_b, ket_t = ket1, ket2
        # Compute bottom half of environment
        if env_bot is None:
            if lbmpo[0].is_symmetric:
                # Must determine correct signs for empty tensor (a bit overly complicated)
                symtmp = einsum('CLB,BlA->CLlA',lbmpo[0],lbmpo[1])
                symtmp = einsum('LDPRU,CLlA->DPRUClA',peps_b,symtmp)
                symtmp = einsum('cRb,DPRUClA->DPUClAcb',rbmpo[0],symtmp)
                symtmp = einsum('bra,DPUClAcb->DPUClAcra',rbmpo[1],symtmp)
                symtmp = einsum('ldPru,DPUClAcra->CdDcAuUa',ket_b,symtmp)
                # Create an empty environment
                env_bot = ones((1,1,1,1),
                                sym=[symtmp.sym[0][:4],
                                     symtmp.sym[1][:4],
                                     None,
                                     None],
                                backend=lbmpo[0].
                                backend,dtype=lbmpo[0].dtype)
            else:
                # Create an empty environment
                env_bot = ones((1,1,1,1),
                                sym=None,
                                backend=lbmpo[0].backend,
                                dtype=lbmpo[0].dtype)
        # Contract bottom half of environment
        tmp = einsum('CdDc,CLB->BLdDc',env_bot,lbmpo[0])
        tmp = einsum('BLdDc,cRb->BLdDRb',tmp,rbmpo[0])
        tmp = einsum('BLdDRb,BlA->AlLdDRb',tmp,lbmpo[1])
        envb = einsum('AlLdDRb,bra->AlLdDrRa',tmp,rbmpo[1])
        # Compute top half of environment
        if env_top is None:
            if lbmpo[3].is_symmetric:
                # Must determine correct signs for empty tensor (a bit overly complicated)
                symtmp = einsum('ALB,BlC->ALlC',lbmpo[2],lbmpo[3])
                symtmp = einsum('LDPRU,ALlC->DPRUAlC',peps_t,symtmp)
                symtmp = einsum('aRb,DPRUAlC->DPUAlCab',rbmpo[2],symtmp)
                symtmp = einsum('brc,DPUAlCab->DPUAlCarc',rbmpo[3],symtmp)
                symtmp = einsum('ldPru,DPUAlCarc->CuUcDaAd',ket_t,symtmp)
                # Create an empty environment
                env_top = ones((1,1,1,1),
                               sym=[symtmp.sym[0][:4],
                                    symtmp.sym[1][:4],
                                    None,
                                    None],
                               backend=lbmpo[0].backend,
                               dtype=lbmpo[0].dtype)
            else:
                # Create an empty environment
                env_top = ones((1,1,1,1),
                               sym=None,
                               backend=lbmpo[0].backend,dtype=lbmpo[0].dtype)
        # Contract top half of environment
        tmp = einsum('CuUc,BlC->BluUc',env_top,lbmpo[3])
        tmp = einsum('BluUc,brc->BluUrb',tmp,rbmpo[3])
        tmp = einsum('BluUrb,ALB->ALluUrb',tmp,lbmpo[2])
        envt = einsum('ALluUrb,aRb->AlLuUrRa',tmp,rbmpo[2])
        # Compute Environment
        N = einsum('AkKuUsSa,AlLdDrRa->lLdDrRkKuUsS',envt,envb)
        N = make_N_positive(N,
                            hermitian=hermitian,
                            positive=positive,
                            reduced=reduced)
        # write tensors to disk (as needed)
        if not in_mem:
            bra1.to_disk()
            bra2.to_disk()
            ket1.to_disk()
            ket2.to_disk()
            if env_top is not None: env_top.to_disk()
            if env_bot is not None: env_bot.to_disk()
            for i in range(len(lbmpo)):
                lbmpo[i].to_disk()
            for i in range(len(rbmpo)):
                rbmpo[i].to_disk()
        # Return Results
        return N
def calc_local_op(phys_b_bra,phys_t_bra,N,ham,
                  phys_b_ket=None,phys_t_ket=None,
                  reduced=True,normalize=True,return_norm=False):
    """
    Evaluate a local two-site operator (e.g. the local energy) for a
    pair of neighboring sites inside the environment tensor N.

    Args:
        phys_b_bra:
            The bottom physical bra tensor of the two-site pair
        phys_t_bra:
            The top physical bra tensor of the two-site pair
        N:
            The environment tensor surrounding the two sites
        ham:
            The local operator to evaluate; if None, the norm is
            used in place of the operator value
    Kwargs:
        phys_b_ket:
            The bottom physical ket tensor; if None, a conjugated
            copy of phys_b_bra is used
        phys_t_ket:
            The top physical ket tensor; if None, a conjugated
            copy of phys_t_bra is used
        reduced: bool
            Whether the physical tensors are in the reduced
            formalism (3 legs) instead of full site tensors (5 legs)
        normalize: bool
            Whether to divide the operator value by the norm
        return_norm: bool
            Whether to also return the norm alongside the result
    Returns:
        E (or E,norm if return_norm is True)
    """
    # Make some copies (default ket = conjugated bra)
    if phys_t_ket is None:
        phys_t_ket = phys_t_bra.copy().conj()
    if phys_b_ket is None:
        phys_b_ket = phys_b_bra.copy().conj()
    # Compute Energy (or op value)
    if reduced:
        # Contract the bra pair, then with the environment and ket pair
        tmp = einsum('APU,UQB->APQB',phys_b_bra,phys_t_bra)
        tmp1= einsum('APQB,aAbB->aPQb',tmp,N)
        tmp2= einsum('apu,uqb->apqb',phys_b_ket,phys_t_ket)
        norm = einsum('apqb,apqb->',tmp1,tmp2)
        if ham is not None:
            # Leave only the physical legs open for the operator contraction
            tmp = einsum('aPQb,apqb->PQpq',tmp1,tmp2)
            if len(tmp.legs[0]) == 2:
                # Thermal state: split the doubled (physical+ancilla) legs,
                # contract the operator on the physical part only, then re-merge
                tmp.unmerge_ind(3)
                tmp.unmerge_ind(2)
                tmp.unmerge_ind(1)
                tmp.unmerge_ind(0)
                E = einsum('PaQbpaqb,PQpq->',tmp,ham)
                tmp.merge_inds([0,1])
                tmp.merge_inds([1,2])
                tmp.merge_inds([2,3])
                tmp.merge_inds([3,4])
            else:
                # Normal peps
                E = einsum('PQpq,PQpq->',tmp,ham)
        else:
            # No operator supplied: report the norm as the "value"
            E = norm
    else:
        # Full (non-reduced) site tensors
        # (Bra is capital, ket is lower case)
        comb1 = einsum('LDPRZ,KZQSU->LDPRKQSU', phys_b_bra, phys_t_bra)
        comb1 = einsum('LDPRKQSU,lLdDrRkKuUsS->PQldrkus', comb1, N)
        comb2 = einsum('ldprz,kzqsu->ldprkqsu', phys_b_ket, phys_t_ket)
        norm = einsum('PQldrkus,ldPrkQsu->', comb1, comb2)
        if ham is not None:
            phys_inds = einsum('PQldrkus,ldprkqsu->PQpq', comb1, comb2)
            if len(phys_inds.legs[0]) == 2:
                # Thermal state: operator acts on the physical half of each leg
                phys_inds.unmerge_ind(3)
                phys_inds.unmerge_ind(2)
                phys_inds.unmerge_ind(1)
                phys_inds.unmerge_ind(0)
                E = einsum('PaQbpaqb,PQpq->', phys_inds, ham)
                phys_inds.merge_inds([0,1])
                phys_inds.merge_inds([1,2])
                phys_inds.merge_inds([2,3])
                phys_inds.merge_inds([3,4])
            else:
                # Normal peps
                E = einsum('PQpq,PQpq->', phys_inds, ham)
        else:
            E = norm
    # Return Result
    if normalize:
        if return_norm:
            return E/norm,norm
        else:
            return E/norm
    else:
        if return_norm:
            return E,norm
        else:
            return E
def calc_N(row,bra_col,left_bmpo,right_bmpo,top_envs,bot_envs,hermitian=True,positive=True,ket_col=None,in_mem=True,reduced=True):
    """
    Calculate the local environment tensor around rows `row` and `row+1`
    of a single peps column.

    Args:
        row: int
            Index of the lower of the two neighboring rows
        bra_col:
            A single column of bra peps tensors
        left_bmpo:
            The boundary mpo to the left of the peps column
        right_bmpo:
            The boundary mpo to the right of the peps column
        top_envs:
            Environments built by contracting the column from the top
        bot_envs:
            Environments built by contracting the column from the bottom
    Kwargs:
        hermitian: bool
            Whether to make the environment hermitian
        positive: bool
            Whether to make the environment positive
        ket_col:
            A single column of ket peps tensors; if None, a copy
            of the bra column is used
        in_mem: bool
            if True, all calculations are done with tensors in memory
        reduced: bool
            Whether to use the reduced tensor formalism
    Returns:
        The result of calc_local_env for the two rows
    """
    # Copy bra if no ket was supplied
    if ket_col is None:
        ket_col = [None]*len(bra_col)
        for i in range(len(ket_col)):
            ket_col[i] = bra_col[i].copy()
    # Select surrounding environments, using identities (None) at the
    # column edges: no top environment above the last row pair and no
    # bottom environment below the first row pair.
    # (This collapses the previous four near-identical branches.)
    top = None if row == len(bra_col)-2 else top_envs[row+2]
    bot = None if row == 0 else bot_envs[row-1]
    # Compute Local Environment (N)
    res = calc_local_env(bra_col[row],
                         bra_col[row+1],
                         ket_col[row],
                         ket_col[row+1],
                         top,
                         bot,
                         left_bmpo[row*2,row*2+1,row*2+2,row*2+3],
                         right_bmpo[row*2,row*2+1,row*2+2,row*2+3],
                         hermitian=hermitian,
                         positive=positive,
                         in_mem=in_mem,
                         reduced=reduced)
    return res
def calc_local_nn_op_lb(mpo,bra,ket,top,bot,left,right,normalize=True,contracted_env=False,chi=10):
    """
    Calculate the value of an operator as an mpo acting on the left
    and bottom bonds of a 2x2 peps grid.

    Args:
        mpo:
            The three-site mpo, applied to the top-left, bottom-left
            and bottom-right sites of the plaquette
        bra: 2x2 list of lists
            The bra peps tensors of the plaquette
        ket: 2x2 list of lists
            The ket peps tensors of the plaquette
        top:
            Environment above the plaquette (None acts as identity)
        bot:
            Environment below the plaquette (None acts as identity)
        left:
            Left boundary mpo tensors (indexed 0-3)
        right:
            Right boundary mpo tensors (indexed 0-3)
    Kwargs:
        normalize: bool
            Whether to divide the result by the plaquette norm
        contracted_env: bool
            Whether the top/bottom environments are fully contracted
            tensors instead of boundary mps
        chi: int
            Maximum bond dimension for the boundary mps
    Returns:
        E: float
            The operator value for the plaquette
    """
    # Check if it is a thermal state (doubled physical leg):
    thermal = len(bra[0][1].legs[2]) == 2
    # Absorb MPO into bra
    Hbra = [[None,None],[None,None]]
    if thermal:
        # Thermal case: temporarily split the (physical,ancilla) leg so the
        # mpo contracts only the physical part, then restore the merge.
        bra[0][1].unmerge_ind(2)
        Hbra[0][1] = einsum('ldparu,pPx->ldxParu',bra[0][1],mpo[0]) # Top left site
        Hbra[0][1].merge_inds([1,2])
        Hbra[0][1].merge_inds([2,3])
        bra[0][1].merge_inds([2,3])
        bra[0][0].unmerge_ind(2)
        Hbra[0][0] = einsum('ldparu,xpPy->ldParyux',bra[0][0],mpo[1]) # Bottom left site
        Hbra[0][0].merge_inds([2,3])
        Hbra[0][0].merge_inds([3,4])
        Hbra[0][0].merge_inds([4,5])
        bra[0][0].merge_inds([2,3])
        bra[1][0].unmerge_ind(2)
        Hbra[1][0] = einsum('ldparu,ypP->lydParu',bra[1][0],mpo[2]) # Bottom right site
        Hbra[1][0].merge_inds([0,1])
        Hbra[1][0].merge_inds([2,3])
        Hbra[1][1] = bra[1][1].copy()
        bra[1][0].merge_inds([2,3])
    else:
        # Standard peps: contract the mpo onto the single physical leg,
        # merging the mpo bond into the neighboring auxiliary bond.
        Hbra[0][1] = einsum('ldpru,pPx->ldxPru',bra[0][1],mpo[0]) # Top left site
        Hbra[0][1].merge_inds([1,2])
        Hbra[0][0] = einsum('ldpru,xpPy->ldPryux',bra[0][0],mpo[1]) # Bottom left site
        Hbra[0][0].merge_inds([3,4])
        Hbra[0][0].merge_inds([4,5])
        Hbra[1][0] = einsum('ldpru,ypP->lydPru',bra[1][0],mpo[2]) # Bottom right site
        Hbra[1][0].merge_inds([0,1])
        Hbra[1][1] = bra[1][1].copy()
    # Calculate Operator -------------------------------------
    # Compute bottom environment as a boundary mpo
    Hbot = update_bot_env2(0,
                           Hbra,
                           ket,
                           left[0],
                           left[1],
                           right[0],
                           right[1],
                           bot,
                           truncate=True,
                           chi=chi,
                           contracted_env=contracted_env)
    # Compute top environment as a boundary mpo
    Htop = update_top_env2(1,
                           Hbra,
                           ket,
                           left[2],
                           left[3],
                           right[2],
                           right[3],
                           top,
                           truncate=True,
                           chi=chi,
                           contracted_env=contracted_env)
    # Contract top and bottom boundary mpos to get result
    if contracted_env:
        E = einsum('lkbKBr,lkbKBr->',Hbot,Htop)
    else:
        E = Hbot.contract(Htop)
    # Calculate Norm -------------------------------------
    if normalize:
        # Same contraction, without the operator absorbed into the bra
        # Compute bottom environment as a boundary mpo
        Nbot = update_bot_env2(0,
                               bra,
                               ket,
                               left[0],
                               left[1],
                               right[0],
                               right[1],
                               bot,
                               truncate=True,
                               chi=chi,
                               contracted_env=contracted_env)
        # Compute top environment as a boundary mpo
        Ntop = update_top_env2(1,
                               bra,
                               ket,
                               left[2],
                               left[3],
                               right[2],
                               right[3],
                               top,
                               truncate=True,
                               chi=chi,
                               contracted_env=contracted_env)
        # Contract top and bottom boundary mpos to get result
        if contracted_env:
            norm = einsum('lkbKBr,lkbKBr->',Nbot,Ntop)
        else:
            norm = Nbot.contract(Ntop)
        # Convert scalar tensors to plain floats before dividing
        if not isinstance(E,float): E = E.to_val()
        if not isinstance(norm,float): norm = norm.to_val()
        E /= norm
    # Return result
    return E
def calc_local_nn_op_ru(mpo,bra,ket,top,bot,left,right,normalize=True,contracted_env=False,chi=10):
    """
    Calculate the value of an operator as an mpo acting on the right
    and top bonds of a 2x2 peps grid.

    Args:
        mpo:
            The three-site mpo, applied to the top-left, top-right
            and bottom-right sites of the plaquette
        bra: 2x2 list of lists
            The bra peps tensors of the plaquette
        ket: 2x2 list of lists
            The ket peps tensors of the plaquette
        top:
            Environment above the plaquette (None acts as identity)
        bot:
            Environment below the plaquette (None acts as identity)
        left:
            Left boundary mpo tensors (indexed 0-3)
        right:
            Right boundary mpo tensors (indexed 0-3)
    Kwargs:
        normalize: bool
            Whether to divide the result by the plaquette norm
        contracted_env: bool
            Whether the top/bottom environments are fully contracted
            tensors instead of boundary mps
        chi: int
            Maximum bond dimension for the boundary mps
    Returns:
        E: float
            The operator value for the plaquette
    """
    # Check if it is a thermal state (doubled physical leg):
    thermal = len(bra[0][1].legs[2]) == 2
    # Absorb MPO into bra
    Hbra = [[None,None],[None,None]]
    if thermal:
        # Thermal case: split the (physical,ancilla) leg so the mpo
        # contracts only the physical part, then restore the merge.
        bra[0][1].unmerge_ind(2)
        Hbra[0][1] = einsum('ldparu,pPx->ldParxu',bra[0][1],mpo[0]) # Top Left Site
        Hbra[0][1].merge_inds([2,3])
        Hbra[0][1].merge_inds([3,4])
        bra[0][1].merge_inds([2,3])
        bra[1][1].unmerge_ind(2)
        Hbra[1][1] = einsum('ldparu,xpPy->lxdyParu',bra[1][1],mpo[1]) # Top Right Site
        Hbra[1][1].merge_inds([0,1])
        Hbra[1][1].merge_inds([1,2])
        Hbra[1][1].merge_inds([2,3])
        bra[1][1].merge_inds([2,3])
        bra[1][0].unmerge_ind(2)
        Hbra[1][0] = einsum('ldparu,ypP->ldParuy',bra[1][0],mpo[2]) # Bottom right site
        Hbra[1][0].merge_inds([2,3])
        Hbra[1][0].merge_inds([4,5])
        bra[1][0].merge_inds([2,3])
        Hbra[0][0] = bra[0][0].copy()
    else:
        # Standard peps: contract the mpo onto the single physical leg,
        # merging the mpo bond into the neighboring auxiliary bond.
        Hbra[0][1] = einsum('ldpru,pPx->ldPrxu',bra[0][1],mpo[0]) # Top Left Site
        Hbra[0][1].merge_inds([3,4])
        Hbra[1][1] = einsum('ldpru,xpPy->lxdyPru',bra[1][1],mpo[1]) # Top Right Site
        Hbra[1][1].merge_inds([0,1])
        Hbra[1][1].merge_inds([1,2])
        Hbra[1][0] = einsum('ldpru,ypP->ldPruy',bra[1][0],mpo[2]) # Bottom right site
        Hbra[1][0].merge_inds([4,5])
        Hbra[0][0] = bra[0][0].copy()
    # Calculate Operator -------------------------------------
    # Compute bottom environment as a boundary mpo
    Hbot = update_bot_env2(0,
                           Hbra,
                           ket,
                           left[0],
                           left[1],
                           right[0],
                           right[1],
                           bot,
                           truncate=True,
                           chi=chi,
                           contracted_env=contracted_env)
    # Compute top environment as a boundary mpo
    Htop = update_top_env2(1,
                           Hbra,
                           ket,
                           left[2],
                           left[3],
                           right[2],
                           right[3],
                           top,
                           truncate=True,
                           chi=chi,
                           contracted_env=contracted_env)
    # Contract top and bottom boundary mpos to get result
    if contracted_env:
        E = einsum('lkbKBr,lkbKBr->',Hbot,Htop)
    else:
        E = Hbot.contract(Htop)
    # Calculate Norm -------------------------------------
    if normalize:
        # Compute bottom environment as a boundary mpo
        Nbot = update_bot_env2(0,
                               bra,
                               ket,
                               left[0],
                               left[1],
                               right[0],
                               right[1],
                               bot,
                               truncate=True,
                               chi=chi,
                               contracted_env=contracted_env)
        # Compute top environment as a boundary mpo
        Ntop = update_top_env2(1,
                               bra,
                               ket,
                               left[2],
                               left[3],
                               right[2],
                               right[3],
                               top,
                               truncate=True,
                               chi=chi,
                               contracted_env=contracted_env)
        # Contract top and bottom boundary mpos to get result
        if contracted_env:
            norm = einsum('lkbKBr,lkbKBr->',Nbot,Ntop)
        else:
            norm = Nbot.contract(Ntop)
        # Consistency fix: convert scalar tensors to plain floats before
        # dividing (previously missing here, present in calc_local_nn_op_lb)
        if not isinstance(E,float): E = E.to_val()
        if not isinstance(norm,float): norm = norm.to_val()
        E /= norm
    # Return result
    return E
def calc_local_nn_op(row,bra,ops_col,left_bmpo,right_bmpo,bot_envs,top_envs,ket=None,normalize=True,contracted_env=False,chi=10):
    """
    Calculate the value of an operator on a 2x2 square
    Args:
        row: int
            The row of the ops_col to be evaluated
        bra: list of list of ndarrays
            The needed columns of the peps
        left_bmpo:
            The boundary mpo to the left of the two peps columns
        right_bmpo:
            The boundary mpo to the right of the two peps columns
        bot_envs:
            The boundary mpo version of the bottom environment
        top_envs:
            The boundary mpo version of the top environment
        ops_col: list of list of ndarrays
            The operators acting on next nearest neighboring sites
            within the two columns
    Kwargs:
        normalize: bool
            Whether to normalize the operator evaluations
        ket: List of list of ndarrays
            The needed columns of the ket
        contracted_env: bool
            Whether to contract the upper and lower environment
            or leave it as a boundary mps
        chi: int
            Max bond dimension for the boundary mps on the top
            and bottom
    Returns:
        E: float
            The operator value for the given 2x2 plaquette
    """
    # Copy bra if needed ----------------------------------
    copy_ket = False
    if ket is None: copy_ket = True
    elif hasattr(ket,'__len__'):
        if ket[0] is None: copy_ket = True
    if copy_ket:
        ket = [None]*len(bra)
        for i in range(len(bra)):
            ketcol = [None]*len(bra[i])
            for j in range(len(bra[i])):
                ketcol[j] = bra[i][j].copy()
                # TODO - Conjugate this ket col?
            ket[i] = ketcol
    # Extract needed tensors -------------------------------
    # Upper and lower environments =====
    # None is used as an identity environment at the lattice edges
    if row == 0:
        if len(bra[0]) == 2:
            # Only two sites in column, use identity at both ends
            top,bot = None,None
        else:
            # At bottom unit cell, use identity on bottom
            top=top_envs[row+2]
            bot=None
    elif row == len(bra[0])-2:
        # At top unit cell, use identity on top
        top = None
        bot = bot_envs[row-1]
    else:
        # In the bulk, no identity needed
        top = top_envs[row+2]
        bot = bot_envs[row-1]
    # PEPS tensors =====================
    # 2x2 plaquette of bra/ket tensors, indexed [column][row]
    cell_bra = [[bra[0][row],bra[0][row+1]],
                [bra[1][row],bra[1][row+1]]]
    cell_ket = [[ket[0][row],ket[0][row+1]],
                [ket[1][row],ket[1][row+1]]]
    cell_lbmpo = left_bmpo[row*2,row*2+1,row*2+2,row*2+3]
    cell_rbmpo = right_bmpo[row*2,row*2+1,row*2+2,row*2+3]
    # Flip tensors where needed ========
    # Flip bra and ket tensors (horizontal mirror: swap columns and
    # exchange the left/right legs via the transpose)
    flip_bra = [[bra[1][row].copy().transpose([3,1,2,0,4]),bra[1][row+1].copy().transpose([3,1,2,0,4])],
                [bra[0][row].copy().transpose([3,1,2,0,4]),bra[0][row+1].copy().transpose([3,1,2,0,4])]]
    flip_ket = [[ket[1][row].copy().transpose([3,1,2,0,4]),ket[1][row+1].copy().transpose([3,1,2,0,4])],
                [ket[0][row].copy().transpose([3,1,2,0,4]),ket[0][row+1].copy().transpose([3,1,2,0,4])]]
    # Flip (contracted) top/bot environments
    # Always contract bot/top env to make transpose easier
    if not contracted_env:
        if top is not None:
            flip_top = einsum('ijk,klm->ijlm',top[0],top[1]).remove_empty_ind(0)
            flip_top = einsum('jlm,mno->jlno',flip_top,top[2])
            flip_top = einsum('jlno,opq->jlnpq',flip_top,top[3])
            flip_top = einsum('jlnpq,qrs->jlnprs',flip_top,top[4])
            flip_top = einsum('jlnprs,stu->jlnprtu',flip_top,top[5]).remove_empty_ind(6)
        if bot is not None:
            flip_bot = einsum('ijk,klm->ijlm',bot[0],bot[1]).remove_empty_ind(0)
            flip_bot = einsum('jlm,mno->jlno',flip_bot,bot[2])
            flip_bot = einsum('jlno,opq->jlnpq',flip_bot,bot[3])
            flip_bot = einsum('jlnpq,qrs->jlnprs',flip_bot,bot[4])
            flip_bot = einsum('jlnprs,stu->jlnprtu',flip_bot,bot[5]).remove_empty_ind(6)
    # NOTE(review): flip_top/flip_bot are only assigned above when
    # contracted_env is False; if contracted_env is True while top/bot
    # is not None, the transpose below references an unbound name --
    # confirm whether contracted environments should be flipped here.
    if top is not None: 
        flip_top = flip_top.transpose([5,3,4,1,2,0])
    else: flip_top = None
    if bot is not None: 
        flip_bot = flip_bot.transpose([5,3,4,1,2,0])
    else: flip_bot = None
    # Calculation energy contribution from first MPO -------
    E1 = calc_local_nn_op_lb(ops_col[row][0],
                             cell_bra,
                             cell_ket,
                             top,
                             bot,
                             cell_lbmpo,
                             cell_rbmpo,
                             normalize=normalize,
                             chi=chi,
                             contracted_env=contracted_env)
    # Calculate energy contribution from third MPO ---------
    # (must flip horizontally so we can use the lb procedure
    E2 = calc_local_nn_op_lb(ops_col[row][1],
                             flip_bra,
                             flip_ket,
                             flip_top,
                             flip_bot,
                             cell_rbmpo,
                             cell_lbmpo,
                             normalize=normalize,
                             chi=chi,
                             contracted_env=True)
    # Calculate energy contribution from third MPO -----------
    E3 = calc_local_nn_op_ru(ops_col[row][2],
                             cell_bra,
                             cell_ket,
                             top,
                             bot,
                             cell_lbmpo,
                             cell_rbmpo,
                             normalize=normalize,
                             chi=chi,
                             contracted_env=contracted_env)
    # Return resulting energy --------------------------------
    E = E1+E2+E3
    return E
def calc_single_column_nn_op(peps,left_bmpo,right_bmpo,ops_col,normalize=True,ket=None,chi=10,contracted_env=False):
    """
    Evaluate a next-nearest (nn) neighbor operator's contribution
    coming from two neighboring columns of a peps.
    Args:
        peps: List of list of ndarrays
            The needed columns of the peps
        left_bmpo:
            The boundary mpo to the left of the two peps columns
        right_bmpo:
            The boundary mpo to the right of the two peps columns
        ops_col: list of list of ndarrays
            The operators acting on next nearest neighboring sites
            within the two columns
    Kwargs:
        normalize: bool
            Whether to normalize the operator evaluations
        ket: List of list of ndarrays
            The needed columns of the ket
        chi: int
            Max bond dimension for the top/bottom boundary mps
        contracted_env: bool
            Whether to contract the upper and lower environment
            or leave it as a boundary mps
    Returns:
        E:
            Per-row operator values for interactions between the
            two columns
    """
    # Build environments approaching from above and below
    top_envs = calc_top_envs2(peps,left_bmpo,right_bmpo,ket=ket,chi=chi,contracted_env=contracted_env)
    bot_envs = calc_bot_envs2(peps,left_bmpo,right_bmpo,ket=ket,chi=chi,contracted_env=contracted_env)
    # Evaluate the operator one row at a time
    nrows = len(ops_col)
    E = peps[0][0].backend.zeros(nrows)
    for rowind in range(nrows):
        E[rowind] = calc_local_nn_op(rowind,
                                     peps,
                                     ops_col,
                                     left_bmpo,
                                     right_bmpo,
                                     bot_envs,
                                     top_envs,
                                     ket=ket,
                                     chi=chi,
                                     normalize=normalize,
                                     contracted_env=contracted_env)
    return E
def calc_single_column_op(peps_col,left_bmpo,right_bmpo,ops_col,
                          normalize=True,ket_col=None,in_mem=True):
    """
    Evaluate the operator contribution from interactions contained
    within a single peps column.
    Args:
        peps_col:
            A single column of the peps
        left_bmpo:
            The boundary mpo to the left of the peps column
        right_bmpo:
            The boundary mpo to the right of the peps column
        ops_col:
            The operators acting on nearest neighboring sites
            within the column
    Kwargs:
        normalize: bool
            Whether to normalize the operator evaluations
        ket_col:
            A single column of the ket (if different from the bra)
        in_mem: bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory
    Returns:
        E:
            Per-row operator values for this column
    """
    # Environments approaching each row pair from above and below
    top_envs = calc_top_envs(peps_col,left_bmpo,right_bmpo,ket_col=ket_col,in_mem=in_mem)
    bot_envs = calc_bot_envs(peps_col,left_bmpo,right_bmpo,ket_col=ket_col,in_mem=in_mem)
    # Holder for the per-row results
    E = peps_col[0].backend.zeros(len(ops_col))
    for rowind in range(len(ops_col)):
        # Local environment around rows (rowind, rowind+1)
        env = calc_N(rowind,peps_col,left_bmpo,right_bmpo,top_envs,bot_envs,
                     hermitian=False,
                     positive=False,
                     ket_col=ket_col,
                     in_mem=in_mem)
        _,phys_b,phys_t,_,_,phys_bk,phys_tk,_,N = env
        # Evaluate the local operator in that environment
        E[rowind] = calc_local_op(phys_b,phys_t,N,ops_col[rowind],
                                  normalize=normalize,
                                  phys_b_ket=phys_bk,
                                  phys_t_ket=phys_tk)
    return E
def calc_all_column_op(peps,ops,chi=10,return_sum=True,normalize=True,ket=None,allow_normalize=False,in_mem=True):
    """
    Calculate contribution to operator from interactions within all columns,
    ignoring interactions between columns
    Args:
        peps : A list of lists of peps tensors
            The PEPS to be normalized
        ops :
            The operator to be contracted with the peps
    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        return_sum : bool
            Whether to return the summation of all energies or
            a 2D array showing the energy contribution from each bond.
        ket : A list of lists of ket tensors
            A second peps, to use as the ket, in the operator contraction
        in_mem: bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory
    Returns:
        val : float
            The contribution of the column's interactions to
            the observable's expectation value
    """
    # Figure out peps size
    Nx = len(peps)
    Ny = len(peps[0])
    # Compute the boundary MPOs
    right_bmpo = calc_right_bound_mpo(peps, 0,
                                      chi=chi,
                                      return_all=True,
                                      ket=ket,
                                      allow_normalize=allow_normalize,
                                      in_mem=in_mem)
    left_bmpo  = calc_left_bound_mpo (peps,Nx,
                                      chi=chi,
                                      return_all=True,
                                      ket=ket,
                                      allow_normalize=allow_normalize,
                                      in_mem=in_mem)
    ident_bmpo = identity_mps(len(right_bmpo[0]),
                              dtype=peps[0][0].dtype,
                              sym=(peps[0][0].sym is not None),
                              backend=peps.backend)
    # Set up array to store energies
    E = peps.backend.zeros((len(ops),len(ops[0])),dtype=peps[0][0].dtype)
    # Loop through all columns
    for col in range(Nx):
        # Get the ket column (if not None)
        ket_col = ket[col] if ket is not None else None
        # Use the identity boundary mpo at the lattice edges
        # (collapses the previous three near-identical branches)
        lbmpo = ident_bmpo if col == 0 else left_bmpo[col-1]
        rbmpo = ident_bmpo if col == Nx-1 else right_bmpo[col]
        E[col,:] = calc_single_column_op(peps[col],
                                         lbmpo,
                                         rbmpo,
                                         ops[col],
                                         normalize=normalize,
                                         ket_col=ket_col,
                                         in_mem=in_mem)
    # Print Energies
    mpiprint(8,'Energy [:,:] = \n{}'.format(E))
    # Return results
    if return_sum:
        return E.sum()
    else:
        return E
def calc_peps_nn_op(peps,ops,chi=10,normalize=True,ket=None,contracted_env=False,allow_normalize=False):
    """
    Calculate the expectation value for a given next nearest (nn) neighbor operator
    Args:
        peps : A PEPS object
            The PEPS to be normalized
        ops :
            The operator to be contracted with the peps
    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        normalize : bool
            Whether to divide the resulting operator value by the peps norm
        ket : PEPS Object
            A second peps, to use as the ket, in the operator contraction
        contracted_env: bool
            Whether to contract the upper and lower environment
            or leave it as a boundary mps
    Returns:
        val : float
            The resulting observable's expectation value
    """
    # Absorb Lambda tensors if needed (work on copies)
    if peps.ltensors is not None:
        peps = peps.copy()
        peps.absorb_lambdas()
    else:
        peps = peps.copy()
    if ket is not None and ket.ltensors is not None:
        ket = ket.copy()
        ket.absorb_lambdas()
    elif ket is not None:
        ket = ket.copy()
    # Figure out peps size
    Nx = len(peps)
    Ny = len(peps[0])
    # Compute the boundary MPOs
    right_bmpo = calc_right_bound_mpo(peps, 0,chi=chi,return_all=True,ket=ket,allow_normalize=allow_normalize)
    left_bmpo  = calc_left_bound_mpo (peps,Nx,chi=chi,return_all=True,ket=ket,allow_normalize=allow_normalize)
    ident_bmpo = identity_mps(len(right_bmpo[0]),
                              dtype=peps[0][0].dtype,
                              sym=(peps[0][0].sym is not None),
                              backend=peps.backend)
    # Loop through all neighboring column pairs
    E = peps.backend.zeros((len(ops),len(ops[0])),dtype=peps[0][0].dtype)
    for col in range(Nx-1):
        # Ket tensors for this pair of columns (None if no ket given)
        if ket is None:
            ket_pair = [None,None]
        else:
            ket_pair = [ket[col],ket[col+1]]
        # Use the identity boundary mpo at the lattice edges
        # (collapses the previous four near-identical branches)
        lbmpo = ident_bmpo if col == 0 else left_bmpo[col-1]
        rbmpo = ident_bmpo if col == Nx-2 else right_bmpo[col+1]
        E[col,:] = calc_single_column_nn_op([peps[col],peps[col+1]],
                                            lbmpo,
                                            rbmpo,
                                            ops[col],
                                            normalize=normalize,
                                            ket=ket_pair,
                                            chi=chi,
                                            contracted_env=contracted_env)
    # Print out results if wanted
    mpiprint(8,'Energy [:,:] = \n{}'.format(E))
    # Return Result
    return E.sum()
def calc_peps_op(peps,ops,chi=10,return_sum=True,normalize=True,ket=None,in_mem=True):
    """
    Evaluate the expectation value of an operator for a peps.
    Args:
        peps : A PEPS object
            The PEPS to be normalized
        ops :
            The operator to be contracted with the peps
    Kwargs:
        chi : int
            The maximum bond dimension for the boundary mpo
        normalize : bool
            Whether to divide the resulting operator value by the peps norm
        return_sum : bool
            Whether to either return an array of the results, the same shape
            as ops, or a summation of all operators
        ket : PEPS Object
            A second peps, to use as the ket, in the operator contraction
        in_mem: bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory
    Returns:
        val : float
            The resulting observable's expectation value
    """
    # "normalize" with respect to the contraction between the two peps
    peps.normalize(ket=ket,
                   pf=True,
                   in_mem=in_mem)
    # Work on copies, absorbing lambda tensors when canonical
    peps = peps.copy()
    if peps.ltensors is not None:
        peps.absorb_lambdas()
    if ket is not None:
        ket = ket.copy()
        if ket.ltensors is not None:
            ket.absorb_lambdas()
    # Column (vertical bond) contributions
    col_energy = calc_all_column_op(peps,ops[0],
                                    chi=chi,
                                    normalize=normalize,
                                    return_sum=return_sum,
                                    ket=ket,
                                    in_mem=in_mem)
    # Row contributions: rotate the lattice, repeat, then rotate back
    peps.rotate(clockwise=True)
    if ket is not None:
        ket.rotate(clockwise=True)
    row_energy = calc_all_column_op(peps,ops[1],
                                    chi=chi,
                                    normalize=normalize,
                                    return_sum=return_sum,
                                    ket=ket,
                                    in_mem=in_mem)
    peps.rotate(clockwise=False)
    if ket is not None:
        ket.rotate(clockwise=False)
    # Return Result
    if return_sum:
        return col_energy.sum()+row_energy.sum()
    return col_energy,row_energy
def increase_peps_mbd_lambda(peps,Dnew,noise=0.01):
    """
    Grow the bond dimension of the lambda (singular value) tensors
    of a canonical peps.
    Args:
        peps : PEPS Object
            The peps for which we are increasing the bond dimension
        Dnew : int
            The new bond dimension
    Kwargs:
        noise : float
            The maximum magnitude of random noise to be incorporated
            in increasing the bond dimension
    Returns:
        peps : PEPS Object
            The peps with the bond dimension increased
    """
    # Nothing to do for a non-canonical peps
    if peps.ltensors is None:
        return peps
    # Lattice extents and current bond dimension
    Nx = len(peps.ltensors[0])
    Ny = len(peps.ltensors[0][0])
    Dold = peps.ltensors[0][0][0].shape[0]
    # Build a noisy embedding of the old bond into the new one,
    # then orthogonalize it via an svd
    identity = zeros((Dnew,Dold),dtype=peps.ltensors[0][0][0].dtype)
    identity[:Dold,:] = eye(Dold,dtype=peps.ltensors[0][0][0].dtype)
    mat = identity + noise*rand((Dnew,Dold),dtype=peps.ltensors[0][0][0].dtype)
    mat = svd(mat)[0]
    # Apply the embedding to every lambda vector
    for ind in range(len(peps.ltensors)):
        for x in range(len(peps.ltensors[ind])):
            for y in range(len(peps.ltensors[ind][x])):
                peps.ltensors[ind][x][y] = einsum('Ll,l->L',mat,peps.ltensors[ind][x][y])
    # Return result
    return peps
def increase_zn_peps_mbd(peps,Dnew,noise=1e-10):
    # Bond dimension growth for Zn-symmetric peps is not yet supported;
    # see increase_peps_mbd for the non-symmetric implementation.
    raise NotImplementedError()
def increase_peps_mbd(peps,Dnew,noise=1e-10,chi=None,normalize=True):
    """
    Increase the bond dimension of a peps
    Args:
        peps : 2D Array
            The peps tensors in a list of lists
        Dnew : int
            The new bond dimension
    Kwargs:
        noise : float
            The maximum magnitude of random noise to be incorporated
            in increasing the bond dimension
        chi : int
            The boundary mpo bond dimension used if the peps is
            renormalized afterwards
        normalize : bool
            Whether to normalize the peps after growing the bonds
    Returns:
        peps : 2D Array
            The new peps tensors with increased bond dimensions
    """
    # Separate routine if using Zn Symmetry
    if peps.Zn is not None:
        return increase_zn_peps_mbd(peps,Dnew,noise=noise)
    # Figure out peps size
    # NOTE(review): the loops below compare `row` against Nx and `col`
    # against Ny while indexing peps[row][col] -- for non-square lattices
    # confirm the row/col <-> Nx/Ny pairing is intended.
    Nx = len(peps)
    Ny = len(peps[0])
    for col in range(Nx):
        for row in range(Ny):
            # Determine tensor shape
            old_shape = list(peps[row][col].ten.shape)
            new_shape = list(peps[row][col].ten.shape)
            legs = peps[row][col].legs
            # Build the slice tuple `ind` selecting the old tensor's data
            # inside the enlarged tensor; boundary bonds keep their size.
            ind = tuple()
            # Left bond
            if row != 0:
                new_shape[legs[0][0]] = Dnew
                ind += (slice(0,old_shape[legs[0][0]]),)
            else:
                for i in legs[0]:
                    ind += (slice(0,old_shape[i]),)
            # Down Bond
            if col != 0:
                new_shape[peps[row][col].legs[1][0]] = Dnew
                ind += (slice(0,old_shape[legs[1][0]]),)
            else:
                for i in legs[1]:
                    ind += (slice(0,old_shape[i]),)
            # Physical Bond (never resized)
            for i in legs[2]:
                ind += (slice(0,old_shape[i]),)
            # Right Bond
            if row != Nx-1:
                new_shape[peps[row][col].legs[3][0]] = Dnew
                ind += (slice(0,old_shape[legs[3][0]]),)
            else:
                for i in legs[3]:
                    ind += (slice(0,old_shape[i]),)
            # Top Bond
            if col != Ny-1:
                new_shape[peps[row][col].legs[4][0]] = Dnew
                ind += (slice(0,old_shape[legs[4][0]]),)
            else:
                for i in legs[4]:
                    ind += (slice(0,old_shape[i]),)
            # Create an empty tensor and embed the old data in it
            ten = peps.backend.zeros(new_shape,dtype=peps[row][col].dtype)
            ten[ind] = peps[row][col].ten.copy()
            # Add some noise (if needed)
            ten_noise = noise*peps.backend.random(new_shape)
            ten += ten_noise
            # Put new tensor back into peps
            peps[row][col].ten = ten
    # Increase Lambda tensors as well if needed
    peps = increase_peps_mbd_lambda(peps,Dnew,noise=noise)
    # Normalize if needed
    if normalize:
        peps.normalize(chi=chi)
    # Return result
    return peps
def copy_peps_tensors(peps):
    """
    Return a copy of the PEPS tensor grid: the list-of-lists structure
    is rebuilt and each tensor's .copy() is taken.
    """
    return [[peps[x][y].copy() for y in range(len(peps[0]))]
            for x in range(len(peps))]
def copy_lambda_tensors(peps):
    """
    Return a copy of the canonical peps' lambda tensors, rebuilding the
    nested list structure and calling .copy() on each tensor.
    """
    return [[[ten.copy() for ten in ygrp] for ygrp in xgrp]
            for xgrp in peps.ltensors]
def peps_absorb_lambdas(Gamma,Lambda,mk_copy=True):
    """
    Absorb the lambda tensors into the gamma tensors,
    transforming the peps representations from the canonical
    Gamma-Lambda form into the standard representation.
    Args:
        Gamma : list of lists
            A list of a list of the peps gamma tensors
        Lambda : list of lists of lists
            The lambda tensors (singular value vectors)
            with Lambda[0] being the lambda vecs on the vertical bonds and
            Lambda[1] being the lambda vecs on the horizontal bonds.
            If None, Gamma is returned unchanged.
    Kwargs:
        mk_copy : bool
            Whether to copy the gamma tensors before absorbing
    Returns:
        peps : list of lists
            The peps tensors
    """
    if Lambda is not None:
        # Create a copy of Gamma (if needed)
        if mk_copy:
            Gamma = copy_peps_tensors(Gamma)
        # Figure out peps lattice size
        Nx = len(Gamma)
        Ny = len(Gamma[0])
        # loop through all sites, absorbing the "singular values"
        for x in range(Nx):
            for y in range(Ny):
                # Absorb lambdas that are to the right and above (not symmetric
                # but better for precision)
                initsgn = Gamma[x][y].get_signs()
                # Bug fix: use value comparison (!=) instead of identity
                # comparison (`is not`) on integers, which only happened to
                # work via CPython's small-int caching
                if x != Nx-1:
                    Gamma[x][y] = einsum('ldpru,rR->ldpRu',Gamma[x][y],Lambda[1][x][y])
                if y != Ny-1:
                    Gamma[x][y] = einsum('ldpru,uU->ldprU',Gamma[x][y],Lambda[0][x][y])
                Gamma[x][y].update_signs(initsgn)
    # Return results
    return Gamma
def load_peps(fname,in_mem=True):
    """
    Load a saved PEPS into a new PEPS object
    Args:
        fname : str
            The file which holds the saved PEPS object
        in_mem : bool
            Whether all peps tensors will be stored in local
            memory or as references to tensors stored in disk
    Returns:
        peps : PEPS object
            A peps object with the saved PEPS loaded
    """
    # Open File
    # NOTE(review): get_dataset() presumably reads from the file opened
    # here via module-level state; `f` is never closed in this function --
    # confirm whether a matching close call is needed.
    f = open_file(fname,'r')
    # Get PEPS info
    Nx = get_dataset('Nx')
    Ny = get_dataset('Ny')
    shape = get_dataset('shape')
    d = get_dataset('d')
    D = get_dataset('D')
    chi = get_dataset('chi')
    norm_tol = get_dataset('norm_tol')
    exact_norm_tol = get_dataset('exact_norm_tol')
    canonical = get_dataset('canonical')
    singleLayer = get_dataset('singleLayer')
    max_norm_iter = get_dataset('max_norm_iter')
    norm_BS_upper = get_dataset('norm_BS_upper')
    norm_BS_lower = get_dataset('norm_BS_lower')
    norm_BS_print = get_dataset('norm_BS_print')
    dtype = get_dataset('tensor_0_0').dtype
    fname = get_dataset('fname')
    fdir = get_dataset('fdir')
    # Create new PEPS object
    peps = PEPS(Nx=Nx,Ny=Ny,d=d,D=D,
                chi=chi,norm_tol=norm_tol,
                exact_norm_tol=exact_norm_tol,
                canonical=canonical,
                singleLayer=singleLayer,
                max_norm_iter=max_norm_iter,
                norm_BS_upper=norm_BS_upper,
                norm_BS_lower=norm_BS_lower,
                dtype=dtype,normalize=False,
                fdir=fdir,fname=fname+'_loaded')
    # Load PEPS Tensors
    for i in range(Nx):
        for j in range(Ny):
            peps.tensors[i][j] = get_dataset('tensor_{}_{}'.format(i,j))
    # Load lambda tensors (if there)
    if canonical:
        # Bug fix: this is a module-level function, so the lambda tensors
        # live on the freshly created `peps`, not on an undefined `self`
        for ind in range(len(peps.ltensors)):
            for x in range(len(peps.ltensors[ind])):
                for y in range(len(peps.ltensors[ind][x])):
                    peps.ltensors[ind][x][y] = get_dataset('ltensor_{}_{}_{}'.format(ind,x,y))
    # Write to memory (if needed)
    if not in_mem:
        peps.to_disk()
        if canonical:
            raise ValueError('Store to disk not yet implemented for canonical PEPS')
    # Return resulting PEPS
    return peps
# -----------------------------------------------------------------
# PEPS Class
class PEPS:
"""
A class to hold and manipulate a PEPS
"""
#@profile
    def __init__(self,Nx=10,Ny=10,d=2,D=2,
                 chi=None,chi_norm=None,chi_op=None,
                 Zn=None,thermal=False,
                 dZn=None,canonical=False,backend='numpy',
                 singleLayer=True,dtype=float_,
                 normalize=True,norm_tol=1e-3,
                 exact_norm_tol=20.,
                 max_norm_iter=100,norm_bs_upper=10.0,norm_bs_lower=0.0,
                 fname=None,fdir='./',in_mem=True):
        """
        Create a random PEPS object
        Args:
            self : PEPS Object
        Kwargs:
            Nx : int
                The length of the lattice in the x-direction
            Ny : int
                The length of the lattice in the y-direction
            d : int
                The local bond dimension
            D : int
                The auxilliary bond dimension
            chi : int
                The boundary mpo maximum bond dimension
                (defaults to 4*D**2 if None)
            chi_norm : int
                The boundary mpo maximum bond dimension for
                use when the norm is computed
            chi_op : int
                The boundary mpo maximum bond dimension for
                use when operator expectation values are computed
            Zn : int
                Create a PEPS which preserves this Zn symmetry,
                i.e. if Zn=2, then Z2 symmetry is preserved.
            thermal : bool
                Whether or not to create a thermal state,
                i.e. an additional physical index
            dZn : int
                The number of symmetry sectors in the physical
                bond dimension.
            canonical : bool
                If true, then the PEPS will be created in the
                Gamma Lambda formalism, with diagonal matrices
                between each set of PEPS tensors. Default is False,
                where a standard PEPS, with one tensor per site,
                will be created.
            backend : str
                This specifies the backend to be used for the calculation.
                Options are currently 'numpy' or 'ctf'. If using symmetries,
                this will be adapted to using symtensors with numpy or ctf as
                the backend.
            singleLayer : bool
                Whether to use a single layer environment
                (currently only option implemented)
            exact_norm_tol : float
                How close to 1. the norm should be before exact
                arithmetic is used in the normalization procedure.
                See documentation of normalize_peps() function
                for more details.
            norm_tol : float
                How close to 1. the norm should be when calling peps.normalize()
            dtype : dtype
                The data type for the PEPS
            normalize : bool
                Whether the initialized random peps should be normalized
            max_norm_iter : int
                The maximum number of normalization iterations
            norm_bs_upper : float
                The upper bound for the binary search factor
                during normalization.
            norm_bs_lower : float
                The lower bound for the binary search factor
                during normalization.
            fname : str
                Where the PEPS will be saved as an .npz file, if None,
                then the default is 'peps_Nx{}_Ny{}_D{}'
            fdir : str
                The directory where the PEPS will be saved, default is
                current working directory
            in_mem : bool
                Whether the peps tensors should be written to disk or
                stored in local memory
        Returns:
            PEPS : PEPS Object
                The resulting random projected entangled pair
                state as a PEPS object
        """
        # Collect input arguments
        self.Nx = Nx
        self.Ny = Ny
        self.shape = (Nx,Ny)
        self.d = d
        self.D = D
        # Default boundary bond dimension scales with the square of D
        if chi is None: chi = 4*D**2
        self.chi = chi
        # Norm/operator boundary bond dimensions fall back to chi
        if chi_norm is None: chi_norm = chi
        if chi_op is None: chi_op = chi
        self.chi_norm = chi_norm
        self.chi_op = chi_op
        self.Zn = Zn
        self.thermal = thermal
        # Physical symmetry sectors default to the full Zn symmetry
        if dZn is None: dZn = Zn
        self.dZn = dZn
        self.canonical = canonical
        # Replace the backend string with the loaded backend module
        self.backend = backend
        self.backend = load_lib(self.backend)
        self.singleLayer = singleLayer
        self.dtype = dtype
        self.exact_norm_tol = exact_norm_tol
        self.norm_tol = norm_tol
        self.max_norm_iter = max_norm_iter
        self.norm_bs_upper = norm_bs_upper
        self.norm_bs_lower = norm_bs_lower
        if fname is None:
            self.fname = 'peps_Nx{}_Ny{}_D{}'.format(Nx,Ny,D)
        else:
            self.fname = fname
        self.fdir = fdir
        # Make a random PEPS
        if thermal:
            self.tensors = make_thermal_peps(self.Nx,
                                             self.Ny,
                                             self.d,
                                             self.D,
                                             Zn=self.Zn,
                                             dZn=self.dZn,
                                             backend=self.backend,
                                             dtype=self.dtype)
        else:
            #tmpprint('Making Initial Random PEPS')
            self.tensors = make_rand_peps(self.Nx,
                                          self.Ny,
                                          self.d,
                                          self.D,
                                          Zn=self.Zn,
                                          dZn=self.dZn,
                                          backend=self.backend,
                                          dtype=self.dtype,
                                          in_mem=in_mem)
        # Add in lambda "singular value" matrices (canonical form only)
        if self.canonical:
            if thermal:
                self.ltensors = make_thermal_lambdas(self.Nx,
                                                     self.Ny,
                                                     self.D,
                                                     Zn=self.Zn,
                                                     backend=self.backend,
                                                     dtype=self.dtype,
                                                     in_mem=in_mem)
            else:
                self.ltensors = make_rand_lambdas(self.Nx,
                                                  self.Ny,
                                                  self.D,
                                                  Zn=self.Zn,
                                                  backend=self.backend,
                                                  dtype=self.dtype,
                                                  in_mem=in_mem)
        else:
            self.ltensors = None
        # Normalize the PEPS
        if normalize:
            #tmpprint('Normalize Initialized PEPS')
            self.normalize(in_mem=in_mem)
def calc_bmpo_left(self,col,chi=4,singleLayer=True,truncate=True,return_all=False,ket=None,allow_normalize=False,in_mem=True):
"""
Calculate the left boundary MPO
Args:
peps : List
A list of lists containing the peps tensors
col : int
The last column for which you need the environment
Kwargs:
chi : int
The maximum bond dimension of the boundary MPO
single_layer : bool
Indicates whether to use a single layer environment
(currently it is the only option...)
truncate : bool
Whether or not to do an svd and truncate the resulting
boundary mpo
return_all : bool
Whether to return a list of boundary mpos upto col or just
return the boundary mpo for col.
ket : List
A list of lists containing the ket's peps tensors
in_mem: bool
if True, then the peps tensors will all be loaded into memory
and all calculations will be done with them in memory, default
is True
returns:
bound_mpo : list
An mpo stored as a list, corresponding to the
resulting boundary mpo.
"""
# Get needed parameters
if chi is None:
chi = self.chi
if singleLayer is None:
singleLayer = self.singleLayer
# Calc BMPO
bmpo = calc_left_bound_mpo(self,
col,
chi=chi,
singleLayer=singleLayer,
truncate=truncate,
return_all=return_all,
ket=ket,
allow_normalize=allow_normalize,
in_mem=in_mem)
# Return result
return bmpo
def calc_bmpo_right(self,col,chi=None,singleLayer=None,truncate=True,return_all=False,ket=None,allow_normalize=False,in_mem=True):
"""
Calculate the right boundary MPO
Args:
peps : List
A list of lists containing the peps tensors
col : int or list of ints
The column(s) for which you need the environment
Kwargs:
chi : int
The maximum bond dimension of the boundary MPO
single_layer : bool
Indicates whether to use a single layer environment
(currently it is the only option...)
truncate : bool
Whether or not to do an svd and truncate the resulting
boundary mpo
return_all : bool
Whether to return a list of boundary mpos upto col or just
return the boundary mpo for col.
ket : List
A list of lists containing the ket's peps tensors
in_mem: bool
if True, then the peps tensors will all be loaded into memory
and all calculations will be done with them in memory, default
is True
returns:
bound_mpo : list
An mpo stored as a list, corresponding to the
resulting boundary mpo.
"""
# Get needed parameters
if chi is None:
chi = self.chi
if singleLayer is None:
singleLayer = self.singleLayer
# Do calculation
bmpo = calc_right_bound_mpo(self,
col,
chi=chi,
singleLayer=singleLayer,
truncate=truncate,
return_all=return_all,
ket=ket,
allow_normalize=allow_normalize,
in_mem=in_mem)
# Return bmpo
return bmpo
def calc_norm(self,chi=None,singleLayer=None,ket=None,in_mem=True):
"""
Calculate the norm of the PEPS
Args:
self : PEPS Object
Kwargs:
chi : int
The boundary MPO bond dimension
single_layer : bool
Indicates whether to use a single layer environment
(currently it is the only option...)
ket : PEPS Object
A peps containing the ket's peps tensors
in_mem: bool
if True, then the peps tensors will all be loaded into memory
and all calculations will be done with them in memory, default
is True
Returns:
norm : float
The (approximate) norm of the PEPS
"""
if chi is None: chi = self.chi
if singleLayer is None: singleLayer = self.singleLayer
return calc_peps_norm(self,chi=chi,singleLayer=singleLayer,ket=ket,in_mem=in_mem)
def normalize(self,max_iter=None,norm_tol=None,exact_norm_tol=None,chi=None,up=None,down=None,
singleLayer=None,ket=None,pf=False,in_mem=True):
"""
Normalize the full PEPS
Args:
self : PEPS Object
The PEPS to be normalized
Kwargs:
max_iter : int
The maximum number of iterations of the normalization
procedure. Default is 20.
norm_tol : float
How close to 1. the norm should be when calling peps.normalize()
exact_norm_tol : int
We require the measured norm to be within the bounds
10^(-exact_norm_tol) < norm < 10^(exact_norm_tol) before we do
exact arithmetic to get the norm very close to 1. Default
is 20.
chi : int
Boundary MPO maximum bond dimension
up : float
The upper bound for the binary search factor. Default is 1.0,
which assumes that the norm of the initial PEPS is greater
than 1 (this is almost always true).
down : float
The lower bound for the binary search factor. Default is 0.0.
The intial guess for the scale factor is the midpoint
between up and down. It's not recommended to adjust the
up and down parameters unless you really understand what
they are doing.
single_layer : bool
Indicates whether to use a single layer environment
(currently it is the only option...)
ket : peps object
If you would like the ket to be 'normalized', such that
when contracted with another peps, the contraction is equal
to one. Only the peps (not ket) will be altered to attempt
the normalization
pf: bool
If True, then we will normalize as though this is a partition
function instead of a contraction between to peps
in_mem: bool
if True, then the peps tensors will all be loaded into memory
and all calculations will be done with them in memory
Returns:
norm : float
The approximate norm of the PEPS after the normalization
procedure
"""
# Figure out good chi (if not given)
if chi is None: chi = self.chi_norm
if max_iter is None: max_iter = self.max_norm_iter
if exact_norm_tol is None: exact_norm_tol = self.exact_norm_tol
if norm_tol is None: norm_tol = self.norm_tol
if up is None: up = self.norm_bs_upper
if down is None: down = self.norm_bs_lower
if singleLayer is None: singleLayer = self.singleLayer
# Run the normalization procedure
norm, normpeps = normalize_peps(self,
max_iter = max_iter,
exact_norm_tol = exact_norm_tol,
norm_tol = norm_tol,
chi = chi,
up = up,
down = down,
singleLayer=singleLayer,
ket=ket,
pf=pf,
in_mem=in_mem)
# Copy the resulting tensors
self.tensors = copy_peps_tensors(normpeps)
if self.ltensors is not None:
self.ltensors = copy_lambda_tensors(normpeps)
return norm
def calc_op(self,ops,chi=None,normalize=True,return_sum=True,ket=None,nn=False,contracted_env=False,in_mem=True):
"""
Calculate the expectation value for a given operator
Args:
self : PEPS Object
The PEPS to be normalized
ops :
The operator to be contracted with the peps
Kwargs:
chi : int
The maximum bond dimension for the boundary mpo
normalize : bool
Whether to divide the resulting operator value by the peps norm
return_sum : bool
Whether to either return an array of the results, the same shape
as ops, or a summation of all operators
ket : PEPS Object
A second peps, to use as the ket, in the operator contraction
nn : bool
Whether the Hamiltonian involves next nearest (nn) neighbor interactions
contracted_env: bool
Whether to contract the upper and lower environment
or leave it as a boundary mps
in_mem: bool
if True, then the peps tensors will all be loaded into memory
and all calculations will be done with them in memory
Returns:
val : float
The resulting observable's expectation value
"""
if chi is None: chi = self.chi_op
# Calculate the operator's value
if nn:
if not in_mem: raise ValueError('Unable to do next nearest neighbor calcs with peps not in memory')
return calc_peps_nn_op(self,ops,chi=chi,normalize=normalize,ket=ket,contracted_env=contracted_env)
else:
return calc_peps_op(self,ops,chi=chi,normalize=normalize,return_sum=return_sum,ket=ket,in_mem=in_mem)
    def increase_mbd(self,newD,chi=None,noise=1e-10,normalize=True):
        """
        Increase the maximum bond dimension of the peps

        Args:
            newD : int
                The new maximum PEPS bond dimension; when None, self.chi
                is left unchanged (increase_peps_mbd is still called)
        Kwargs:
            chi : int
                The new bond dimension for the boundary mpo
            noise : float
                Passed to increase_peps_mbd; presumably the magnitude of
                random noise used when padding tensors -- confirm there
            normalize : bool
                Passed to increase_peps_mbd; presumably controls
                re-normalization after the increase -- confirm there
        """
        # NOTE(review): when newD is given, self.chi is overwritten with chi
        # even if chi is None -- confirm that clearing self.chi is intended.
        if newD is not None:
            self.chi = chi
        # NOTE(review): rebinding the local name `self` has no effect on the
        # caller's object; this only updates the PEPS if increase_peps_mbd
        # mutates it in place and returns it -- verify.
        self = increase_peps_mbd(self,newD,noise=noise,normalize=normalize,chi=chi)
def absorb_lambdas(self):
"""
Absorb the lambda from the canonical Gamma-Lambda PEPS
form to return the normal PEPS form.
Args:
self : PEPS Object
The PEPS to be normalized
"""
self.tensors = peps_absorb_lambdas(self.tensors,self.ltensors)
self.ltensors = None
self.canonical = False
def __len__(self):
return self.Nx
def __getitem__(self,ind):
return self.tensors[ind]
def __setitem__(self,ind,item):
self.tensors[ind] = item
def copy(self):
"""
Return a copy of this PEPS
"""
peps_copy = PEPS(Nx=self.Nx,Ny=self.Ny,d=self.d,D=self.D,
chi=self.chi,norm_tol=self.norm_tol,
exact_norm_tol=self.exact_norm_tol,
canonical=self.canonical,
singleLayer=self.singleLayer,
backend=self.backend,
max_norm_iter=self.max_norm_iter,
norm_bs_upper=self.norm_bs_upper,
norm_bs_lower=self.norm_bs_lower,
dtype=self.dtype,normalize=False,
fdir=self.fdir,fname=self.fname+'_cp')
# Copy peps tensors
for i in range(self.Nx):
for j in range(self.Ny):
peps_copy.tensors[i][j] = self.tensors[i][j].copy()
# Copy lambda tensors (if there)
if self.ltensors is not None:
for ind in range(len(self.ltensors)):
for x in range(len(self.ltensors[ind])):
for y in range(len(self.ltensors[ind][x])):
peps_copy.ltensors[ind][x][y] = self.ltensors[ind][x][y].copy()
# Return result
return peps_copy
def rotate(self,clockwise=True):
"""
Rotate the peps
Args:
peps : a list of a list containing peps tensors
The initial peps tensor
Kwargs:
clockwise : bool
Rotates clockwise if True, counter-clockwise
otherwise
"""
self.tensors = rotate_peps(self.tensors,clockwise=clockwise)
self.ltensors= rotate_lambda(self.ltensors,clockwise=clockwise)
Nx_ = self.Nx
Ny_ = self.Ny
self.Nx = Ny_
self.Ny = Nx_
self.shape = (self.Nx,self.Ny)
def flip(self):
"""
Flip the peps columns
"""
self.tensors = flip_peps(self.tensors)
self.ltensors= flip_lambda(self.ltensors)
def make_sparse(self):
"""
Convert the densely stored symmetric PEPS to a sparsely stored symmetric PEPS
"""
# Create the new peps objects
speps = PEPS(Nx = self.Nx,
Ny = self.Ny,
d = self.d,
D = self.D,
chi = self.chi,
Zn = None,
canonical = self.canonical,
backend = self.backend,
singleLayer = self.singleLayer,
dtype = self.dtype,
exact_norm_tol= self.exact_norm_tol,
norm_tol = self.norm_tol,
max_norm_iter = self.max_norm_iter,
norm_bs_upper = self.norm_bs_upper,
norm_bs_lower = self.norm_bs_lower,
normalize=False)
# Loop through all sites converting tensors to sparse
for x in range(speps.Nx):
for y in range(speps.Ny):
# Get a sparse version of the tensors
speps[x][y] = self.tensors[x][y].copy().make_sparse()
# Do it for lambda tensors as well
if speps.canonical:
for x in range(len(speps.ltensors)):
for y in range(len(speps.ltensors[x])):
for z in range(len(speps.ltensors[x][y])):
speps.ltensors[x][y][z] = self.ltensors[x][y][z].copy().make_sparse()
# Return result
return speps
def save(self,fname=None,fdir=None):
"""
Save the PEPS tensors
"""
if fdir is None:
fdir = self.fdir
if not (fdir[-1] == '/'):
fdir = fdir + '/'
if fname is None:
fname = self.fname
if self.Zn is None:
# Create dict to hold everything being saved
#save_dict = dict()
## Add PEPS Tensors
#for i in range(len(self.tensors)):
# for j in range(len(self.tensors[i])):
# save_dict['tensor_{}_{}'.format(i,j)] = self.tensors[i][j].ten
## Add Lambda Tensors (if Canonical)
#if self.ltensors is not None:
# for ind in range(len(self.ltensors)):
# for x in range(len(self.ltensors[ind])):
# for y in range(len(self.ltensors[ind][x])):
# save_dict['ltensor_{}_{}_{}'.format(ind,x,y)] = self.ltensors[ind][x][y].ten
#
#np.savez(self.fdir+self.fname,**save_dict)
# Create file
for i in range(5):
try:
f = open_file(fdir+fname,'w')
# Add PEPS Info
create_dataset(f,'Nx',self.Nx)
create_dataset(f,'Ny',self.Ny)
create_dataset(f,'shape',self.shape)
create_dataset(f,'d',self.d)
create_dataset(f,'D',self.D)
create_dataset(f,'chi',self.chi)
create_dataset(f,'Zn',False if self.Zn is None else self.Zn)
create_dataset(f,'thermal',self.thermal)
create_dataset(f,'dZn',False if self.dZn is None else self.dZn)
create_dataset(f,'canonical',self.canonical)
create_dataset(f,'singleLayer',self.singleLayer)
create_dataset(f,'exact_norm_tol',self.exact_norm_tol)
create_dataset(f,'norm_tol',self.norm_tol)
create_dataset(f,'max_norm_iter',self.max_norm_iter)
create_dataset(f,'norm_bs_upper',self.norm_bs_upper)
create_dataset(f,'norm_bs_lower',self.norm_bs_lower)
create_dataset(f,'fname',fname)
create_dataset(f,'fdir',self.fdir)
# Add PEPS Tensors
for i in range(len(self.tensors)):
for j in range(len(self.tensors[i])):
# Load tensor (if needed)
init_in_mem = self.tensors[i][j].in_mem
if not init_in_mem: self.tensors[i][j].from_disk()
# Save the tensor
create_dataset(f,'tensor_{}_{}'.format(i,j),self.tensors[i][j].ten)
#create_dataset(f,'tensorlegs_{}_{}'.format(i,j),self.tensors[i][j].legs)
# Put the tensor back on disk (if needed)
if not init_in_mem: self.tensors[i][j].to_disk()
# Add Lambda Tensors (if Canonical)
if self.ltensors is not None:
for ind in range(len(self.ltensors)):
for x in range(len(self.ltensors[ind])):
for y in range(len(self.ltensors[ind][x])):
# Load tensor (if needed)
init_in_mem = self.ltensors[ind][x][y].in_mem
if not init_in_mem: self.ltensors[ind][x][y].from_disk()
# Save the tensor
create_dataset(f,'ltensor_{}_{}_{}'.format(ind,x,y),self.ltensors[ind][x][y].ten)
#create_dataset(f,'ltensorlegs_{}_{}_{}'.format(ind,x,y),self.ltensors[ind][x][y].legs)
# Put the tensor back on disk (if needed)
if not init_in_mem: self.ltensors[ind][x][y].to_disk()
# Close file
close_file(f)
break
except:
#print('Saving PEPS Failed... Attempt ({}/5)'.format(i))
pass
else:
pass
#print('Didnt save peps...')
#raise NotImplementedError()
def load_tensors(self,fname):
if self.Zn is None:
# Open File
f = open_file(fname,'r')
# Check to make sure this peps and the one we are loading agree
assert(self.Nx == get_dataset(f,'Nx'))
assert(self.Ny == get_dataset(f,'Ny'))
# Get PEPS Tensors
for i in range(len(self.tensors)):
for j in range(len(self.tensors[i])):
self.tensors[i][j].ten = get_dataset(f,'tensor_{}_{}'.format(i,j))
# Get Lambda Tensors (if Canonical
if self.ltensors is not None:
for ind in range(len(self.ltensors)):
for x in range(len(self.ltensors[ind])):
for y in range(len(self.ltensors[ind][x])):
self.ltensors[ind][x][y].ten = get_dataset(f,'ltensor_{}_{}_{}'.format(ind,x,y))
# Close File
close_file(f)
else:
raise NotImplementedError()
def max_entry(self):
maxval = 0.
for i in range(len(self)):
for j in range(len(self[i])):
maxval = max(maxval,self[i][j].max_abs())
return maxval
def to_disk(self):
"""
Write all peps tensors to disk
"""
for x in range(self.Nx):
self.col_to_disk(x)
def from_disk(self):
"""
Read all peps tensors from disk
"""
for x in range(self.Nx):
self.col_from_disk(x)
def col_to_disk(self,x):
"""
Write all the peps tensors in a column to disk
"""
for y in range(self.Ny):
self.site_to_disk(x,y)
def col_from_disk(self,x):
"""
Read all peps tensors in a column from disk
"""
for y in range(self.Ny):
self.site_from_disk(x,y)
def row_to_disk(self,y):
"""
Write all the peps tensors in a row to disk
"""
for x in range(self.Nx):
self.site_to_disk(x,y)
def row_from_disk(self,y):
"""
Read all peps tensors in a row from disk
"""
for x in range(self.Nx):
self.site_from_disk(x,y)
def site_to_disk(self,x,y):
"""
Write a peps tensor at site peps[x][y] to disk
"""
self[x][y].to_disk()
def site_from_disk(self,x,y):
"""
Read a peps tensor at site peps[x][y] to disk
"""
self[x][y].from_disk()
| [
"cyclopeps.tools.mps_tools.MPS",
"numpy.prod",
"cyclopeps.tools.gen_ten.eye",
"cyclopeps.tools.gen_ten.ones",
"cyclopeps.tools.gen_ten.zeros",
"cyclopeps.tools.gen_ten.rand",
"numpy.isfinite",
"cyclopeps.tools.gen_ten.einsum"
] | [((3961, 3975), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['bound_mpo'], {}), '(bound_mpo)\n', (3964, 3975), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((6850, 6868), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['bound_mpo_new'], {}), '(bound_mpo_new)\n', (6853, 6868), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((9917, 9935), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['bound_mpo_new'], {}), '(bound_mpo_new)\n', (9920, 9935), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((29520, 29533), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['peps_col'], {}), '(peps_col)\n', (29523, 29533), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((32663, 32748), 'cyclopeps.tools.gen_ten.zeros', 'zeros', (['dims', 'sym'], {'backend': 'backend', 'dtype': 'dtype', 'legs': '[[0], [1], [2, 3], [4], [5]]'}), '(dims, sym, backend=backend, dtype=dtype, legs=[[0], [1], [2, 3], [4],\n [5]])\n', (32668, 32748), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((35659, 35704), 'cyclopeps.tools.gen_ten.rand', 'rand', (['dims', 'sym'], {'backend': 'backend', 'dtype': 'dtype'}), '(dims, sym, backend=backend, dtype=dtype)\n', (35663, 35704), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((51422, 51474), 'cyclopeps.tools.gen_ten.zeros', 'zeros', (['(D, D)'], {'sym': 'sym', 'backend': 'backend', 'dtype': 'dtype'}), '((D, D), sym=sym, backend=backend, dtype=dtype)\n', (51427, 51474), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((52977, 53029), 'cyclopeps.tools.gen_ten.zeros', 'zeros', (['(D, D)'], {'sym': 'sym', 'backend': 'backend', 'dtype': 'dtype'}), '((D, D), sym=sym, backend=backend, dtype=dtype)\n', (52982, 53029), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((105453, 105482), 'cyclopeps.tools.gen_ten.einsum', 'einsum', 
(['"""ab,bPU->aPU"""', 'sb', 'vb'], {}), "('ab,bPU->aPU', sb, vb)\n", (105459, 105482), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((105624, 105653), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""DPa,ab->DPb"""', 'ut', 'st'], {}), "('DPa,ab->DPb', ut, st)\n", (105630, 105653), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((2183, 2230), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""dpru,DpRU->dDRurU"""', 'ket[row]', 'bra[row]'], {}), "('dpru,DpRU->dDRurU', ket[row], bra[row])\n", (2189, 2230), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((2703, 2776), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dr', 'Zr'], {'is_symmetric': 'ket[row].is_symmetric', 'backend': 'ket[row].backend'}), '(Dr, Zr, is_symmetric=ket[row].is_symmetric, backend=ket[row].backend)\n', (2706, 2776), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((2841, 2914), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Du', 'Zu'], {'is_symmetric': 'ket[row].is_symmetric', 'backend': 'ket[row].backend'}), '(Du, Zu, is_symmetric=ket[row].is_symmetric, backend=ket[row].backend)\n', (2844, 2914), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((2979, 3052), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Du', 'Zu'], {'is_symmetric': 'ket[row].is_symmetric', 'backend': 'ket[row].backend'}), '(Du, Zu, is_symmetric=ket[row].is_symmetric, backend=ket[row].backend)\n', (2982, 3052), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((3635, 3664), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""du,DU->dDuU"""', 'I3', 'I2'], {}), "('du,DU->dDuU', I3, I2)\n", (3641, 3664), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((3675, 3710), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""dDuU,lr->dlDruU"""', 'Itmp', 'I1'], {}), 
"('dDuU,lr->dlDruU', Itmp, I1)\n", (3681, 3710), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((5118, 5179), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""mln,ldpru->mdrpnu"""', 'bound_mpo[2 * row + 1]', 'ket[row]'], {}), "('mln,ldpru->mdrpnu', bound_mpo[2 * row + 1], ket[row])\n", (5124, 5179), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((5681, 5754), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dd', 'Zd'], {'is_symmetric': 'ket[row].is_symmetric', 'backend': 'ket[row].backend'}), '(Dd, Zd, is_symmetric=ket[row].is_symmetric, backend=ket[row].backend)\n', (5684, 5754), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((6480, 6527), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""mLn,du->mdLnu"""', 'bound_mpo[2 * row]', 'I1'], {}), "('mLn,du->mdLnu', bound_mpo[2 * row], I1)\n", (6486, 6527), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((7883, 7940), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""mLn,LDPRU->mDRnUP"""', 'bound_mpo[2 * row]', 'bra[row]'], {}), "('mLn,LDPRU->mDRnUP', bound_mpo[2 * row], bra[row])\n", (7889, 7940), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((8752, 8825), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Du', 'Zu'], {'is_symmetric': 'bra[row].is_symmetric', 'backend': 'bra[row].backend'}), '(Du, Zu, is_symmetric=bra[row].is_symmetric, backend=bra[row].backend)\n', (8755, 8825), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((9500, 9541), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""mrPn,DU->mDPrnU"""', 'bound_tens', 'I1'], {}), "('mrPn,DU->mDPrnU', bound_tens, I1)\n", (9506, 9541), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((37241, 37259), 'numpy.isfinite', 'np.isfinite', (['const'], {}), '(const)\n', (37252, 37259), True, 'import 
numpy as np\n'), ((56765, 56802), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""urj,jga->augr"""', 'left1', 'left2'], {}), "('urj,jga->augr', left1, left2)\n", (56771, 56802), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((59785, 59824), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""xsq,qhf->hsxf"""', 'right1', 'right2'], {}), "('xsq,qhf->hsxf', right1, right2)\n", (59791, 59824), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((60005, 60017), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['top_env'], {}), '(top_env)\n', (60008, 60017), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((61734, 61777), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jga,zay->zjyg"""', 'left2', 'prev_env[0]'], {}), "('jga,zay->zjyg', left2, prev_env[0])\n", (61740, 61777), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((64503, 64558), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""qif,uft->uiqt"""', 'right2', 'prev_env[2 * ncol + 1]'], {}), "('qif,uft->uiqt', right2, prev_env[2 * ncol + 1])\n", (64509, 64558), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((64735, 64747), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['top_env'], {}), '(top_env)\n', (64738, 64747), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((66504, 66547), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""zay,ura->zuyr"""', 'prev_env[0]', 'left1'], {}), "('zay,ura->zuyr', prev_env[0], left1)\n", (66510, 66547), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((68594, 68649), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ufz,xtf->utxz"""', 'prev_env[2 * ncol + 1]', 'right1'], {}), "('ufz,xtf->utxz', prev_env[2 * ncol + 1], right1)\n", (68600, 68649), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((68826, 68838), 
'cyclopeps.tools.mps_tools.MPS', 'MPS', (['top_env'], {}), '(top_env)\n', (68829, 68838), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((72736, 72773), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""agj,jps->asgp"""', 'left1', 'left2'], {}), "('agj,jps->asgp', left1, left2)\n", (72742, 72773), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((75746, 75785), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""fio,orx->irxf"""', 'right1', 'right2'], {}), "('fio,orx->irxf', right1, right2)\n", (75752, 75785), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((75992, 76004), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['bot_env'], {}), '(bot_env)\n', (75995, 76004), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((77720, 77763), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""agj,zay->zjyg"""', 'left1', 'prev_env[0]'], {}), "('agj,zay->zjyg', left1, prev_env[0])\n", (77726, 77763), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((80483, 80538), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""fix,uft->uixt"""', 'right1', 'prev_env[2 * ncol + 1]'], {}), "('fix,uft->uixt', right1, prev_env[2 * ncol + 1])\n", (80489, 80538), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((80715, 80727), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['bot_env'], {}), '(bot_env)\n', (80718, 80727), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((82492, 82535), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""zay,aps->zsyp"""', 'prev_env[0]', 'left2'], {}), "('zay,aps->zsyp', prev_env[0], left2)\n", (82498, 82535), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((84617, 84672), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""yft,frx->yrxt"""', 'prev_env[2 * ncol + 1]', 'right2'], {}), "('yft,frx->yrxt', prev_env[2 * ncol 
+ 1], right2)\n", (84623, 84672), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((84849, 84861), 'cyclopeps.tools.mps_tools.MPS', 'MPS', (['bot_env'], {}), '(bot_env)\n', (84852, 84861), False, 'from cyclopeps.tools.mps_tools import MPS, identity_mps\n'), ((97115, 97154), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NdLDRn,MLN->MdDRn"""', 'tmp', 'left1'], {}), "('NdLDRn,MLN->MdDRn', tmp, left1)\n", (97121, 97154), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97171, 97209), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""MdDRn,mRn->MdDm"""', 'tmp', 'right1'], {}), "('MdDRn,mRn->MdDm', tmp, right1)\n", (97177, 97209), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97257, 97301), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,OuUo->OldprUo"""', 'ket', 'prev_env'], {}), "('ldpru,OuUo->OldprUo', ket, prev_env)\n", (97263, 97301), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97314, 97355), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""OldprUo,NlO->NdprUo"""', 'tmp', 'left2'], {}), "('OldprUo,NlO->NdprUo', tmp, left2)\n", (97320, 97355), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97368, 97408), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NdprUo,nro->NdpUn"""', 'tmp', 'right2'], {}), "('NdprUo,nro->NdpUn', tmp, right2)\n", (97374, 97408), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97421, 97460), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NdpUn,LDpRU->NdLDRn"""', 'tmp', 'bra'], {}), "('NdpUn,LDpRU->NdLDRn', tmp, bra)\n", (97427, 97460), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97473, 97512), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NdLDRn,MLN->MdDRn"""', 'tmp', 'left1'], {}), "('NdLDRn,MLN->MdDRn', tmp, left1)\n", 
(97479, 97512), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97529, 97567), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""MdDRn,mRn->MdDm"""', 'tmp', 'right1'], {}), "('MdDRn,mRn->MdDm', tmp, right1)\n", (97535, 97567), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((101832, 101871), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NlurUn,NlO->OurUn"""', 'tmp', 'left2'], {}), "('NlurUn,NlO->OurUn', tmp, left2)\n", (101838, 101871), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((101888, 101926), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""OurUn,nro->OuUo"""', 'tmp', 'right2'], {}), "('OurUn,nro->OuUo', tmp, right2)\n", (101894, 101926), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((101982, 102026), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRU,MdDm->MdLPURm"""', 'bra', 'prev_env'], {}), "('LDPRU,MdDm->MdLPURm', bra, prev_env)\n", (101988, 102026), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((102039, 102080), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""MdLPURm,MLN->NdPURm"""', 'tmp', 'left1'], {}), "('MdLPURm,MLN->NdPURm', tmp, left1)\n", (102045, 102080), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((102093, 102133), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NdPURm,mRn->NdPUn"""', 'tmp', 'right1'], {}), "('NdPURm,mRn->NdPUn', tmp, right1)\n", (102099, 102133), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((102146, 102185), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NdPUn,ldPru->NlurUn"""', 'tmp', 'ket'], {}), "('NdPUn,ldPru->NlurUn', tmp, ket)\n", (102152, 102185), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((102198, 102237), 'cyclopeps.tools.gen_ten.einsum', 'einsum', 
(['"""NlurUn,NlO->OurUn"""', 'tmp', 'left2'], {}), "('NlurUn,NlO->OurUn', tmp, left2)\n", (102204, 102237), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((102254, 102292), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""OurUn,nro->OuUo"""', 'tmp', 'right2'], {}), "('OurUn,nro->OuUo', tmp, right2)\n", (102260, 102292), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((105203, 105248), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRU,lUpru->lLDPRpru"""', 'peps1', 'peps2'], {}), "('LDPRU,lUpru->lLDPRpru', peps1, peps2)\n", (105209, 105248), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((105792, 105829), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDRa,aPb->LDRPb"""', 'ub', 'phys_b'], {}), "('LDRa,aPb->LDRPb', ub, phys_b)\n", (105798, 105829), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((105844, 105886), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDRPb,bpc->LDRPpc"""', 'final', 'phys_t'], {}), "('LDRPb,bpc->LDRPpc', final, phys_t)\n", (105850, 105886), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((105901, 105943), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDRPpc,lcru->lLDPRpru"""', 'final', 'vt'], {}), "('LDRPpc,lcru->lLDPRpru', final, vt)\n", (105907, 105943), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((115094, 115131), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AdDa,AuUa->uUdD"""', 'envt', 'envb'], {}), "('AdDa,AuUa->uUdD', envt, envb)\n", (115100, 115131), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117123, 117167), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""CdDc,CLB->BLdDc"""', 'env_bot', 'lbmpo[0]'], {}), "('CdDc,CLB->BLdDc', env_bot, lbmpo[0])\n", (117129, 117167), False, 'from cyclopeps.tools.gen_ten import 
rand, einsum, eye, ones, svd_ten, zeros\n'), ((117181, 117223), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BLdDc,cRb->BLdDRb"""', 'tmp', 'rbmpo[0]'], {}), "('BLdDc,cRb->BLdDRb', tmp, rbmpo[0])\n", (117187, 117223), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117237, 117281), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BLdDRb,BlA->AlLdDRb"""', 'tmp', 'lbmpo[1]'], {}), "('BLdDRb,BlA->AlLdDRb', tmp, lbmpo[1])\n", (117243, 117281), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117295, 117341), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AlLdDRb,bra->AlLdDrRa"""', 'tmp', 'rbmpo[1]'], {}), "('AlLdDRb,bra->AlLdDrRa', tmp, rbmpo[1])\n", (117301, 117341), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((118531, 118575), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""CuUc,BlC->BluUc"""', 'env_top', 'lbmpo[3]'], {}), "('CuUc,BlC->BluUc', env_top, lbmpo[3])\n", (118537, 118575), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((118589, 118631), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BluUc,brc->BluUrb"""', 'tmp', 'rbmpo[3]'], {}), "('BluUc,brc->BluUrb', tmp, rbmpo[3])\n", (118595, 118631), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((118645, 118689), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BluUrb,ALB->ALluUrb"""', 'tmp', 'lbmpo[2]'], {}), "('BluUrb,ALB->ALluUrb', tmp, lbmpo[2])\n", (118651, 118689), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((118703, 118749), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ALluUrb,aRb->AlLuUrRa"""', 'tmp', 'rbmpo[2]'], {}), "('ALluUrb,aRb->AlLuUrRa', tmp, rbmpo[2])\n", (118709, 118749), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((118791, 118844), 'cyclopeps.tools.gen_ten.einsum', 'einsum', 
(['"""AkKuUsSa,AlLdDrRa->lLdDrRkKuUsS"""', 'envt', 'envb'], {}), "('AkKuUsSa,AlLdDrRa->lLdDrRkKuUsS', envt, envb)\n", (118797, 118844), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((119966, 120013), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""APU,UQB->APQB"""', 'phys_b_bra', 'phys_t_bra'], {}), "('APU,UQB->APQB', phys_b_bra, phys_t_bra)\n", (119972, 120013), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120026, 120059), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""APQB,aAbB->aPQb"""', 'tmp', 'N'], {}), "('APQB,aAbB->aPQb', tmp, N)\n", (120032, 120059), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120072, 120119), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""apu,uqb->apqb"""', 'phys_b_ket', 'phys_t_ket'], {}), "('apu,uqb->apqb', phys_b_ket, phys_t_ket)\n", (120078, 120119), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120133, 120166), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""apqb,apqb->"""', 'tmp1', 'tmp2'], {}), "('apqb,apqb->', tmp1, tmp2)\n", (120139, 120166), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120868, 120923), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRZ,KZQSU->LDPRKQSU"""', 'phys_b_bra', 'phys_t_bra'], {}), "('LDPRZ,KZQSU->LDPRKQSU', phys_b_bra, phys_t_bra)\n", (120874, 120923), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120940, 120991), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRKQSU,lLdDrRkKuUsS->PQldrkus"""', 'comb1', 'N'], {}), "('LDPRKQSU,lLdDrRkKuUsS->PQldrkus', comb1, N)\n", (120946, 120991), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((121008, 121063), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldprz,kzqsu->ldprkqsu"""', 'phys_b_ket', 'phys_t_ket'], {}), 
"('ldprz,kzqsu->ldprkqsu', phys_b_ket, phys_t_ket)\n", (121014, 121063), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((121079, 121122), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""PQldrkus,ldPrkQsu->"""', 'comb1', 'comb2'], {}), "('PQldrkus,ldPrkQsu->', comb1, comb2)\n", (121085, 121122), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((125690, 125738), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldparu,pPx->ldxParu"""', 'bra[0][1]', 'mpo[0]'], {}), "('ldparu,pPx->ldxParu', bra[0][1], mpo[0])\n", (125696, 125738), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((125917, 125967), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldparu,xpPy->ldParyux"""', 'bra[0][0]', 'mpo[1]'], {}), "('ldparu,xpPy->ldParyux', bra[0][0], mpo[1])\n", (125923, 125967), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((126186, 126234), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldparu,ypP->lydParu"""', 'bra[1][0]', 'mpo[2]'], {}), "('ldparu,ypP->lydParu', bra[1][0], mpo[2])\n", (126192, 126234), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((126432, 126478), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,pPx->ldxPru"""', 'bra[0][1]', 'mpo[0]'], {}), "('ldpru,pPx->ldxPru', bra[0][1], mpo[0])\n", (126438, 126478), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((126551, 126599), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,xpPy->ldPryux"""', 'bra[0][0]', 'mpo[1]'], {}), "('ldpru,xpPy->ldPryux', bra[0][0], mpo[1])\n", (126557, 126599), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((126712, 126758), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,ypP->lydPru"""', 'bra[1][0]', 'mpo[2]'], {}), "('ldpru,ypP->lydPru', bra[1][0], mpo[2])\n", (126718, 
126758), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((127925, 127962), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""lkbKBr,lkbKBr->"""', 'Hbot', 'Htop'], {}), "('lkbKBr,lkbKBr->', Hbot, Htop)\n", (127931, 127962), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((129886, 129934), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldparu,pPx->ldParxu"""', 'bra[0][1]', 'mpo[0]'], {}), "('ldparu,pPx->ldParxu', bra[0][1], mpo[0])\n", (129892, 129934), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((130113, 130163), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldparu,xpPy->lxdyParu"""', 'bra[1][1]', 'mpo[1]'], {}), "('ldparu,xpPy->lxdyParu', bra[1][1], mpo[1])\n", (130119, 130163), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((130380, 130428), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldparu,ypP->ldParuy"""', 'bra[1][0]', 'mpo[2]'], {}), "('ldparu,ypP->ldParuy', bra[1][0], mpo[2])\n", (130386, 130428), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((130626, 130672), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,pPx->ldPrxu"""', 'bra[0][1]', 'mpo[0]'], {}), "('ldpru,pPx->ldPrxu', bra[0][1], mpo[0])\n", (130632, 130672), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((130745, 130793), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,xpPy->lxdyPru"""', 'bra[1][1]', 'mpo[1]'], {}), "('ldpru,xpPy->lxdyPru', bra[1][1], mpo[1])\n", (130751, 130793), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((130904, 130950), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,ypP->ldPruy"""', 'bra[1][0]', 'mpo[2]'], {}), "('ldpru,ypP->ldPruy', bra[1][0], mpo[2])\n", (130910, 130950), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, 
svd_ten, zeros\n'), ((132117, 132154), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""lkbKBr,lkbKBr->"""', 'Hbot', 'Htop'], {}), "('lkbKBr,lkbKBr->', Hbot, Htop)\n", (132123, 132154), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((155191, 155246), 'cyclopeps.tools.gen_ten.zeros', 'zeros', (['(Dnew, Dold)'], {'dtype': 'peps.ltensors[0][0][0].dtype'}), '((Dnew, Dold), dtype=peps.ltensors[0][0][0].dtype)\n', (155196, 155246), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((155273, 155318), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dold'], {'dtype': 'peps.ltensors[0][0][0].dtype'}), '(Dold, dtype=peps.ltensors[0][0][0].dtype)\n', (155276, 155318), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((57466, 57535), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dl', 'Zl'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dl, Zl, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (57469, 57535), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((58205, 58241), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""gklh,Rr->gRkhlr"""', 'ketten', 'I'], {}), "('gklh,Rr->gRkhlr', ketten, I)\n", (58211, 58241), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((58781, 58850), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dl', 'Zl'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(Dl, Zl, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (58784, 58850), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((59520, 59556), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""rvls,Hh->Hlrvhs"""', 'braten', 'I'], {}), "('rvls,Hh->Hlrvhs', braten, I)\n", (59526, 59556), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((62119, 62177), 
'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""gklhb,ybx->ygkxhl"""', 'ketten', 'prev_env[2 * col + 1]'], {}), "('gklhb,ybx->ygkxhl', ketten, prev_env[2 * col + 1])\n", (62125, 62177), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((62594, 62663), 'cyclopeps.tools.gen_ten.eye', 'eye', (['D1', 'Z1'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(D1, Z1, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (62597, 62663), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((63397, 63466), 'cyclopeps.tools.gen_ten.eye', 'eye', (['D2', 'Z2'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(D2, Z2, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (63400, 63466), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((64147, 64197), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""xcw,Hh->xHcwh"""', 'prev_env[2 * col + 2]', 'I1'], {}), "('xcw,Hh->xHcwh', prev_env[2 * col + 2], I1)\n", (64153, 64197), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((64210, 64246), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""xHcwh,Ll->xHLlcwh"""', 'res', 'I2'], {}), "('xHcwh,Ll->xHLlcwh', res, I2)\n", (64216, 64246), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((67031, 67100), 'cyclopeps.tools.gen_ten.eye', 'eye', (['D1', 'Z1'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(D1, Z1, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (67034, 67100), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((67781, 67831), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ybx,Rr->yRbxr"""', 'prev_env[2 * col + 1]', 'I1'], {}), "('ybx,Rr->yRbxr', prev_env[2 * col + 1], I1)\n", (67787, 67831), False, 'from cyclopeps.tools.gen_ten import rand, 
einsum, eye, ones, svd_ten, zeros\n'), ((68333, 68376), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""xlcw,rvlsc->xrvws"""', 'envten', 'braten'], {}), "('xlcw,rvlsc->xrvws', envten, braten)\n", (68339, 68376), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((73421, 73490), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dl', 'Zl'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dl, Zl, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (73424, 73490), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((74160, 74196), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""pkqt,Gg->Gptgkq"""', 'ketten', 'I'], {}), "('pkqt,Gg->Gptgkq', ketten, I)\n", (74166, 74196), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((74742, 74811), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dl', 'Zl'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(Dl, Zl, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (74745, 74811), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((75481, 75517), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""gkhl,Qq->gkQlhq"""', 'braten', 'I'], {}), "('gkhl,Qq->gkQlhq', braten, I)\n", (75487, 75517), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((78241, 78310), 'cyclopeps.tools.gen_ten.eye', 'eye', (['D1', 'Z1'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(D1, Z1, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (78244, 78310), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((79044, 79113), 'cyclopeps.tools.gen_ten.eye', 'eye', (['D2', 'Z2'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(D2, Z2, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (79047, 79113), False, 'from 
cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((79794, 79844), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ybx,Gg->yGbxg"""', 'prev_env[2 * col + 1]', 'I1'], {}), "('ybx,Gg->yGbxg', prev_env[2 * col + 1], I1)\n", (79800, 79844), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((79857, 79893), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""yGbxg,Kk->yGbKxgk"""', 'res', 'I2'], {}), "('yGbxg,Kk->yGbKxgk', res, I2)\n", (79863, 79893), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((80206, 80264), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""gckhl,xcw->xgklwh"""', 'braten', 'prev_env[2 * col + 2]'], {}), "('gckhl,xcw->xgklwh', braten, prev_env[2 * col + 2])\n", (80212, 80264), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((83138, 83181), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ybkx,pbkqt->yptxq"""', 'envten', 'ketten'], {}), "('ybkx,pbkqt->yptxq', envten, ketten)\n", (83144, 83181), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((83600, 83669), 'cyclopeps.tools.gen_ten.eye', 'eye', (['D1', 'Z1'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(D1, Z1, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (83603, 83669), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((84350, 84400), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""xcw,Qq->xQcwq"""', 'prev_env[2 * col + 2]', 'I1'], {}), "('xcw,Qq->xQcwq', prev_env[2 * col + 2], I1)\n", (84356, 84400), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((88945, 88986), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklnoq,urj->urklnoq"""', 'tmp', 'left1'], {}), "('jklnoq,urj->urklnoq', tmp, left1)\n", (88951, 88986), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, 
svd_ten, zeros\n'), ((89169, 89211), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ukvnwtq,xtq->ukvnwx"""', 'tmp', 'right1'], {}), "('ukvnwtq,xtq->ukvnwx', tmp, right1)\n", (89175, 89211), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89242, 89288), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jga,abcdef->jgbcdef"""', 'left2', 'prev_env'], {}), "('jga,abcdef->jgbcdef', left2, prev_env)\n", (89248, 89288), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89305, 89349), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jgbcdef,gklhb->jklhcdef"""', 'tmp', 'ket1'], {}), "('jgbcdef,gklhb->jklhcdef', tmp, ket1)\n", (89311, 89349), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89366, 89412), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklhcdef,hnoid->jklcnoief"""', 'tmp', 'ket2'], {}), "('jklhcdef,hnoid->jklcnoief', tmp, ket2)\n", (89372, 89412), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89429, 89475), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklcnoief,qif->jklcnoeq"""', 'tmp', 'right2'], {}), "('jklcnoief,qif->jklcnoeq', tmp, right2)\n", (89435, 89475), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89492, 89537), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklcnoeq,urj->urklcnoeq"""', 'tmp', 'left1'], {}), "('jklcnoeq,urj->urklcnoeq', tmp, left1)\n", (89498, 89537), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89554, 89600), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""urklcnoeq,rvlsc->ukvnsoeq"""', 'tmp', 'bra1'], {}), "('urklcnoeq,rvlsc->ukvnsoeq', tmp, bra1)\n", (89560, 89600), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89617, 89661), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ukvnsoeq,swote->ukvnwtq"""', 'tmp', 
'bra2'], {}), "('ukvnsoeq,swote->ukvnwtq', tmp, bra2)\n", (89623, 89661), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89682, 89724), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ukvnwtq,xtq->ukvnwx"""', 'tmp', 'right1'], {}), "('ukvnwtq,xtq->ukvnwx', tmp, right1)\n", (89688, 89724), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93393, 93434), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklmno,jps->spklmno"""', 'tmp', 'left2'], {}), "('jklmno,jps->spklmno', tmp, left2)\n", (93399, 93434), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93617, 93659), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""stlvrno,orx->stlvnx"""', 'tmp', 'right2'], {}), "('stlvrno,orx->stlvnx', tmp, right2)\n", (93623, 93659), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93690, 93736), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""agj,abcdef->jgbcdef"""', 'left1', 'prev_env'], {}), "('agj,abcdef->jgbcdef', left1, prev_env)\n", (93696, 93736), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93753, 93797), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jgbcdef,gckhl->jbklhdef"""', 'tmp', 'bra1'], {}), "('jgbcdef,gckhl->jbklhdef', tmp, bra1)\n", (93759, 93797), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93814, 93860), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jbklhdef,hemin->jbkldmnif"""', 'tmp', 'bra2'], {}), "('jbklhdef,hemin->jbkldmnif', tmp, bra2)\n", (93820, 93860), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93877, 93923), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jbkldmnif,fio->jbkldmno"""', 'tmp', 'right1'], {}), "('jbkldmnif,fio->jbkldmno', tmp, right1)\n", (93883, 93923), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, 
ones, svd_ten, zeros\n'), ((93940, 93985), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jbkldmno,jps->spbkldmno"""', 'tmp', 'left2'], {}), "('jbkldmno,jps->spbkldmno', tmp, left2)\n", (93946, 93985), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((94002, 94048), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""spbkldmno,pbkqt->stqldmno"""', 'tmp', 'ket1'], {}), "('spbkldmno,pbkqt->stqldmno', tmp, ket1)\n", (94008, 94048), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((94065, 94109), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""stqldmno,qdmrv->stlvrno"""', 'tmp', 'ket2'], {}), "('stqldmno,qdmrv->stlvrno', tmp, ket2)\n", (94071, 94109), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((94130, 94172), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""stlvrno,orx->stlvnx"""', 'tmp', 'right2'], {}), "('stlvrno,orx->stlvnx', tmp, right2)\n", (94136, 94172), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((106660, 106692), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""UDab,abud->UuDd"""', 'N', 'N1'], {}), "('UDab,abud->UuDd', N, N1)\n", (106666, 106692), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((106915, 106971), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldrkustvwxyz,tvwxyzLDRKUS->lLdDrRkKuUsS"""', 'N', 'N1'], {}), "('ldrkustvwxyz,tvwxyzLDRKUS->lLdDrRkKuUsS', N, N1)\n", (106921, 106971), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((113729, 113767), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BUb,BlA->AlUb"""', 'tmp', 'lbmpo[1]'], {}), "('BUb,BlA->AlUb', tmp, lbmpo[1])\n", (113735, 113767), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((113860, 113900), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AurUb,bra->AuUa"""', 'tmp', 'rbmpo[1]'], 
{}), "('AurUb,bra->AuUa', tmp, rbmpo[1])\n", (113866, 113900), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((113931, 113975), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""CdDc,CLB->BLdDc"""', 'env_bot', 'lbmpo[0]'], {}), "('CdDc,CLB->BLdDc', env_bot, lbmpo[0])\n", (113937, 113975), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((113992, 114032), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BLdDc,LDRU->BdURc"""', 'tmp', 'peps_b'], {}), "('BLdDc,LDRU->BdURc', tmp, peps_b)\n", (113998, 114032), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114049, 114089), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BdURc,cRb->BdUb"""', 'tmp', 'rbmpo[0]'], {}), "('BdURc,cRb->BdUb', tmp, rbmpo[0])\n", (114055, 114089), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114106, 114146), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BdUb,BlA->AldUb"""', 'tmp', 'lbmpo[1]'], {}), "('BdUb,BlA->AldUb', tmp, lbmpo[1])\n", (114112, 114146), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114163, 114202), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AldUb,ldru->AurUb"""', 'tmp', 'ket_b'], {}), "('AldUb,ldru->AurUb', tmp, ket_b)\n", (114169, 114202), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114219, 114259), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AurUb,bra->AuUa"""', 'tmp', 'rbmpo[1]'], {}), "('AurUb,bra->AuUa', tmp, rbmpo[1])\n", (114225, 114259), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114521, 114559), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""Bdb,ALB->ALdb"""', 'tmp', 'lbmpo[2]'], {}), "('Bdb,ALB->ALdb', tmp, lbmpo[2])\n", (114527, 114559), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114653, 
114693), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AdDRb,aRb->AdDa"""', 'tmp', 'rbmpo[2]'], {}), "('AdDRb,aRb->AdDa', tmp, rbmpo[2])\n", (114659, 114693), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114724, 114768), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""CuUc,BlC->BluUc"""', 'env_top', 'lbmpo[3]'], {}), "('CuUc,BlC->BluUc', env_top, lbmpo[3])\n", (114730, 114768), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114785, 114824), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BluUc,ldru->BdrUc"""', 'tmp', 'ket_t'], {}), "('BluUc,ldru->BdrUc', tmp, ket_t)\n", (114791, 114824), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114841, 114881), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BdrUc,brc->BdUb"""', 'tmp', 'rbmpo[3]'], {}), "('BdrUc,brc->BdUb', tmp, rbmpo[3])\n", (114847, 114881), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114898, 114938), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BdUb,ALB->ALdUb"""', 'tmp', 'lbmpo[2]'], {}), "('BdUb,ALB->ALdUb', tmp, lbmpo[2])\n", (114904, 114938), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114955, 114995), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ALdUb,LDRU->AdDRb"""', 'tmp', 'peps_t'], {}), "('ALdUb,LDRU->AdDRb', tmp, peps_t)\n", (114961, 114995), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((115012, 115052), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AdDRb,aRb->AdDa"""', 'tmp', 'rbmpo[2]'], {}), "('AdDRb,aRb->AdDa', tmp, rbmpo[2])\n", (115018, 115052), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120211, 120248), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""aPQb,apqb->PQpq"""', 'tmp1', 'tmp2'], {}), "('aPQb,apqb->PQpq', tmp1, tmp2)\n", (120217, 120248), 
False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((121175, 121222), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""PQldrkus,ldprkqsu->PQpq"""', 'comb1', 'comb2'], {}), "('PQldrkus,ldprkqsu->PQpq', comb1, comb2)\n", (121181, 121222), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((129199, 129236), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""lkbKBr,lkbKBr->"""', 'Nbot', 'Ntop'], {}), "('lkbKBr,lkbKBr->', Nbot, Ntop)\n", (129205, 129236), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((133391, 133428), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""lkbKBr,lkbKBr->"""', 'Nbot', 'Ntop'], {}), "('lkbKBr,lkbKBr->', Nbot, Ntop)\n", (133397, 133428), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137070, 137111), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlm,mno->jlno"""', 'flip_top', 'top[2]'], {}), "('jlm,mno->jlno', flip_top, top[2])\n", (137076, 137111), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137133, 137176), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlno,opq->jlnpq"""', 'flip_top', 'top[3]'], {}), "('jlno,opq->jlnpq', flip_top, top[3])\n", (137139, 137176), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137198, 137243), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlnpq,qrs->jlnprs"""', 'flip_top', 'top[4]'], {}), "('jlnpq,qrs->jlnprs', flip_top, top[4])\n", (137204, 137243), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137463, 137504), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlm,mno->jlno"""', 'flip_bot', 'bot[2]'], {}), "('jlm,mno->jlno', flip_bot, bot[2])\n", (137469, 137504), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137526, 137569), 'cyclopeps.tools.gen_ten.einsum', 
'einsum', (['"""jlno,opq->jlnpq"""', 'flip_bot', 'bot[3]'], {}), "('jlno,opq->jlnpq', flip_bot, bot[3])\n", (137532, 137569), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137591, 137636), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlnpq,qrs->jlnprs"""', 'flip_bot', 'bot[4]'], {}), "('jlnpq,qrs->jlnprs', flip_bot, bot[4])\n", (137597, 137636), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((42163, 42177), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (42174, 42177), True, 'import numpy as np\n'), ((96972, 97010), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""Ndpr,nro->oNdpn"""', 'tmp', 'right2'], {}), "('Ndpr,nro->oNdpn', tmp, right2)\n", (96978, 97010), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((97043, 97082), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""Ndpn,LDpRU->UNdLDRn"""', 'tmp', 'bra'], {}), "('Ndpn,LDpRU->UNdLDRn', tmp, bra)\n", (97049, 97082), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((101689, 101727), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NPUR,mRn->mNPUn"""', 'tmp', 'right1'], {}), "('NPUR,mRn->mNPUn', tmp, right1)\n", (101695, 101727), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((101760, 101799), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""NPUn,ldPru->dNlurUn"""', 'tmp', 'ket'], {}), "('NPUn,ldPru->dNlurUn', tmp, ket)\n", (101766, 101799), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116079, 116122), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""CLB,BlA->CLlA"""', 'lbmpo[0]', 'lbmpo[1]'], {}), "('CLB,BlA->CLlA', lbmpo[0], lbmpo[1])\n", (116085, 116122), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116146, 116191), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRU,CLlA->DPRUClA"""', 
'peps_b', 'symtmp'], {}), "('LDPRU,CLlA->DPRUClA', peps_b, symtmp)\n", (116152, 116191), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116215, 116264), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""cRb,DPRUClA->DPUClAcb"""', 'rbmpo[0]', 'symtmp'], {}), "('cRb,DPRUClA->DPUClAcb', rbmpo[0], symtmp)\n", (116221, 116264), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116288, 116339), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""bra,DPUClAcb->DPUClAcra"""', 'rbmpo[1]', 'symtmp'], {}), "('bra,DPUClAcb->DPUClAcra', rbmpo[1], symtmp)\n", (116294, 116339), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116363, 116413), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldPru,DPUClAcra->CdDcAuUa"""', 'ket_b', 'symtmp'], {}), "('ldPru,DPUClAcra->CdDcAuUa', ket_b, symtmp)\n", (116369, 116413), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116484, 116610), 'cyclopeps.tools.gen_ten.ones', 'ones', (['(1, 1, 1, 1)'], {'sym': '[symtmp.sym[0][:4], symtmp.sym[1][:4], None, None]', 'backend': 'lbmpo[0].backend', 'dtype': 'lbmpo[0].dtype'}), '((1, 1, 1, 1), sym=[symtmp.sym[0][:4], symtmp.sym[1][:4], None, None],\n backend=lbmpo[0].backend, dtype=lbmpo[0].dtype)\n', (116488, 116610), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((116895, 116971), 'cyclopeps.tools.gen_ten.ones', 'ones', (['(1, 1, 1, 1)'], {'sym': 'None', 'backend': 'lbmpo[0].backend', 'dtype': 'lbmpo[0].dtype'}), '((1, 1, 1, 1), sym=None, backend=lbmpo[0].backend, dtype=lbmpo[0].dtype)\n', (116899, 116971), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117565, 117608), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ALB,BlC->ALlC"""', 'lbmpo[2]', 'lbmpo[3]'], {}), "('ALB,BlC->ALlC', lbmpo[2], lbmpo[3])\n", (117571, 117608), False, 'from 
cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117632, 117677), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRU,ALlC->DPRUAlC"""', 'peps_t', 'symtmp'], {}), "('LDPRU,ALlC->DPRUAlC', peps_t, symtmp)\n", (117638, 117677), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117701, 117750), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""aRb,DPRUAlC->DPUAlCab"""', 'rbmpo[2]', 'symtmp'], {}), "('aRb,DPRUAlC->DPUAlCab', rbmpo[2], symtmp)\n", (117707, 117750), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117774, 117825), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""brc,DPUAlCab->DPUAlCarc"""', 'rbmpo[3]', 'symtmp'], {}), "('brc,DPUAlCab->DPUAlCarc', rbmpo[3], symtmp)\n", (117780, 117825), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117849, 117899), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldPru,DPUAlCarc->CuUcDaAd"""', 'ket_t', 'symtmp'], {}), "('ldPru,DPUAlCarc->CuUcDaAd', ket_t, symtmp)\n", (117855, 117899), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((117970, 118096), 'cyclopeps.tools.gen_ten.ones', 'ones', (['(1, 1, 1, 1)'], {'sym': '[symtmp.sym[0][:4], symtmp.sym[1][:4], None, None]', 'backend': 'lbmpo[0].backend', 'dtype': 'lbmpo[0].dtype'}), '((1, 1, 1, 1), sym=[symtmp.sym[0][:4], symtmp.sym[1][:4], None, None],\n backend=lbmpo[0].backend, dtype=lbmpo[0].dtype)\n', (117974, 118096), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((118381, 118457), 'cyclopeps.tools.gen_ten.ones', 'ones', (['(1, 1, 1, 1)'], {'sym': 'None', 'backend': 'lbmpo[0].backend', 'dtype': 'lbmpo[0].dtype'}), '((1, 1, 1, 1), sym=None, backend=lbmpo[0].backend, dtype=lbmpo[0].dtype)\n', (118385, 118457), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120477, 120512), 
'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""PaQbpaqb,PQpq->"""', 'tmp', 'ham'], {}), "('PaQbpaqb,PQpq->', tmp, ham)\n", (120483, 120512), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((120731, 120762), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""PQpq,PQpq->"""', 'tmp', 'ham'], {}), "('PQpq,PQpq->', tmp, ham)\n", (120737, 120762), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((121483, 121524), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""PaQbpaqb,PQpq->"""', 'phys_inds', 'ham'], {}), "('PaQbpaqb,PQpq->', phys_inds, ham)\n", (121489, 121524), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((121769, 121806), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""PQpq,PQpq->"""', 'phys_inds', 'ham'], {}), "('PQpq,PQpq->', phys_inds, ham)\n", (121775, 121806), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((155349, 155403), 'cyclopeps.tools.gen_ten.rand', 'rand', (['(Dnew, Dold)'], {'dtype': 'peps.ltensors[0][0][0].dtype'}), '((Dnew, Dold), dtype=peps.ltensors[0][0][0].dtype)\n', (155353, 155403), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((57852, 57923), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (57855, 57923), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((58035, 58063), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I', 'Ii'], {}), "('ij,IJ->iIjJ', I, Ii)\n", (58041, 58063), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((59167, 59238), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(Dli, Zli, 
is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (59170, 59238), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((59350, 59378), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I', 'Ii'], {}), "('ij,IJ->iIjJ', I, Ii)\n", (59356, 59378), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((62983, 63054), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(Dli, Zli, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (62986, 63054), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((63167, 63196), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I1', 'Ii'], {}), "('ij,IJ->iIjJ', I1, Ii)\n", (63173, 63196), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((63786, 63857), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'ketten.is_symmetric', 'backend': 'ketten.backend'}), '(Dli, Zli, is_symmetric=ketten.is_symmetric, backend=ketten.backend)\n', (63789, 63857), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((63970, 63999), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I2', 'Ii'], {}), "('ij,IJ->iIjJ', I2, Ii)\n", (63976, 63999), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((67420, 67491), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (67423, 67491), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((67604, 67633), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I1', 'Ii'], {}), "('ij,IJ->iIjJ', I1, Ii)\n", (67610, 67633), False, 'from 
cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((73807, 73878), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (73810, 73878), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((73990, 74018), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I', 'Ii'], {}), "('ij,IJ->iIjJ', I, Ii)\n", (73996, 74018), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((75128, 75199), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (75131, 75199), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((75311, 75339), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I', 'Ii'], {}), "('ij,IJ->iIjJ', I, Ii)\n", (75317, 75339), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((78630, 78701), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (78633, 78701), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((78814, 78843), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I1', 'Ii'], {}), "('ij,IJ->iIjJ', I1, Ii)\n", (78820, 78843), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((79433, 79504), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (79436, 79504), False, 'from 
cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((79617, 79646), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I2', 'Ii'], {}), "('ij,IJ->iIjJ', I2, Ii)\n", (79623, 79646), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((83989, 84060), 'cyclopeps.tools.gen_ten.eye', 'eye', (['Dli', 'Zli'], {'is_symmetric': 'braten.is_symmetric', 'backend': 'braten.backend'}), '(Dli, Zli, is_symmetric=braten.is_symmetric, backend=braten.backend)\n', (83992, 84060), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((84173, 84202), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ij,IJ->iIjJ"""', 'I1', 'Ii'], {}), "('ij,IJ->iIjJ', I1, Ii)\n", (84179, 84202), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((88789, 88829), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklh,hnoid->djklnoi"""', 'tmp', 'ket2'], {}), "('jklh,hnoid->djklnoi', tmp, ket2)\n", (88795, 88829), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((88866, 88908), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklnoi,qif->fjklnoq"""', 'tmp', 'right2'], {}), "('jklnoi,qif->fjklnoq', tmp, right2)\n", (88872, 88908), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89003, 89047), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""urklnoq,rvlsc->cukvsnoq"""', 'tmp', 'bra1'], {}), "('urklnoq,rvlsc->cukvsnoq', tmp, bra1)\n", (89009, 89047), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((89084, 89128), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ukvsnoq,swote->eukvnwtq"""', 'tmp', 'bra2'], {}), "('ukvsnoq,swote->eukvnwtq', tmp, bra2)\n", (89090, 89128), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93237, 93277), 'cyclopeps.tools.gen_ten.einsum', 'einsum', 
(['"""jklh,hemin->ejklmni"""', 'tmp', 'bra2'], {}), "('jklh,hemin->ejklmni', tmp, bra2)\n", (93243, 93277), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93314, 93356), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jklmni,fio->fjklmno"""', 'tmp', 'right1'], {}), "('jklmni,fio->fjklmno', tmp, right1)\n", (93320, 93356), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93451, 93495), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""spklmno,pbkqt->bstqlmno"""', 'tmp', 'ket1'], {}), "('spklmno,pbkqt->bstqlmno', tmp, ket1)\n", (93457, 93495), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93532, 93576), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""stqlmno,qdmrv->dstlvrno"""', 'tmp', 'ket2'], {}), "('stqlmno,qdmrv->dstlvrno', tmp, ket2)\n", (93538, 93576), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((107185, 107229), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[0]]'], {}), '([N.ten.shape[i] for i in N.legs[0]])\n', (107192, 107229), True, 'import numpy as np\n'), ((107255, 107299), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[1]]'], {}), '([N.ten.shape[i] for i in N.legs[1]])\n', (107262, 107299), True, 'import numpy as np\n'), ((107325, 107369), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[2]]'], {}), '([N.ten.shape[i] for i in N.legs[2]])\n', (107332, 107369), True, 'import numpy as np\n'), ((107395, 107439), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[3]]'], {}), '([N.ten.shape[i] for i in N.legs[3]])\n', (107402, 107439), True, 'import numpy as np\n'), ((108787, 108831), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[0]]'], {}), '([N.ten.shape[i] for i in N.legs[0]])\n', (108794, 108831), True, 'import numpy as np\n'), ((108857, 108901), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[1]]'], {}), 
'([N.ten.shape[i] for i in N.legs[1]])\n', (108864, 108901), True, 'import numpy as np\n'), ((108927, 108971), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[2]]'], {}), '([N.ten.shape[i] for i in N.legs[2]])\n', (108934, 108971), True, 'import numpy as np\n'), ((108997, 109041), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[3]]'], {}), '([N.ten.shape[i] for i in N.legs[3]])\n', (109004, 109041), True, 'import numpy as np\n'), ((109067, 109111), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[4]]'], {}), '([N.ten.shape[i] for i in N.legs[4]])\n', (109074, 109111), True, 'import numpy as np\n'), ((109137, 109181), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[5]]'], {}), '([N.ten.shape[i] for i in N.legs[5]])\n', (109144, 109181), True, 'import numpy as np\n'), ((109207, 109251), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[6]]'], {}), '([N.ten.shape[i] for i in N.legs[6]])\n', (109214, 109251), True, 'import numpy as np\n'), ((109277, 109321), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[7]]'], {}), '([N.ten.shape[i] for i in N.legs[7]])\n', (109284, 109321), True, 'import numpy as np\n'), ((109347, 109391), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[8]]'], {}), '([N.ten.shape[i] for i in N.legs[8]])\n', (109354, 109391), True, 'import numpy as np\n'), ((109417, 109461), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[9]]'], {}), '([N.ten.shape[i] for i in N.legs[9]])\n', (109424, 109461), True, 'import numpy as np\n'), ((109488, 109533), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[10]]'], {}), '([N.ten.shape[i] for i in N.legs[10]])\n', (109495, 109533), True, 'import numpy as np\n'), ((109560, 109605), 'numpy.prod', 'np.prod', (['[N.ten.shape[i] for i in N.legs[11]]'], {}), '([N.ten.shape[i] for i in N.legs[11]])\n', (109567, 109605), True, 'import numpy as np\n'), ((113654, 113692), 'cyclopeps.tools.gen_ten.einsum', 'einsum', 
(['"""BUR,cRb->cBUb"""', 'tmp', 'rbmpo[0]'], {}), "('BUR,cRb->cBUb', tmp, rbmpo[0])\n", (113660, 113692), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((113784, 113823), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""AlUb,ldru->dAurUb"""', 'tmp', 'ket_b'], {}), "('AlUb,ldru->dAurUb', tmp, ket_b)\n", (113790, 113823), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114446, 114484), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""Bdr,brc->cBdb"""', 'tmp', 'rbmpo[3]'], {}), "('Bdr,brc->cBdb', tmp, rbmpo[3])\n", (114452, 114484), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114576, 114616), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ALdb,LDRU->UAdDRb"""', 'tmp', 'peps_t'], {}), "('ALdb,LDRU->UAdDRb', tmp, peps_t)\n", (114582, 114616), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((136989, 137028), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ijk,klm->ijlm"""', 'top[0]', 'top[1]'], {}), "('ijk,klm->ijlm', top[0], top[1])\n", (136995, 137028), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137265, 137312), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlnprs,stu->jlnprtu"""', 'flip_top', 'top[5]'], {}), "('jlnprs,stu->jlnprtu', flip_top, top[5])\n", (137271, 137312), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137382, 137421), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ijk,klm->ijlm"""', 'bot[0]', 'bot[1]'], {}), "('ijk,klm->ijlm', bot[0], bot[1])\n", (137388, 137421), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((137658, 137705), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jlnprs,stu->jlnprtu"""', 'flip_bot', 'bot[5]'], {}), "('jlnprs,stu->jlnprtu', flip_bot, bot[5])\n", (137664, 137705), False, 'from cyclopeps.tools.gen_ten 
import rand, einsum, eye, ones, svd_ten, zeros\n'), ((155704, 155752), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""Ll,l->L"""', 'mat', 'peps.ltensors[ind][x][y]'], {}), "('Ll,l->L', mat, peps.ltensors[ind][x][y])\n", (155710, 155752), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((160580, 160635), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,rR->ldpRu"""', 'Gamma[x][y]', 'Lambda[1][x][y]'], {}), "('ldpru,rR->ldpRu', Gamma[x][y], Lambda[1][x][y])\n", (160586, 160635), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((160702, 160757), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,uU->ldprU"""', 'Gamma[x][y]', 'Lambda[0][x][y]'], {}), "('ldpru,uU->ldprU', Gamma[x][y], Lambda[0][x][y])\n", (160708, 160757), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((42400, 42418), 'numpy.isfinite', 'np.isfinite', (['zprev'], {}), '(zprev)\n', (42411, 42418), True, 'import numpy as np\n'), ((96880, 96919), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""ldpru,NlO->uONdpr"""', 'ket', 'left2'], {}), "('ldpru,NlO->uONdpr', ket, left2)\n", (96886, 96919), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((101597, 101636), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""LDPRU,MLN->DMNPUR"""', 'bra', 'left1'], {}), "('LDPRU,MLN->DMNPUR', bra, left1)\n", (101603, 101636), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((88692, 88732), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""jga,gklhb->abjklh"""', 'left2', 'ket1'], {}), "('jga,gklhb->abjklh', left2, ket1)\n", (88698, 88732), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((93140, 93180), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""agj,gckhl->acjklh"""', 'left1', 'bra1'], {}), "('agj,gckhl->acjklh', left1, bra1)\n", (93146, 93180), False, 'from 
cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((113554, 113597), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""CLB,LDRU->CDBUR"""', 'lbmpo[0]', 'peps_b'], {}), "('CLB,LDRU->CDBUR', lbmpo[0], peps_b)\n", (113560, 113597), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n'), ((114347, 114389), 'cyclopeps.tools.gen_ten.einsum', 'einsum', (['"""BlC,ldru->CuBdr"""', 'lbmpo[3]', 'ket_t'], {}), "('BlC,ldru->CuBdr', lbmpo[3], ket_t)\n", (114353, 114389), False, 'from cyclopeps.tools.gen_ten import rand, einsum, eye, ones, svd_ten, zeros\n')] |
# Demo script: export an RLlib PPO policy (TensorFlow graph) to ONNX and then
# verify that onnxruntime reproduces the TensorFlow output on the same batch.
import numpy as np
import ray
import ray.rllib.algorithms.ppo as ppo
import onnxruntime
import os
import shutil
# Configure our PPO.
config = ppo.DEFAULT_CONFIG.copy()
config["num_gpus"] = 0
config["num_workers"] = 1
config["framework"] = "tf"  # static-graph TensorFlow is required for the ONNX export below
# Remove any previous export so we always start from a clean directory.
outdir = "export_tf"
if os.path.exists(outdir):
    shutil.rmtree(outdir)
np.random.seed(1234)  # fixed seed so the test batch (and the comparison) is reproducible
# We will run inference with this test batch
test_data = {
    "obs": np.random.uniform(0, 1.0, size=(10, 4)).astype(np.float32),
}
# Start Ray and initialize a PPO Algorithm.
ray.init()
algo = ppo.PPO(config=config, env="CartPole-v0")
# You could train the model here
# algo.train()
# Let's run inference on the tensorflow model
policy = algo.get_policy()
result_tf, _ = policy.model(test_data)
# Evaluate tensor to fetch numpy array
# NOTE(review): relies on the private `_sess` attribute of the TF policy.
with policy._sess.as_default():
    result_tf = result_tf.eval()
# This line will export the model to ONNX (opset version 11)
res = algo.export_policy_model(outdir, onnx=11)
# Import ONNX model
exported_model_file = os.path.join(outdir, "saved_model.onnx")
# Start an inference session for the ONNX model
session = onnxruntime.InferenceSession(exported_model_file, None)
# Pass the same test batch to the ONNX model (rename to match tensor names)
onnx_test_data = {f"default_policy/{k}:0": v for k, v in test_data.items()}
result_onnx = session.run(["default_policy/model/fc_out/BiasAdd:0"], onnx_test_data)
# These results should be equal!
print("TENSORFLOW", result_tf)
print("ONNX", result_onnx)
assert np.allclose(result_tf, result_onnx), "Model outputs are NOT equal. FAILED"
print("Model outputs are equal. PASSED")
| [
"os.path.exists",
"numpy.allclose",
"ray.init",
"os.path.join",
"onnxruntime.InferenceSession",
"numpy.random.uniform",
"numpy.random.seed",
"ray.rllib.algorithms.ppo.PPO",
"shutil.rmtree",
"ray.rllib.algorithms.ppo.DEFAULT_CONFIG.copy"
] | [((143, 168), 'ray.rllib.algorithms.ppo.DEFAULT_CONFIG.copy', 'ppo.DEFAULT_CONFIG.copy', ([], {}), '()\n', (166, 168), True, 'import ray.rllib.algorithms.ppo as ppo\n'), ((270, 292), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (284, 292), False, 'import os\n'), ((321, 341), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (335, 341), True, 'import numpy as np\n'), ((520, 530), 'ray.init', 'ray.init', ([], {}), '()\n', (528, 530), False, 'import ray\n'), ((538, 579), 'ray.rllib.algorithms.ppo.PPO', 'ppo.PPO', ([], {'config': 'config', 'env': '"""CartPole-v0"""'}), "(config=config, env='CartPole-v0')\n", (545, 579), True, 'import ray.rllib.algorithms.ppo as ppo\n'), ((981, 1021), 'os.path.join', 'os.path.join', (['outdir', '"""saved_model.onnx"""'], {}), "(outdir, 'saved_model.onnx')\n", (993, 1021), False, 'import os\n'), ((1081, 1136), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['exported_model_file', 'None'], {}), '(exported_model_file, None)\n', (1109, 1136), False, 'import onnxruntime\n'), ((1476, 1511), 'numpy.allclose', 'np.allclose', (['result_tf', 'result_onnx'], {}), '(result_tf, result_onnx)\n', (1487, 1511), True, 'import numpy as np\n'), ((298, 319), 'shutil.rmtree', 'shutil.rmtree', (['outdir'], {}), '(outdir)\n', (311, 319), False, 'import shutil\n'), ((413, 452), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 4)'}), '(0, 1.0, size=(10, 4))\n', (430, 452), True, 'import numpy as np\n')] |
import math
import os
import time
import numpy as np
import pandas as pd
import scipy.io as scio
import geatpy as ea
import warnings
class Problem:
    """Describes an optimization problem for the GA framework.

    Attributes:
        name      : str   - problem name; used to locate the cached reference
                            objective-value file on disk.
        M         : int   - number of objectives.
        maxormins : array - per-objective sign vector (built from ``maxormins``).
        Dim       : int   - number of decision variables.
        varTypes  : array - per-variable type flags (presumably 0 = continuous,
                            1 = integer -- geatpy convention; confirm).
        ranges    : array - 2 x Dim matrix: row 0 lower bounds, row 1 upper bounds.
        borders   : array - 2 x Dim matrix of bound-inclusion flags (lbin/ubin).
    """

    def __init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin, aimFunc=None, calReferObjV=None):
        """Store the problem definition.

        ``aimFunc`` / ``calReferObjV`` optionally inject the objective function
        and the reference-value function; when omitted, the class-level default
        methods below remain in effect.
        """
        self.name = name
        self.M = M
        self.maxormins = np.array(maxormins)
        self.Dim = Dim
        self.varTypes = np.array(varTypes)
        self.ranges = np.array([lb, ub])
        self.borders = np.array([lbin, ubin])
        # Only override the class-level hooks when callables are supplied.
        if aimFunc is not None:
            self.aimFunc = aimFunc
        if calReferObjV is not None:
            self.calReferObjV = calReferObjV

    def aimFunc(self, pop):
        """Objective-function hook; must be supplied via the constructor or a subclass."""
        raise RuntimeError(
            'error in Problem: aimFunc has not been initialized. ')

    def calReferObjV(self):
        """Compute the reference objective values; ``None`` means 'not available'."""
        return None

    def getReferObjV(self, reCalculate=False):
        """Read or (re)compute the problem's reference objective values.

        The reference value can be the theoretical global optimum's objective
        values or an artificially chosen non-optimal reference.  With
        ``reCalculate=False`` (default) a cached CSV is read when present;
        otherwise ``calReferObjV()`` is invoked and, if it returns data, the
        result is cached as ``referenceObjV/<name>_M<M>_D<Dim>.csv``.
        Returns the reference array, or ``None`` when nothing is available.
        """
        # Cache file path is built once instead of being repeated per call site.
        path = 'referenceObjV/' + self.name + '_M' + str(self.M) + '_D' + str(self.Dim) + '.csv'
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs('referenceObjV', exist_ok=True)
        if not reCalculate and os.path.exists(path):
            return np.loadtxt(path, delimiter=',')
        referenceObjV = self.calReferObjV()
        if referenceObjV is not None:
            np.savetxt(path, referenceObjV, delimiter=',')
        else:
            print('No data found for the reference value of the objective function')
        return referenceObjV
class Population:
    """
    Population : class - population class

    Description:
        Stores all the information describing one population of individuals.

    Attributes:
        sizes    : int   - population size, i.e. the number of individuals.
        ChromNum : int   - number of chromosomes per individual (always 1 here).
        Encoding : str   - chromosome encoding:
                           'BG' : binary / Gray encoding.
                           'RI' : real-integer encoding (mix of real decimals
                                  and real integers in one chromosome).
                           'P'  : permutation encoding.
                           'Real-valued encoding' covers both 'RI' and 'P':
                           such chromosomes directly represent the decision
                           variables without decoding.
                           Special usage: Encoding=None creates a population
                           that carries no chromosome data (Field and Chrom are
                           set to None).  Useful when only chromosome-independent
                           statistics are needed, e.g. unified fitness
                           evaluation across several co-evolving populations.
        Field    : array - decoding matrix, either FieldD or FieldDR (see the
                           Geatpy data-structure documentation).
        Chrom    : array - chromosome matrix; one row per individual.
        Lind     : int   - chromosome length.
        ObjV     : array - objective values; one row per individual, one column
                           per objective.
        FitnV    : array - column vector of individual fitnesses (minimum 0).
        CV       : array - Constraint Violation matrix quantifying constraint
                           breaches; one row per individual, one column per
                           constraint.  None when no constraints are set.
        Phen     : array - phenotype matrix (the decoded decision variables).

    Function :
        See the source code for details.
    """
    def __init__(self, Encoding, Field, NIND, Chrom=None, ObjV=None, FitnV=None, CV=None, Phen=None):
        """Instantiate a population object.

        Usage:
            import geatpy as ea
            population = ea.Population(Encoding, Field, NIND)
        where NIND is the desired number of individuals.  At this point the
        population is only instantiated, not truly initialized: passing Chrom
        completes the real initialization, otherwise Encoding/Field/NIND alone
        suffice and the remaining attributes can be assigned later by
        computation (e.g. via initChrom()).
        """
        if type(NIND) is int and NIND >= 0:
            self.sizes = NIND
        else:
            raise RuntimeError(
                'error in Population: Size error.')
        self.ChromNum = 1  # a plain Population always has exactly one chromosome
        self.Encoding = Encoding
        if Encoding is None:
            # Chromosome-free population: no decoding matrix, no chromosomes.
            self.Field = None
            self.Chrom = None
        else:
            self.Field = Field.copy()
            self.Chrom = Chrom.copy() if Chrom is not None else None
        self.Lind = Chrom.shape[1] if Chrom is not None else 0
        # Defensive copies so this population never aliases the caller's arrays.
        self.ObjV = ObjV.copy() if ObjV is not None else None
        self.FitnV = FitnV.copy() if FitnV is not None else None
        self.CV = CV.copy() if CV is not None else None
        self.Phen = Phen.copy() if Phen is not None else None
    def initChrom(self, NIND=None):
        """Initialize the chromosome matrix with random chromosomes.

        NIND, when given, resizes the population before the chromosomes are
        generated.  Resets ObjV/FitnV/CV since the old values no longer match.
        """
        if NIND is not None:
            self.sizes = NIND
        self.Chrom = ea.crtpc(self.Encoding, self.sizes, self.Field)
        self.Lind = self.Chrom.shape[1]
        self.ObjV = None
        self.FitnV = None
        self.CV = None
    def decoding(self):
        """Decode the population chromosomes into a phenotype matrix."""
        if self.Encoding == 'BG':
            Phen = self.bi2dec(self.Chrom, self.Field)
        elif self.Encoding == 'RI' or self.Encoding == 'P':
            # Real-valued encodings need no decoding; chromosome == phenotype.
            Phen = self.Chrom.copy()
        else:
            # NOTE: the doubled '' below is string concatenation, so the message
            # actually reads "... Encoding must be BG or RI or P.".
            raise RuntimeError(
                'error in Population.decoding: Encoding must be ''BG'' or ''RI'' or ''P''.')
        return Phen
    def copy(self):
        """
        copy : function - duplication of populations

        Usage:
            Suppose pop is a population, then pop1 = pop.copy() duplicates it
            (the constructor takes defensive copies of all arrays).
        """
        return Population(self.Encoding,
                          self.Field,
                          self.sizes,
                          self.Chrom,
                          self.ObjV,
                          self.FitnV,
                          self.CV,
                          self.Phen)
    def __getitem__(self, index):
        """Slice the population: select individuals by index to form a new one.

        Usage: pop1 = pop[[0, 1]] builds the population made of the first and
        second individuals of pop.
        Note: index must be a slice, a Numpy array, or a list; its legality is
        not checked in detail here.
        """
        if self.Encoding is None:
            NewChrom = None
        else:
            if self.Chrom is None:
                raise RuntimeError(
                    'error in Population: Chrom is None.')
            NewChrom = self.Chrom[index]
        if type(index) != slice and type(index) != np.ndarray and type(index) != list:
            raise RuntimeError(
                'error in Population: index must be a 1-D array.')
        if type(index) == slice:
            # Derive the new size from the slice bounds; assumes index.stop is
            # not None -- TODO(review): confirm callers never pass open slices.
            NIND = (index.stop - (index.start if index.start is not None else 0)) // (
                index.step if index.step is not None else 1)
        else:
            index_array = np.array(index)
            if index_array.dtype == bool:
                # Boolean mask: the new size is the number of True entries.
                NIND = int(np.sum(index_array))
            else:
                NIND = len(index_array)
        return Population(self.Encoding,
                          self.Field,
                          NIND,
                          NewChrom,
                          self.ObjV[index] if self.ObjV is not None else None,
                          self.FitnV[index] if self.FitnV is not None else None,
                          self.CV[index] if self.CV is not None else None,
                          self.Phen[index] if self.Phen is not None else None)
    def shuffle(self):
        """
        shuffle : function - shuffle the order of individuals in the population

        Usage: pop.shuffle() permutes the individuals of pop in place, applying
        the same random permutation to all per-individual arrays.
        """
        shuff = np.argsort(np.random.rand(self.sizes))  # random permutation of 0..sizes-1
        if self.Encoding is None:
            self.Chrom = None
        else:
            if self.Chrom is None:
                raise RuntimeError(
                    'error in Population: Chrom is None.')
            self.Chrom = self.Chrom[shuff, :]
        self.ObjV = self.ObjV[shuff, :] if self.ObjV is not None else None
        self.FitnV = self.FitnV[shuff] if self.FitnV is not None else None
        self.CV = self.CV[shuff, :] if self.CV is not None else None
        self.Phen = self.Phen[shuff, :] if self.Phen is not None else None
    def __setitem__(self, index, pop):
        """Assign individuals of another population into this one.

        Usage: pop[[0, 1]] = pop1 replaces the first and second individuals of
        pop with the individuals of pop1.
        Note: index must be a Numpy-array row vector; its legality is not
        checked.  Fitness is NOT reset automatically after the replacement --
        if all individuals must be re-evaluated, update FitnV by hand.
        """
        if self.Encoding is not None:
            if self.Encoding != pop.Encoding:
                raise RuntimeError(
                    'error in Population: Encoding disagree.')
            if np.all(self.Field == pop.Field) == False:
                raise RuntimeError(
                    'error in Population: Field disagree. ')
            if self.Chrom is None:
                raise RuntimeError(
                    'error in Population: Chrom is None. ')
            self.Chrom[index] = pop.Chrom
        # The XOR checks enforce "both populations have it, or neither does"
        # for each per-individual array.
        if (self.ObjV is None) ^ (pop.ObjV is None):
            raise RuntimeError(
                'error in Population: ObjV disagree.')
        if (self.FitnV is None) ^ (pop.FitnV is None):
            raise RuntimeError(
                'error in Population: FitnV disagree. ')
        if (self.CV is None) ^ (pop.CV is None):
            raise RuntimeError(
                'error in Population: CV disagree. ')
        if (self.Phen is None) ^ (pop.Phen is None):
            raise RuntimeError(
                'error in Population: Phen disagree.')
        if self.ObjV is not None:
            self.ObjV[index] = pop.ObjV
        if self.FitnV is not None:
            self.FitnV[index] = pop.FitnV
        if self.CV is not None:
            self.CV[index] = pop.CV
        if self.Phen is not None:
            self.Phen[index] = pop.Phen
        # NOTE(review): sizes is refreshed from Phen, so this raises if Phen is
        # None -- confirm callers always have a decoded population here.
        self.sizes = self.Phen.shape[0]
    def __add__(self, pop):
        """Merge two populations into a new one.

        Usage: pop = pop1 + pop2 concatenates the individuals of pop1 and pop2
        (their sizes may differ).
        Note: fitness is NOT reset after the merge -- if all individuals must
        be re-evaluated, update FitnV by hand.
        """
        if self.Encoding is None:
            NewChrom = None
        else:
            if self.Encoding != pop.Encoding:
                raise RuntimeError(
                    'error in Population: Encoding disagree. ')
            if self.Chrom is None or pop.Chrom is None:
                raise RuntimeError(
                    'error in Population: Chrom is None. ')
            if np.all(self.Field == pop.Field) == False:
                raise RuntimeError(
                    'error in Population: Field disagree.')
            NewChrom = np.vstack([self.Chrom, pop.Chrom])
        if (self.ObjV is None) ^ (pop.ObjV is None):
            raise RuntimeError(
                'error in Population: ObjV disagree.')
        if (self.CV is None) ^ (pop.CV is None):
            raise RuntimeError(
                'error in Population: CV disagree. ')
        if (self.Phen is None) ^ (pop.Phen is None):
            raise RuntimeError(
                'error in Population: Phen disagree.')
        NIND = self.sizes + pop.sizes
        # NOTE(review): unlike ObjV/CV/Phen, FitnV is stacked only when BOTH
        # sides carry it (there is no XOR guard for FitnV above).
        return Population(self.Encoding,
                          self.Field,
                          NIND,
                          NewChrom,
                          np.vstack([self.ObjV, pop.ObjV]
                                    ) if self.ObjV is not None else None,
                          np.vstack(
                              [self.FitnV, pop.FitnV]) if self.FitnV is not None and pop.FitnV is not None else None,
                          np.vstack([self.CV, pop.CV]
                                    ) if self.CV is not None else None,
                          np.vstack([self.Phen, pop.Phen]) if self.Phen is not None else None)
    def __len__(self):
        """Return the population size (equivalently available as pop.sizes)."""
        return self.sizes
    def save(self):
        """Save the population information to files.

        Intended layout (under a "Result" folder):
            "Encoding.txt" - chromosome encoding; "Field.csv" - decoding matrix;
            "Chrom.csv" - chromosomes; "ObjV.csv" - objective values;
            "FitnV.csv" - fitness vector; "CV.csv" - constraint violations;
            "Phen.csv" - phenotypes.
        NOTE(review): this is currently a stub -- it performs no I/O and simply
        returns None.  Population legality is not checked.
        """
        return None
    def load_latest_population(self, population):
        """Load population information from files into `population` and return it.

        Reads from the "GA_Search/Result" folder:
            "Chroms<i>.csv" - chromosome matrix i (i = 0, 1, 2, ...), which also
            determines Linds[i]; "ObjV.csv", "FitnV.csv", "CV.csv", "Phen.csv" -
            loaded only when present.
        NOTE(review): this accesses population.Chroms / population.Linds /
        population.ChromNum, i.e. the multi-chromosome (PsyPopulation-style)
        attribute layout, not this class's singular Chrom/Lind -- confirm the
        intended argument type.  Legality of the data is not checked.
        """
        # Chroms & Linds
        for i in range(population.ChromNum):
            Chroms = pd.read_csv('GA_Search/Result/Chroms' +
                                 str(i) + '.csv', header=None)
            population.Linds[i] = Chroms.shape[1]
            population.Chroms[i] = np.array(Chroms)
        # ObjV
        if os.path.exists('GA_Search/Result/ObjV.csv'):
            ObjV = pd.read_csv('GA_Search/Result/ObjV.csv', header=None)
            population.ObjV = np.array(ObjV)
        # FitnV
        if os.path.exists('GA_Search/Result/FitnV.csv'):
            FitnV = pd.read_csv('GA_Search/Result/FitnV.csv', header=None)
            population.FitnV = np.array(FitnV)
        # CV
        if os.path.exists('GA_Search/Result/CV.csv'):
            CV = pd.read_csv('GA_Search/Result/CV.csv', header=None)
            population.CV = np.array(CV)
        # Phen
        if os.path.exists('GA_Search/Result/Phen.csv'):
            Phen = pd.read_csv('GA_Search/Result/Phen.csv', header=None)
            population.Phen = np.array(Phen)
        return population
    def warmup_Chrom(self, individual, NIND):
        """Initialize the chromosome matrix from a given phenotype.

        `individual` is either a single phenotype row (tiled NIND times) or a
        full (NIND, Dim) phenotype matrix; it is encoded into chromosomes via
        dec2bi and the population is resized to NIND.
        """
        individual = np.array(individual)
        if individual.ndim == 1:
            # Single individual: replicate it to fill the whole population.
            Phen = np.expand_dims(individual, 0).repeat(
                NIND, axis=0)
        elif individual.ndim == 2 and individual.shape[0] == NIND:
            Phen = individual
        else:
            # NOTE(review): Phen is left unbound on this branch, so the
            # dec2bi call below raises NameError -- an explicit raise would
            # be clearer; left as-is to preserve behavior.
            print("The number of individuals does not correspond to the set population size, please check the dimension of individual")
        self.sizes = NIND
        self.Chrom = self.dec2bi(Phen, self.Field)
        self.Lind = self.Chrom.shape[1]
        self.ObjV = None
        self.FitnV = None
        self.CV = None
    def bi2dec(self, Chrom, Field):
        """Decode a binary chromosome matrix into integer decision variables.

        Field row 0 holds per-variable bit lengths, row 1 lower bounds and
        row 2 upper bounds; each variable's bits are read as an unsigned
        binary number, offset by the lower bound and clamped to the upper
        bound.  Returns an int matrix with one row per individual.
        """
        lengthVars = np.cumsum(Field[0])
        posVars = np.concatenate(([0], lengthVars[:-1]))  # start bit offset of each variable
        numVar = Field.shape[1]
        temporary = np.zeros((Chrom.shape[0], numVar))
        for i in range(Chrom.shape[0]):
            phen_i = np.zeros(numVar)
            for var in range(numVar):
                total = 0
                var_s = int(posVars[var])
                var_e = int(posVars[var] + Field[0][var])
                # Most-significant bit first within each variable's bit span.
                for j in range(var_s, var_e):
                    total += Chrom[i][j] * int((math.pow(2, var_e - j - 1)))
                if total + Field[1, var] <= Field[2, var]:
                    phen_i[var] = total + Field[1, var]
                else:
                    # Out-of-range codes are clamped to the upper bound.
                    phen_i[var] = Field[2, var]
            temporary[i] = phen_i
        temporary = temporary.astype(int)
        return temporary
    def dec2bi(self, Phen, Field):
        """Encode integer decision variables into a binary chromosome matrix.

        Inverse of bi2dec: each variable is shifted by its lower bound
        (Field row 1), converted to binary, and zero-padded on the left to
        its bit length (Field row 0).  Returns an int matrix with one row
        per individual.
        """
        lengthVars = Field[0]
        # NOTE(review): posVars is computed but never used below.
        posVars = np.concatenate(
            ([0], np.cumsum(lengthVars)[:-1]))
        pop_size = Phen.shape[0]
        var_num = Phen.shape[1]
        Chrom = np.zeros((pop_size, int(np.sum(lengthVars))))
        for i in range(pop_size):
            Chrom_i = []
            for var in range(var_num):
                num = Phen[i][var] - Field[1, var]
                arry = []
                # Collect binary digits least-significant first ...
                while True:
                    arry.append(num % 2)
                    num = num // 2
                    if num == 0:
                        break
                # ... then reverse to most-significant-first order.
                bi = np.array(arry[::-1])
                zero_need = int(lengthVars[var] - len(bi))
                bi_full = np.concatenate(([0] * zero_need, bi))  # left-pad to the bit length
                Chrom_i += list(bi_full.astype(int))
            Chrom[i] = np.array(Chrom_i)
        Chrom = Chrom.astype(int)
        return Chrom
class PsyPopulation(ea.Population):
"""
PsyPopulation : class - Polychromosomal Population class (Popysomy Population)
Description:
The class Popysomy Population is a class used to store information about populations that contain multiple chromosomes per individual.
This class is similar to the population class Population, except that it can contain multiple chromosomes and therefore supports complex mixed coding.
Attributes:
sizes : int - The population size, i.e. the number of individuals in the population.
ChromNum : int - The number of chromosomes, i.e. how many chromosomes are present in each individual.
Encodings : list - The list that stores the encoding method of each chromosome.
Fields : list - A list of the corresponding decoding matrices for each chromosome.
Chroms : list - stores the list of chromosome matrices for each population.
Linds : list - List of chromosome lengths of the population.
ObjV : array - Matrix of population objective function values, each row corresponds to an individual objective function value, each column corresponds to an objective.
FitnV : array - The vector of individual fitnesses of the population, each element corresponds to the fitness of an individual, and the minimum fitness is 0.
CV : array - CV (Constraint Violation Value) is a matrix used to quantitatively describe the degree of constraint violation, each row corresponds to an individual and each column corresponds to a constraint.
Note: When no constraints are set, CV is set to None.
Phen : array - the population expression matrix (i.e., the matrix composed of the decision variables represented by the chromosomes after decoding).
Function :
See the source code for details.
"""
def __init__(self, Encodings, Fields, NIND, Chroms=None, ObjV=None, FitnV=None, CV=None, Phen=None):
"""
Description: Constructor for the population class, used to instantiate the population object, e.g.
import geatpy as ea
population = ea.PsyPopulation(Encodings, Fields, NIND), the
NIND is the number of individuals needed.
At this point the population is not really initialized, it is only the instantiation of the population object.
The constructor must be passed in Chroms to complete the real initialization of the population.
At first, you can pass only Encodings, Fields and NIND to complete the instantiation of the population object.
Other properties can be assigned later by calculation.
"""
if type(NIND) is int and NIND >= 0:
self.sizes = NIND
else:
raise RuntimeError(
'error in PysPopulation: Size error.')
self.ChromNum = len(Encodings)
if self.ChromNum == 1:
raise RuntimeError(
'error in PysPopulation: ChromNum must be bigger than 1.')
self.Encodings = Encodings
self.Fields = Fields.copy()
self.Chroms = [None] * self.ChromNum
self.Linds = []
if Chroms is None:
self.Linds = [0] * self.ChromNum
else:
for i in range(self.ChromNum):
if Chroms[i] is not None:
self.Linds.append(Chroms[i].shape[1])
self.Chroms[i] = Chroms[i].copy(
) if Chroms[i] is not None else None
else:
self.Linds.append(0)
self.ObjV = ObjV.copy() if ObjV is not None else None
self.FitnV = FitnV.copy() if FitnV is not None else None
self.CV = CV.copy() if CV is not None else None
self.Phen = Phen.copy() if Phen is not None else None
def initChrom(self, NIND=None):
"""
Description: Initializes the population chromosome matrix with NIND as the desired number of individuals.
NIND can be defaulted, if not, the population will be resized to NIND before initializing the chromosome matrix.
"""
if NIND is not None:
self.sizes = NIND
for i in range(self.ChromNum):
self.Chroms[i] = ea.crtpc(
self.Encodings[i], self.sizes, self.Fields[i])
self.Linds.append(self.Chroms[i].shape[1])
self.ObjV = None
self.FitnV = np.ones((self.sizes, 1))
self.CV = None
def warmup_Chroms(self, individual, NIND):
"""
Description: Initializes the population chromosome matrix with NIND as the desired number of individuals.
NIND can be defaulted, if not, the population will be resized to NIND before initializing the chromosome matrix.
"""
individual = np.array(individual)
if individual.ndim == 1:
Phen = np.expand_dims(individual, 0).repeat(
NIND, axis=0)
elif individual.ndim == 2 and individual.shape[0] == NIND:
Phen = individual
else:
print("The number of individuals does not correspond to the set population size, please check the dimension of individual")
idx = 0
for i in range(self.ChromNum):
self.Chroms[i] = self.dec2bi(
Phen[:, idx: idx+self.Fields[i].shape[1]], self.Fields[i])
self.Linds.append(self.Chroms[i].shape[1])
idx += self.Fields[i].shape[1]
self.sizes = NIND
self.ObjV = None
self.FitnV = None
self.CV = None
def bi2dec(self, Chrom, Field):
lengthVars = np.cumsum(Field[0])
posVars = np.concatenate(([0], lengthVars[:-1]))
numVar = Field.shape[1]
temporary = np.zeros((Chrom.shape[0], numVar))
for i in range(Chrom.shape[0]):
phen_i = np.zeros(numVar)
for var in range(numVar):
total = 0
var_s = int(posVars[var])
var_e = int(posVars[var] + Field[0][var])
for j in range(var_s, var_e):
total += Chrom[i][j] * int((math.pow(2, var_e - j - 1)))
if total + Field[1, var] <= Field[2, var]:
phen_i[var] = total + Field[1, var]
else:
phen_i[var] = Field[2, var]
temporary[i] = phen_i
temporary = temporary.astype(int)
return temporary
def dec2bi(self, Phen, Field):
lengthVars = Field[0]
posVars = np.concatenate(
([0], np.cumsum(lengthVars)[:-1]))
pop_size = Phen.shape[0]
var_num = Phen.shape[1]
Chrom = np.zeros((pop_size, int(np.sum(lengthVars))))
for i in range(pop_size):
Chrom_i = []
for var in range(var_num):
num = Phen[i][var] - Field[1, var]
arry = []
while True:
arry.append(num % 2)
num = num // 2
if num == 0:
break
bi = np.array(arry[::-1])
zero_need = int(lengthVars[var] - len(bi))
bi_full = np.concatenate(([0] * zero_need, bi))
Chrom_i += list(bi_full.astype(int))
Chrom[i] = np.array(Chrom_i)
Chrom = Chrom.astype(int)
return Chrom
def decoding(self):
"""
Description: Population chromosome decoding.
"""
Phen = np.ones((self.sizes, 0))
for i in range(self.ChromNum):
if self.Encodings[i] == 'BG':
tempPhen = self.bi2dec(self.Chroms[i], self.Fields[i])
# tempPhen = ea.bs2ri(
elif self.Encodings[i] == 'RI' or self.Encodings[i] == 'P':
tempPhen = self.Chroms[i].copy()
else:
raise RuntimeError(
'error in PsyPopulation.decoding: Encoding must be ''BG'' or ''RI'' or ''P''.')
Phen = np.hstack([Phen, tempPhen])
return Phen
def copy(self):
"""
copy : function - duplication of populations
Usage:
Suppose pop is a population matrix, then: pop1 = pop.copy() completes the copy of pop population.
"""
return PsyPopulation(self.Encodings,
self.Fields,
self.sizes,
self.Chroms,
self.ObjV,
self.FitnV,
self.CV,
self.Phen)
def __getitem__(self, index):
"""
Description: Slicing of a population, i.e., selecting the corresponding individuals in the population according to the index subscript vector to form a new population.
Usage: Suppose pop is a population matrix containing more than 2 individuals, then.
pop1 = pop[[0,1]] to obtain the population consisting of the 1st and 2nd individuals of the pop population.
Note: The legality of index is not checked here.
"""
NewChroms = []
for i in range(self.ChromNum):
if self.Chroms[i] is None:
raise RuntimeError(
'error in PsyPopulation: Chrom[i] is None.')
NewChroms.append(self.Chroms[i][index])
NIND = NewChroms[0].shape[0]
return PsyPopulation(self.Encodings,
self.Fields,
NIND,
NewChroms,
self.ObjV[index] if self.ObjV is not None else None,
self.FitnV[index] if self.FitnV is not None else None,
self.CV[index] if self.CV is not None else None,
self.Phen[index] if self.Phen is not None else None)
def shuffle(self):
"""
shuffle : function - Shuffle the order of individuals in a population
Usage: Assuming pop is a population matrix, then pop.shuffle() can be used to shuffle the order of individuals in the pop population.
"""
shuff = np.argsort(np.random.rand(self.sizes))
for i in range(self.ChromNum):
if self.Chroms[i] is None:
raise RuntimeError(
'error in PsyPopulation: Chrom[i] is None. ')
self.Chroms[i] = self.Chroms[i][shuff, :]
self.ObjV = self.ObjV[shuff, :] if self.ObjV is not None else None
self.FitnV = self.FitnV[shuff] if self.FitnV is not None else None
self.CV = self.CV[shuff, :] if self.CV is not None else None
self.Phen = self.Phen[shuff, :] if self.Phen is not None else None
def __setitem__(self, index, pop):
"""
Description: Assignment of population individuals
Usage: Suppose pop is a population matrix with more than 2 individuals, and pop1 is another population matrix with 2 individuals, then
pop[[0,1]] = pop1 to assign the first and second individuals of pop population to the individuals of pop1 population.
Note: index must be a row vector of type Numpy array, this function does not check the legitimacy of the index passed in.
In addition, the function does not actively reset the fitness of the population after replacing individuals.
If the fitness of all individuals needs to be re-evaluated due to individual replacement, the fitness of the population needs to be updated by handwritten code.
"""
for i in range(self.ChromNum):
if self.Encodings[i] != pop.Encodings[i]:
raise RuntimeError(
'error in PsyPopulation: Encoding disagree.')
if np.all(self.Fields[i] == pop.Fields[i]) == False:
raise RuntimeError(
'error in PsyPopulation: Field disagree. ')
if self.Chroms[i] is None:
raise RuntimeError(
'error in PsyPopulation: Chrom[i] is None.')
self.Chroms[i][index] = pop.Chroms[i]
if (self.ObjV is None) ^ (pop.ObjV is None):
raise RuntimeError(
'error in PsyPopulation: ObjV disagree. ')
if (self.FitnV is None) ^ (pop.FitnV is None):
raise RuntimeError(
'error in PsyPopulation: FitnV disagree.')
if (self.CV is None) ^ (pop.CV is None):
raise RuntimeError(
'error in PsyPopulation: CV disagree. ')
if (self.Phen is None) ^ (pop.Phen is None):
raise RuntimeError(
'error in PsyPopulation: Phen disagree.')
if self.ObjV is not None:
self.ObjV[index] = pop.ObjV
if self.FitnV is not None:
self.FitnV[index] = pop.FitnV
if self.CV is not None:
self.CV[index] = pop.CV
if self.Phen is not None:
self.Phen[index] = pop.Phen
self.sizes = self.Phen.shape[0]
def __add__(self, pop):
"""
Description: Merge populations of individuals
Usage: Suppose pop1, pop2 are two populations, their number of individuals can be equal or unequal, then
pop = pop1 + pop2 to merge the individuals of pop1 and pop2 populations.
Note that.
This function does not actively reset the fitness after performing a population merge.
If you need to re-evaluate the fitness of all individuals due to population merging, you need to update the fitness of the population by handwriting the code.
"""
NIND = self.sizes + pop.sizes
NewChroms = self.Chroms
for i in range(self.ChromNum):
if self.Encodings[i] != pop.Encodings[i]:
raise RuntimeError(
'error in PsyPopulation: Encoding disagree. ')
if np.all(self.Fields[i] == pop.Fields[i]) == False:
raise RuntimeError(
'error in PsyPopulation: Field disagree.')
if self.Chroms[i] is None or pop.Chroms[i] is None:
raise RuntimeError(
'error in PsyPopulation: Chrom is None.')
NewChroms[i] = np.vstack([NewChroms[i], pop.Chroms[i]])
if (self.ObjV is None) ^ (pop.ObjV is None):
raise RuntimeError(
'error in PsyPopulation: ObjV disagree.')
if (self.CV is None) ^ (pop.CV is None):
raise RuntimeError(
'error in PsyPopulation: CV disagree. ')
if (self.Phen is None) ^ (pop.Phen is None):
raise RuntimeError(
'error in PsyPopulation: Phen disagree.')
return PsyPopulation(self.Encodings,
self.Fields,
NIND,
NewChroms,
np.vstack([self.ObjV, pop.ObjV]
) if self.ObjV is not None else None,
np.vstack(
[self.FitnV, pop.FitnV]) if self.FitnV is not None and pop.FitnV is not None else None,
np.vstack([self.CV, pop.CV]
) if self.CV is not None else None,
np.vstack([self.Phen, pop.Phen]) if self.Phen is not None else None)
def __len__(self):
"""
Description: Calculate the population size
Usage: Suppose pop is a population, then len(pop) gives the number of individuals in the population.
In fact, the population size can also be obtained from pop.sizes.
"""
return self.sizes
def save(self):
"""
Description: Saves the information about the population to a file.
This function will save the information of the population in the "Result" folder, where.
"Encodingsi.txt" holds the chromosome code of the population, i is 0,1,2,3... i is 0,1,2,3...
"Fieldsi.csv" holds the decoding matrix of the population chromosomes, i is 0,1,2,3...; "Fieldsi.csv" holds the decoding matrix of the population chromosomes, i is 0,1,2,3... ;.
"Chromsi.csv" holds the chromosome matrix of the population, i is 0,1,2,3... ;.
"ObjV.csv" holds the matrix of the population's objective function.
"FitnV.csv" holds the fitness column vectors of the population individuals.
"CV.csv" holds the matrix of constraint violations of the population individuals.
"Phen.csv" holds the population chromosome phenotype matrix.
Note: this function does not check the legality of the population.
"""
if os.path.exists('Result') == False:
os.makedirs('Result')
for i in range(self.ChromNum):
with open('Result/Encodings' + str(i) + '.txt', 'w') as file:
file.write(str(self.Encodings[i]))
file.close()
np.savetxt('Result/Fields' + str(i) + '.csv',
self.Fields[i], delimiter=',')
np.savetxt('Result/Chroms' + str(i) + '.csv',
self.Chroms[i], delimiter=',')
if self.ObjV is not None:
np.savetxt('Result/ObjV.csv', self.ObjV, delimiter=',')
if self.FitnV is not None:
np.savetxt('Result/FitnV.csv', self.FitnV, delimiter=',')
if self.CV is not None:
np.savetxt('Result/CV.csv', self.CV, delimiter=',')
if self.Phen is not None:
np.savetxt('Result/Phen.csv', self.Phen, delimiter=',')
class Algorithm:
    """Top-level parent class for all algorithm templates.

    Stores the run-time configuration shared by every concrete algorithm
    template (single- and multi-objective).

    Attributes:
        name       : str   - algorithm name (free-form).
        problem    : Problem - the optimisation problem instance.
        MAXGEN     : int   - maximum number of evolutionary generations.
        currentGen : int   - current generation counter.
        MAXTIME    : float - wall-clock time limit (seconds).
        timeSlot   : float - timestamp of the last checkpoint (seconds).
        passTime   : float - accumulated elapsed time (seconds).
        MAXEVALS   : int   - maximum number of objective evaluations.
        evalsNum   : int   - objective evaluations performed so far.
        MAXSIZE    : int   - maximum number of optimal solutions to keep.
        population : Population - the working population object.
        drawing    : int   - plotting mode: 0 none, 1 final result,
                             2 live objective space, 3 live decision space.

    Methods:
        terminated()    : decide whether to stop; implemented by subclasses.
        run()           : execute the algorithm; implemented by subclasses.
        check(pop)      : warn about NaN/Inf entries in pop.ObjV / pop.CV.
        call_aimFunc(pop): evaluate the problem's objective (and constraints).
    """

    def __init__(self):
        # All fields start unset; concrete templates fill them in.
        self.name = 'Algorithm'
        self.problem = None
        self.MAXGEN = None
        self.currentGen = None
        self.MAXTIME = None
        self.timeSlot = None
        self.passTime = None
        self.MAXEVALS = None
        self.evalsNum = None
        self.MAXSIZE = None
        self.population = None
        self.drawing = None

    def terminated(self):
        """Decide whether evolution should stop; overridden by subclasses."""
        pass

    def run(self):
        """Execute the algorithm; overridden by subclasses."""
        pass

    def check(self, pop):
        """Warn when pop.ObjV or pop.CV contain NaN or Inf entries."""
        objv = pop.ObjV
        if np.any(np.isnan(objv)):
            warnings.warn(
                "Warning: Some elements of ObjV are NAN, please check the calculation of ObjV.",
                RuntimeWarning)
        elif np.any(np.isinf(objv)):
            warnings.warn(
                "Warning: Some elements of ObjV are Inf, please check the calculation of ObjV.",
                RuntimeWarning)
        cv = pop.CV
        if cv is not None:
            if np.any(np.isnan(cv)):
                warnings.warn(
                    "Warning: Some elements of CV are NAN, please check the calculation of CV.",
                    RuntimeWarning)
            elif np.any(np.isinf(cv)):
                warnings.warn(
                    "Warning: Some elements of CV are Inf, please check the calculation of CV.",
                    RuntimeWarning)

    def call_aimFunc(self, pop):
        """Decode *pop*, evaluate the objective function and validate results.

        Calls self.problem.aimFunc(pop), which must fill pop.ObjV (and pop.CV
        when the problem is constrained); the evaluation counter evalsNum is
        incremented by pop.sizes.  After the call, pop.ObjV holds the
        objective values and pop.CV the constraint violations (if any).

        Raises:
            RuntimeError: when no problem is attached, or when the computed
                ObjV / CV matrices have an illegal type or shape.
        """
        pop.Phen = pop.decoding()
        if self.problem is None:
            raise RuntimeError(
                'error: problem has not been initialized.')
        self.problem.aimFunc(pop)
        # Accumulate the evaluation counter: one evaluation per individual.
        if self.evalsNum is None:
            self.evalsNum = pop.sizes
        else:
            self.evalsNum = self.evalsNum + pop.sizes
        objv = pop.ObjV
        # ObjV must be a 2-D ndarray of shape (pop.sizes, problem.M).
        if (type(objv) != np.ndarray or objv.ndim != 2
                or objv.shape[0] != pop.sizes or objv.shape[1] != self.problem.M):
            raise RuntimeError(
                'error: ObjV is illegal. ')
        cv = pop.CV
        # CV, when present, must be a 2-D ndarray with pop.sizes rows.
        if cv is not None:
            if type(cv) != np.ndarray or cv.ndim != 2 or cv.shape[0] != pop.sizes:
                raise RuntimeError(
                    'error: CV is illegal.')
class soea_SEGA_templet(ea.SoeaAlgorithm):
    """Strengthen Elitist GA template (single-chromosome version).

    Algorithm flow:
        1) Initialise a population of N individuals per the encoding rules.
        2) Stop if the termination condition holds, otherwise continue.
        3) Record statistics of the current population (best individual,
           average fitness, ...).
        4) Independently select N parents from the current population.
        5) Independently apply crossover to the N parents.
        6) Independently mutate the N crossed individuals.
        7) Merge parents and offspring into a population of size 2N.
        8) Select N survivors from the merged population.
        9) Go back to step 2.

    Use fairly large crossover and mutation probabilities with this
    algorithm, otherwise the new generations accumulate more and more
    duplicated individuals.
    """

    def __init__(self, problem, population):
        ea.SoeaAlgorithm.__init__(self, problem, population)
        if population.ChromNum != 1:
            raise RuntimeError(
                'The incoming population object must be a single chromosome population type.')
        self.name = 'SEGA'
        self.selFunc = 'tour'
        self.MAXGEN = problem.MAXGEN
        # Choose recombination / mutation operators to match the encoding.
        if population.Encoding == 'P':
            self.recOper = ea.Xovpmx(XOVR=0.7)
            self.mutOper = ea.Mutinv(Pm=0.5)
        else:
            self.recOper = ea.Xovdp(XOVR=0.7)
            if population.Encoding == 'BG':
                self.mutOper = ea.Mutbin(Pm=None)
            elif population.Encoding == 'RI':
                self.mutOper = ea.Mutbga(
                    Pm=1 / self.problem.Dim, MutShrink=0.5, Gradient=20)
            else:
                raise RuntimeError(
                    ' The encoding method must be ''BG'', ''RI'' or ''P''...')

    def run(self, individual, prophetPop=None):
        """Run the GA.

        *individual* seeds the chromosome warm-up; *prophetPop*, when given,
        injects known-good individuals into the initial population.
        Returns whatever self.finishing() produces.
        """
        # -------------------- initial configuration --------------------
        pop = self.population
        NIND = pop.sizes
        self.initialization()
        # ------------------------- preparation --------------------------
        pop.warmup_Chrom(individual, NIND)
        self.call_aimFunc(pop)
        if prophetPop is not None:
            # Prophet individuals take the leading slots.
            pop = (prophetPop + pop)[:NIND]
        pop.FitnV = ea.scaling(pop.ObjV, pop.CV, self.problem.maxormins)
        # ----------------------- evolutionary loop ----------------------
        while not self.terminated(pop):
            offspring = pop[ea.selecting(self.selFunc, pop.FitnV, NIND)]
            offspring.Chrom = self.recOper.do(offspring.Chrom)
            offspring.Chrom = self.mutOper.do(
                offspring.Encoding, offspring.Chrom, offspring.Field)
            self.call_aimFunc(offspring)
            pop = pop + offspring
            pop.FitnV = ea.scaling(pop.ObjV, pop.CV, self.problem.maxormins)
            # 'dup' selection keeps the N best of the merged 2N individuals.
            pop = pop[ea.selecting('dup', pop.FitnV, NIND)]
        return self.finishing(pop)
class MoeaAlgorithm(Algorithm):
    """
    Description:
        This is the parent class of the multi-objective evolutionary optimization algorithm templates, from which all multi-objective optimization algorithm templates are inherited.
        To make the algorithms also good at solving constrained optimization problems, a "forgetting strategy" is added:
        when a generation contains no feasible individual, the evolutionary recorder ignores that generation and records
        nothing for it, without affecting the evolution itself.
    """
    def __init__(self, problem, population):
        # Base class sets all shared counters/limits to None.
        Algorithm.__init__(self)
        self.problem = problem
        self.population = population
        self.drawing = 0            # plotting mode (0 = off)
        self.ax = None              # axes handle reused by live plotting
        self.forgetCount = None     # consecutive generations with no feasible individual
        self.maxForgetCount = None  # forgetting-strategy cap (set in initialization())
        self.pop_trace = None       # recorder: feasible sub-population per generation
    def initialization(self):
        """
        Description: Initialize the dynamic parameters of the algorithm template before evolution starts.
        Must be called at the beginning of the template's run() method; it also starts the timer,
        ensuring all these parameters are initialized correctly.
        """
        self.ax = None
        self.passTime = 0
        self.forgetCount = 0
        self.maxForgetCount = 100000
        self.pop_trace = []
        self.currentGen = 0  # Set initial to generation 0
        self.evalsNum = 0
        self.timeSlot = time.time()  # start the timer
    def stat(self, population):
        """Record this generation's feasible individuals into pop_trace; apply the forgetting strategy when none are feasible."""
        # Indices of feasible individuals (all constraint violations <= 0); when the
        # problem is unconstrained (CV is None) every individual is feasible.
        feasible = np.where(np.all(population.CV <= 0, 1))[0] if population.CV is not None else np.array(
            range(population.sizes))
        if len(feasible) > 0:
            tempPop = population[feasible]
            self.pop_trace.append(tempPop)
            self.forgetCount = 0
            # Pause the timer while (optionally) live-plotting.
            self.passTime += time.time() - self.timeSlot
            if self.drawing == 2:
                self.ax = ea.moeaplot(
                    tempPop.ObjV, 'objective values', False, self.ax, self.currentGen, gridFlag=True)
            elif self.drawing == 3:
                self.ax = ea.varplot(tempPop.Phen, 'decision variables', False, self.ax, self.currentGen,
                                     gridFlag=False)
            self.timeSlot = time.time()
        else:
            # Forgetting strategy: roll the generation counter back so this
            # infeasible generation is neither counted nor recorded.
            self.currentGen -= 1
            self.forgetCount += 1
    def terminated(self, population):
        """
        Description:
            Decide whether evolution should terminate; *population* is the incoming population.
            Returns True to stop, False (after advancing currentGen) to continue.
        """
        self.check(population)
        self.stat(population)
        if self.currentGen + 1 >= self.MAXGEN or self.forgetCount >= self.maxForgetCount:
            return True
        else:
            self.currentGen += 1
            return False
    def finishing(self, population):
        """
        The function to call when the evolution is complete.
        Performs non-dominated sorting and returns the feasible first-level
        (non-dominated) individuals as the result set NDSet.
        """
        [levels, criLevel] = ea.ndsortDED(
            population.ObjV, None, 1, population.CV, self.problem.maxormins)
        NDSet = population[np.where(levels == 1)[0]]
        if NDSet.CV is not None:
            # Keep only the feasible non-dominated individuals.
            NDSet = NDSet[np.where(np.all(NDSet.CV <= 0, 1))[0]]
        self.passTime += time.time() - self.timeSlot
        # if self.drawing != 0:
        #     if NDSet.ObjV.shape[1] == 2 or NDSet.ObjV.shape[1] == 3:
        #         ea.moeaplot(NDSet.ObjV, 'Pareto Front', saveFlag=True, gridFlag=True)
        #     else:
        #         ea.moeaplot(NDSet.ObjV, 'Value Path', saveFlag=True, gridFlag=False)
        return NDSet
class SoeaAlgorithm(Algorithm):
    """
    Description:
        Parent class of the single-objective evolutionary algorithm templates,
        from which all single-objective optimization templates inherit.
        A "forgetting strategy" is added: when a generation contains no
        feasible individual, the recorder skips that generation (it is neither
        counted nor recorded), without disturbing the evolution itself.
    """

    def __init__(self, problem, population):
        Algorithm.__init__(self)
        self.problem = problem
        self.population = population
        self.drawing = 0                 # plotting mode (0 = off)
        self.forgetCount = None          # consecutive infeasible generations
        self.maxForgetCount = 100000     # forgetting-strategy cap
        self.trappedCount = 0            # consecutive stagnant generations
        self.trappedValue = 0            # improvement threshold for stagnation
        self.maxTrappedCount = 10000000  # stagnation cap
        self.preObjV = np.nan            # best objective value of the previous generation
        self.ax = None                   # axes handle reused by live plotting

    def initialization(self):
        """
        Description: Initialize the dynamic parameters of the algorithm template
        before evolution starts.  Must be called at the beginning of run();
        it also starts the timer, ensuring all parameters are set correctly.
        """
        self.ax = None
        self.passTime = 0
        self.forgetCount = 0
        self.preObjV = np.nan
        self.trappedCount = 0
        # Objective recorder: column 0 = population mean, column 1 = best.
        self.obj_trace = np.zeros((self.MAXGEN, 2)) * np.nan
        # NOTE: the decision-variable recorder var_trace is deliberately NOT
        # allocated here (to save memory); finishing() tolerates its absence.
        self.currentGen = 0
        self.evalsNum = 0
        self.timeSlot = time.time()

    def stat(self, population):
        """Record mean/best objective values of this generation's feasible individuals."""
        # Feasible individuals (all constraint violations <= 0); everyone is
        # feasible when the problem has no constraints (CV is None).
        feasible = np.where(np.all(population.CV <= 0, 1))[0] if population.CV is not None else np.array(
            range(population.sizes))
        if len(feasible) > 0:
            tempPop = population[feasible]
            bestIdx = np.argmax(tempPop.FitnV)
            self.obj_trace[self.currentGen, 0] = np.sum(
                tempPop.ObjV) / tempPop.sizes
            self.obj_trace[self.currentGen, 1] = tempPop.ObjV[bestIdx]
            self.forgetCount = 0
            # Stagnation detection: count generations whose best objective
            # changed by less than trappedValue.
            if np.abs(self.preObjV - self.obj_trace[self.currentGen, 1]) < self.trappedValue:
                self.trappedCount += 1
            else:
                self.trappedCount = 0
            # Pause the timer while (optionally) live-plotting.
            self.passTime += time.time() - self.timeSlot
            if self.drawing == 2:
                self.ax = ea.soeaplot(self.obj_trace[:, [1]], Label='Objective Value', saveFlag=False, ax=self.ax,
                                      gen=self.currentGen, gridFlag=False)
            elif self.drawing == 3:
                self.ax = ea.varplot(tempPop.Phen, Label='decision variables', saveFlag=False, ax=self.ax,
                                     gen=self.currentGen, gridFlag=False)
            self.timeSlot = time.time()
        else:
            # Forgetting strategy: do not count this infeasible generation.
            self.currentGen -= 1
            self.forgetCount += 1

    def terminated(self, population):
        """
        Description:
            Decide whether evolution should terminate; *population* is the
            incoming population.  Returns True to stop, False to continue.
        """
        self.check(population)
        self.stat(population)
        if self.currentGen + 1 >= self.MAXGEN or self.forgetCount >= self.maxForgetCount or self.trappedCount >= self.maxTrappedCount:
            return True
        else:
            self.preObjV = self.obj_trace[self.currentGen, 1]
            self.currentGen += 1
            return False

    def finishing(self, population):
        """
        The function to call when the evolution is complete.
        Trims un-recorded (NaN) generations from the traces and returns
        [population, obj_trace, var_trace].
        """
        delIdx = np.where(np.isnan(self.obj_trace))[0]
        self.obj_trace = np.delete(self.obj_trace, delIdx, 0)
        # BUGFIX: var_trace is never allocated in initialization() (its
        # allocation was commented out to save memory), so the original
        # np.delete(self.var_trace, ...) always raised AttributeError.
        # Fall back to an empty (rows x 0) placeholder in that case.
        var_trace = getattr(self, 'var_trace', None)
        if var_trace is not None:
            self.var_trace = np.delete(var_trace, delIdx, 0)
        else:
            self.var_trace = np.empty((self.obj_trace.shape[0], 0))
        if self.obj_trace.shape[0] == 0:
            raise RuntimeError(
                'error: No feasible solution.')
        self.passTime += time.time() - self.timeSlot
        return [population, self.obj_trace, self.var_trace]

    def save_profit(self):
        """Return the raw (un-trimmed) objective-value trace obj_trace."""
        return self.obj_trace
class soea_psy_SEGA_templet(ea.SoeaAlgorithm):
    """
    soea_psy_SEGA_templet : class - Polysomy Strengthen Elitist GA templet (Enhanced Elitist Retained Multichromosome Genetic Algorithm Template)
    Template description:
        This template is a polysomal version of the built-in algorithm template soea_SEGA_templet.
        Therefore, the population objects inside are objects of the PsyPopulation class, which supports mixed coding of multichromosomal populations.
    Algorithm description:
        This template implements the genetic algorithm with enhanced elite retention. The algorithm flow is as follows.
        1) Initialize a population of N individuals according to the encoding rules.
        2) Stop if the stopping condition is satisfied, otherwise continue the execution.
        3) Statistical analysis is performed on the current population, such as recording its optimal individuals, average fitness, etc.
        4) Independently select N parents from the current population.
        5) Independently perform crossover operations on these N parents.
        6) Independently mutate these N crossed individuals.
        7) Merge the parent population and the crossover variant to obtain a population of size 2N.
        8) Select N individuals from the merged population according to the selection algorithm to obtain the new generation population.
        9) Return to step 2.
        It is advisable to set a large crossover and mutation probability for this algorithm, otherwise the new generation population generated will have more and more duplicate individuals.
    """
    def __init__(self, problem, population, XOVR):
        ea.SoeaAlgorithm.__init__(self, problem, population)
        if population.ChromNum == 1:
            raise RuntimeError('The incoming population object must be a multichromosomal population type.')
        self.name = 'psy-SEGA'
        self.selFunc = 'tour'  # tournament selection
        self.MAXGEN = problem.MAXGEN
        # One recombination and one mutation operator per chromosome, chosen
        # to match that chromosome's encoding ('P' / 'BG' / 'RI').
        self.recOpers = []
        self.mutOpers = []
        for i in range(population.ChromNum):
            if population.Encodings[i] == 'P':
                recOper = ea.Xovpmx(XOVR=XOVR)
                mutOper = ea.Mutinv(Pm=0.5)
            else:
                recOper = ea.Xovdp(XOVR=XOVR)
                if population.Encodings[i] == 'BG':
                    mutOper = ea.Mutbin(Pm=None)
                elif population.Encodings[i] == 'RI':
                    mutOper = ea.Mutbga(
                        Pm=1 / self.problem.Dim, MutShrink=0.5, Gradient=20)
                else:
                    raise RuntimeError('The encoding method must be ''BG'', ''RI'' or ''P''.')
            self.recOpers.append(recOper)
            self.mutOpers.append(mutOper)
    def save_policy(self, population):
        """Export the phenotype of the current best individual as a "policy" .mat file."""
        bestIdx = np.argmax(population.ObjV)
        bestIdiv = population.Phen[bestIdx]
        raw_policy = bestIdiv
        ''' Standardized policy'''
        policy = np.zeros_like(raw_policy)
        action_num = len(self.problem.vars_dict.keys())
        # NOTE(review): assumes the phenotype is laid out as action-major
        # blocks reshapeable to (action_num, plant_periods * dims), and that
        # each vars_dict value is a tuple where [1] is a scale factor and
        # [2] the per-period dimension count — confirm against the Problem
        # class that defines vars_dict.
        raw_policy = raw_policy.reshape((action_num, -1), order='C')
        Idx = 0
        for d in range(self.problem.plant_periods):
            for i, tup in enumerate(self.problem.vars_dict.items()):
                dims = tup[1][2]
                multi = tup[1][1]
                # Rescale the d-th period slice of action i into the flat policy.
                policy[Idx:Idx+dims] = raw_policy[i,
                                                  d*dims: (d+1)*dims] * multi
                Idx += dims
        # Tag the output file with the current generation number.
        X_best_sofar_path = self.problem.X_best_sofar_path.replace(
            ".mat", "iter@%d.mat" % self.currentGen)
        scio.savemat(X_best_sofar_path,
                     {'policy': list(policy)},
                     do_compression=True)
    def run(self, individual, prophetPop=None):
        """Run the GA; *individual* seeds the warm-up, *prophetPop* optionally injects known-good individuals.  Returns the final population (finishing() is NOT called)."""
        # ==========================Initial Configuration===========================
        population = self.population
        NIND = population.sizes
        self.initialization()
        # ===========================Prepare============================
        population.warmup_Chroms(individual, NIND)
        self.call_aimFunc(population)
        if prophetPop is not None:
            # Prophet individuals take the leading slots.
            population = (prophetPop + population)[:NIND]
        population.FitnV = ea.scaling(
            population.ObjV, population.CV, self.problem.maxormins)
        # ===========================Start to evolve============================
        while self.terminated(population) == False:
            offspring = population[ea.selecting(
                self.selFunc, population.FitnV, NIND)]
            # Apply each chromosome's own crossover and mutation operator.
            for i in range(population.ChromNum):
                offspring.Chroms[i] = self.recOpers[i].do(
                    offspring.Chroms[i])
                offspring.Chroms[i] = self.mutOpers[i].do(offspring.Encodings[i], offspring.Chroms[i],
                                                          offspring.Fields[i])
            self.call_aimFunc(offspring)
            population = population + offspring
            population.FitnV = ea.scaling(
                population.ObjV, population.CV, self.problem.maxormins)
            # 'dup' selection keeps the N best of the merged 2N individuals.
            population = population[ea.selecting(
                'dup', population.FitnV, NIND)]
            # Periodically dump the best-so-far policy to disk.
            if self.currentGen % self.problem.save_interval == 0:
                self.save_policy(population)
        return population
| [
"geatpy.Xovpmx",
"numpy.random.rand",
"pandas.read_csv",
"numpy.hstack",
"numpy.array",
"geatpy.SoeaAlgorithm.__init__",
"geatpy.Mutinv",
"os.path.exists",
"numpy.where",
"numpy.delete",
"numpy.vstack",
"numpy.concatenate",
"warnings.warn",
"numpy.isinf",
"numpy.abs",
"numpy.ones",
"... | [((332, 351), 'numpy.array', 'np.array', (['maxormins'], {}), '(maxormins)\n', (340, 351), True, 'import numpy as np\n'), ((399, 417), 'numpy.array', 'np.array', (['varTypes'], {}), '(varTypes)\n', (407, 417), True, 'import numpy as np\n'), ((440, 458), 'numpy.array', 'np.array', (['[lb, ub]'], {}), '([lb, ub])\n', (448, 458), True, 'import numpy as np\n'), ((482, 504), 'numpy.array', 'np.array', (['[lbin, ubin]'], {}), '([lbin, ubin])\n', (490, 504), True, 'import numpy as np\n'), ((7321, 7368), 'geatpy.crtpc', 'ea.crtpc', (['self.Encoding', 'self.sizes', 'self.Field'], {}), '(self.Encoding, self.sizes, self.Field)\n', (7329, 7368), True, 'import geatpy as ea\n'), ((18703, 18746), 'os.path.exists', 'os.path.exists', (['"""GA_Search/Result/ObjV.csv"""'], {}), "('GA_Search/Result/ObjV.csv')\n", (18717, 18746), False, 'import os\n'), ((18893, 18937), 'os.path.exists', 'os.path.exists', (['"""GA_Search/Result/FitnV.csv"""'], {}), "('GA_Search/Result/FitnV.csv')\n", (18907, 18937), False, 'import os\n'), ((19085, 19126), 'os.path.exists', 'os.path.exists', (['"""GA_Search/Result/CV.csv"""'], {}), "('GA_Search/Result/CV.csv')\n", (19099, 19126), False, 'import os\n'), ((19264, 19307), 'os.path.exists', 'os.path.exists', (['"""GA_Search/Result/Phen.csv"""'], {}), "('GA_Search/Result/Phen.csv')\n", (19278, 19307), False, 'import os\n'), ((19782, 19802), 'numpy.array', 'np.array', (['individual'], {}), '(individual)\n', (19790, 19802), True, 'import numpy as np\n'), ((20419, 20438), 'numpy.cumsum', 'np.cumsum', (['Field[0]'], {}), '(Field[0])\n', (20428, 20438), True, 'import numpy as np\n'), ((20457, 20495), 'numpy.concatenate', 'np.concatenate', (['([0], lengthVars[:-1])'], {}), '(([0], lengthVars[:-1]))\n', (20471, 20495), True, 'import numpy as np\n'), ((20548, 20582), 'numpy.zeros', 'np.zeros', (['(Chrom.shape[0], numVar)'], {}), '((Chrom.shape[0], numVar))\n', (20556, 20582), True, 'import numpy as np\n'), ((26530, 26554), 'numpy.ones', 'np.ones', 
(['(self.sizes, 1)'], {}), '((self.sizes, 1))\n', (26537, 26554), True, 'import numpy as np\n'), ((26907, 26927), 'numpy.array', 'np.array', (['individual'], {}), '(individual)\n', (26915, 26927), True, 'import numpy as np\n'), ((27726, 27745), 'numpy.cumsum', 'np.cumsum', (['Field[0]'], {}), '(Field[0])\n', (27735, 27745), True, 'import numpy as np\n'), ((27764, 27802), 'numpy.concatenate', 'np.concatenate', (['([0], lengthVars[:-1])'], {}), '(([0], lengthVars[:-1]))\n', (27778, 27802), True, 'import numpy as np\n'), ((27855, 27889), 'numpy.zeros', 'np.zeros', (['(Chrom.shape[0], numVar)'], {}), '((Chrom.shape[0], numVar))\n', (27863, 27889), True, 'import numpy as np\n'), ((29595, 29619), 'numpy.ones', 'np.ones', (['(self.sizes, 0)'], {}), '((self.sizes, 0))\n', (29602, 29619), True, 'import numpy as np\n'), ((46437, 46489), 'geatpy.SoeaAlgorithm.__init__', 'ea.SoeaAlgorithm.__init__', (['self', 'problem', 'population'], {}), '(self, problem, population)\n', (46462, 46489), True, 'import geatpy as ea\n'), ((47844, 47910), 'geatpy.scaling', 'ea.scaling', (['population.ObjV', 'population.CV', 'self.problem.maxormins'], {}), '(population.ObjV, population.CV, self.problem.maxormins)\n', (47854, 47910), True, 'import geatpy as ea\n'), ((50325, 50336), 'time.time', 'time.time', ([], {}), '()\n', (50334, 50336), False, 'import time\n'), ((51821, 51898), 'geatpy.ndsortDED', 'ea.ndsortDED', (['population.ObjV', 'None', '(1)', 'population.CV', 'self.problem.maxormins'], {}), '(population.ObjV, None, 1, population.CV, self.problem.maxormins)\n', (51833, 51898), True, 'import geatpy as ea\n'), ((54502, 54513), 'time.time', 'time.time', ([], {}), '()\n', (54511, 54513), False, 'import time\n'), ((56768, 56804), 'numpy.delete', 'np.delete', (['self.obj_trace', 'delIdx', '(0)'], {}), '(self.obj_trace, delIdx, 0)\n', (56777, 56804), True, 'import numpy as np\n'), ((56830, 56866), 'numpy.delete', 'np.delete', (['self.var_trace', 'delIdx', '(0)'], {}), '(self.var_trace, delIdx, 
0)\n', (56839, 56866), True, 'import numpy as np\n'), ((59057, 59109), 'geatpy.SoeaAlgorithm.__init__', 'ea.SoeaAlgorithm.__init__', (['self', 'problem', 'population'], {}), '(self, problem, population)\n', (59082, 59109), True, 'import geatpy as ea\n'), ((60190, 60216), 'numpy.argmax', 'np.argmax', (['population.ObjV'], {}), '(population.ObjV)\n', (60199, 60216), True, 'import numpy as np\n'), ((60344, 60369), 'numpy.zeros_like', 'np.zeros_like', (['raw_policy'], {}), '(raw_policy)\n', (60357, 60369), True, 'import numpy as np\n'), ((61630, 61696), 'geatpy.scaling', 'ea.scaling', (['population.ObjV', 'population.CV', 'self.problem.maxormins'], {}), '(population.ObjV, population.CV, self.problem.maxormins)\n', (61640, 61696), True, 'import geatpy as ea\n'), ((1869, 1900), 'os.path.exists', 'os.path.exists', (['"""referenceObjV"""'], {}), "('referenceObjV')\n", (1883, 1900), False, 'import os\n'), ((1923, 1951), 'os.makedirs', 'os.makedirs', (['"""referenceObjV"""'], {}), "('referenceObjV')\n", (1934, 1951), False, 'import os\n'), ((9762, 9777), 'numpy.array', 'np.array', (['index'], {}), '(index)\n', (9770, 9777), True, 'import numpy as np\n'), ((10684, 10710), 'numpy.random.rand', 'np.random.rand', (['self.sizes'], {}), '(self.sizes)\n', (10698, 10710), True, 'import numpy as np\n'), ((14633, 14667), 'numpy.vstack', 'np.vstack', (['[self.Chrom, pop.Chrom]'], {}), '([self.Chrom, pop.Chrom])\n', (14642, 14667), True, 'import numpy as np\n'), ((18659, 18675), 'numpy.array', 'np.array', (['Chroms'], {}), '(Chroms)\n', (18667, 18675), True, 'import numpy as np\n'), ((18767, 18820), 'pandas.read_csv', 'pd.read_csv', (['"""GA_Search/Result/ObjV.csv"""'], {'header': 'None'}), "('GA_Search/Result/ObjV.csv', header=None)\n", (18778, 18820), True, 'import pandas as pd\n'), ((18851, 18865), 'numpy.array', 'np.array', (['ObjV'], {}), '(ObjV)\n', (18859, 18865), True, 'import numpy as np\n'), ((18959, 19013), 'pandas.read_csv', 'pd.read_csv', 
(['"""GA_Search/Result/FitnV.csv"""'], {'header': 'None'}), "('GA_Search/Result/FitnV.csv', header=None)\n", (18970, 19013), True, 'import pandas as pd\n'), ((19045, 19060), 'numpy.array', 'np.array', (['FitnV'], {}), '(FitnV)\n', (19053, 19060), True, 'import numpy as np\n'), ((19145, 19196), 'pandas.read_csv', 'pd.read_csv', (['"""GA_Search/Result/CV.csv"""'], {'header': 'None'}), "('GA_Search/Result/CV.csv', header=None)\n", (19156, 19196), True, 'import pandas as pd\n'), ((19225, 19237), 'numpy.array', 'np.array', (['CV'], {}), '(CV)\n', (19233, 19237), True, 'import numpy as np\n'), ((19328, 19381), 'pandas.read_csv', 'pd.read_csv', (['"""GA_Search/Result/Phen.csv"""'], {'header': 'None'}), "('GA_Search/Result/Phen.csv', header=None)\n", (19339, 19381), True, 'import pandas as pd\n'), ((19412, 19426), 'numpy.array', 'np.array', (['Phen'], {}), '(Phen)\n', (19420, 19426), True, 'import numpy as np\n'), ((20644, 20660), 'numpy.zeros', 'np.zeros', (['numVar'], {}), '(numVar)\n', (20652, 20660), True, 'import numpy as np\n'), ((22094, 22111), 'numpy.array', 'np.array', (['Chrom_i'], {}), '(Chrom_i)\n', (22102, 22111), True, 'import numpy as np\n'), ((26356, 26411), 'geatpy.crtpc', 'ea.crtpc', (['self.Encodings[i]', 'self.sizes', 'self.Fields[i]'], {}), '(self.Encodings[i], self.sizes, self.Fields[i])\n', (26364, 26411), True, 'import geatpy as ea\n'), ((27951, 27967), 'numpy.zeros', 'np.zeros', (['numVar'], {}), '(numVar)\n', (27959, 27967), True, 'import numpy as np\n'), ((29401, 29418), 'numpy.array', 'np.array', (['Chrom_i'], {}), '(Chrom_i)\n', (29409, 29418), True, 'import numpy as np\n'), ((30107, 30134), 'numpy.hstack', 'np.hstack', (['[Phen, tempPhen]'], {}), '([Phen, tempPhen])\n', (30116, 30134), True, 'import numpy as np\n'), ((32293, 32319), 'numpy.random.rand', 'np.random.rand', (['self.sizes'], {}), '(self.sizes)\n', (32307, 32319), True, 'import numpy as np\n'), ((36343, 36383), 'numpy.vstack', 'np.vstack', (['[NewChroms[i], pop.Chroms[i]]'], {}), 
'([NewChroms[i], pop.Chroms[i]])\n', (36352, 36383), True, 'import numpy as np\n'), ((38814, 38838), 'os.path.exists', 'os.path.exists', (['"""Result"""'], {}), "('Result')\n", (38828, 38838), False, 'import os\n'), ((38861, 38882), 'os.makedirs', 'os.makedirs', (['"""Result"""'], {}), "('Result')\n", (38872, 38882), False, 'import os\n'), ((39346, 39401), 'numpy.savetxt', 'np.savetxt', (['"""Result/ObjV.csv"""', 'self.ObjV'], {'delimiter': '""","""'}), "('Result/ObjV.csv', self.ObjV, delimiter=',')\n", (39356, 39401), True, 'import numpy as np\n'), ((39449, 39506), 'numpy.savetxt', 'np.savetxt', (['"""Result/FitnV.csv"""', 'self.FitnV'], {'delimiter': '""","""'}), "('Result/FitnV.csv', self.FitnV, delimiter=',')\n", (39459, 39506), True, 'import numpy as np\n'), ((39551, 39602), 'numpy.savetxt', 'np.savetxt', (['"""Result/CV.csv"""', 'self.CV'], {'delimiter': '""","""'}), "('Result/CV.csv', self.CV, delimiter=',')\n", (39561, 39602), True, 'import numpy as np\n'), ((39649, 39704), 'numpy.savetxt', 'np.savetxt', (['"""Result/Phen.csv"""', 'self.Phen'], {'delimiter': '""","""'}), "('Result/Phen.csv', self.Phen, delimiter=',')\n", (39659, 39704), True, 'import numpy as np\n'), ((42050, 42068), 'numpy.isnan', 'np.isnan', (['pop.ObjV'], {}), '(pop.ObjV)\n', (42058, 42068), True, 'import numpy as np\n'), ((42083, 42203), 'warnings.warn', 'warnings.warn', (['"""Warning: Some elements of ObjV are NAN, please check the calculation of ObjV."""', 'RuntimeWarning'], {}), "(\n 'Warning: Some elements of ObjV are NAN, please check the calculation of ObjV.'\n , RuntimeWarning)\n", (42096, 42203), False, 'import warnings\n'), ((46814, 46833), 'geatpy.Xovpmx', 'ea.Xovpmx', ([], {'XOVR': '(0.7)'}), '(XOVR=0.7)\n', (46823, 46833), True, 'import geatpy as ea\n'), ((46861, 46878), 'geatpy.Mutinv', 'ea.Mutinv', ([], {'Pm': '(0.5)'}), '(Pm=0.5)\n', (46870, 46878), True, 'import geatpy as ea\n'), ((46920, 46938), 'geatpy.Xovdp', 'ea.Xovdp', ([], {'XOVR': '(0.7)'}), '(XOVR=0.7)\n', 
(46928, 46938), True, 'import geatpy as ea\n'), ((48467, 48533), 'geatpy.scaling', 'ea.scaling', (['population.ObjV', 'population.CV', 'self.problem.maxormins'], {}), '(population.ObjV, population.CV, self.problem.maxormins)\n', (48477, 48533), True, 'import geatpy as ea\n'), ((51119, 51130), 'time.time', 'time.time', ([], {}), '()\n', (51128, 51130), False, 'import time\n'), ((52088, 52099), 'time.time', 'time.time', ([], {}), '()\n', (52097, 52099), False, 'import time\n'), ((54095, 54121), 'numpy.zeros', 'np.zeros', (['(self.MAXGEN, 2)'], {}), '((self.MAXGEN, 2))\n', (54103, 54121), True, 'import numpy as np\n'), ((54786, 54810), 'numpy.argmax', 'np.argmax', (['tempPop.FitnV'], {}), '(tempPop.FitnV)\n', (54795, 54810), True, 'import numpy as np\n'), ((55907, 55918), 'time.time', 'time.time', ([], {}), '()\n', (55916, 55918), False, 'import time\n'), ((57013, 57024), 'time.time', 'time.time', ([], {}), '()\n', (57022, 57024), False, 'import time\n'), ((62400, 62466), 'geatpy.scaling', 'ea.scaling', (['population.ObjV', 'population.CV', 'self.problem.maxormins'], {}), '(population.ObjV, population.CV, self.problem.maxormins)\n', (62410, 62466), True, 'import geatpy as ea\n'), ((12282, 12313), 'numpy.all', 'np.all', (['(self.Field == pop.Field)'], {}), '(self.Field == pop.Field)\n', (12288, 12313), True, 'import numpy as np\n'), ((14472, 14503), 'numpy.all', 'np.all', (['(self.Field == pop.Field)'], {}), '(self.Field == pop.Field)\n', (14478, 14503), True, 'import numpy as np\n'), ((15294, 15326), 'numpy.vstack', 'np.vstack', (['[self.ObjV, pop.ObjV]'], {}), '([self.ObjV, pop.ObjV])\n', (15303, 15326), True, 'import numpy as np\n'), ((15426, 15460), 'numpy.vstack', 'np.vstack', (['[self.FitnV, pop.FitnV]'], {}), '([self.FitnV, pop.FitnV])\n', (15435, 15460), True, 'import numpy as np\n'), ((15581, 15609), 'numpy.vstack', 'np.vstack', (['[self.CV, pop.CV]'], {}), '([self.CV, pop.CV])\n', (15590, 15609), True, 'import numpy as np\n'), ((15707, 15739), 'numpy.vstack', 
'np.vstack', (['[self.Phen, pop.Phen]'], {}), '([self.Phen, pop.Phen])\n', (15716, 15739), True, 'import numpy as np\n'), ((21874, 21894), 'numpy.array', 'np.array', (['arry[::-1]'], {}), '(arry[::-1])\n', (21882, 21894), True, 'import numpy as np\n'), ((21980, 22017), 'numpy.concatenate', 'np.concatenate', (['([0] * zero_need, bi)'], {}), '(([0] * zero_need, bi))\n', (21994, 22017), True, 'import numpy as np\n'), ((29181, 29201), 'numpy.array', 'np.array', (['arry[::-1]'], {}), '(arry[::-1])\n', (29189, 29201), True, 'import numpy as np\n'), ((29287, 29324), 'numpy.concatenate', 'np.concatenate', (['([0] * zero_need, bi)'], {}), '(([0] * zero_need, bi))\n', (29301, 29324), True, 'import numpy as np\n'), ((33883, 33922), 'numpy.all', 'np.all', (['(self.Fields[i] == pop.Fields[i])'], {}), '(self.Fields[i] == pop.Fields[i])\n', (33889, 33922), True, 'import numpy as np\n'), ((36005, 36044), 'numpy.all', 'np.all', (['(self.Fields[i] == pop.Fields[i])'], {}), '(self.Fields[i] == pop.Fields[i])\n', (36011, 36044), True, 'import numpy as np\n'), ((36999, 37031), 'numpy.vstack', 'np.vstack', (['[self.ObjV, pop.ObjV]'], {}), '([self.ObjV, pop.ObjV])\n', (37008, 37031), True, 'import numpy as np\n'), ((37137, 37171), 'numpy.vstack', 'np.vstack', (['[self.FitnV, pop.FitnV]'], {}), '([self.FitnV, pop.FitnV])\n', (37146, 37171), True, 'import numpy as np\n'), ((37298, 37326), 'numpy.vstack', 'np.vstack', (['[self.CV, pop.CV]'], {}), '([self.CV, pop.CV])\n', (37307, 37326), True, 'import numpy as np\n'), ((37430, 37462), 'numpy.vstack', 'np.vstack', (['[self.Phen, pop.Phen]'], {}), '([self.Phen, pop.Phen])\n', (37439, 37462), True, 'import numpy as np\n'), ((42247, 42265), 'numpy.isinf', 'np.isinf', (['pop.ObjV'], {}), '(pop.ObjV)\n', (42255, 42265), True, 'import numpy as np\n'), ((42280, 42400), 'warnings.warn', 'warnings.warn', (['"""Warning: Some elements of ObjV are Inf, please check the calculation of ObjV."""', 'RuntimeWarning'], {}), "(\n 'Warning: Some elements of ObjV 
are Inf, please check the calculation of ObjV.'\n , RuntimeWarning)\n", (42293, 42400), False, 'import warnings\n'), ((42477, 42493), 'numpy.isnan', 'np.isnan', (['pop.CV'], {}), '(pop.CV)\n', (42485, 42493), True, 'import numpy as np\n'), ((42512, 42628), 'warnings.warn', 'warnings.warn', (['"""Warning: Some elements of CV are NAN, please check the calculation of CV."""', 'RuntimeWarning'], {}), "(\n 'Warning: Some elements of CV are NAN, please check the calculation of CV.'\n , RuntimeWarning)\n", (42525, 42628), False, 'import warnings\n'), ((47015, 47033), 'geatpy.Mutbin', 'ea.Mutbin', ([], {'Pm': 'None'}), '(Pm=None)\n', (47024, 47033), True, 'import geatpy as ea\n'), ((48093, 48143), 'geatpy.selecting', 'ea.selecting', (['self.selFunc', 'population.FitnV', 'NIND'], {}), '(self.selFunc, population.FitnV, NIND)\n', (48105, 48143), True, 'import geatpy as ea\n'), ((48588, 48631), 'geatpy.selecting', 'ea.selecting', (['"""dup"""', 'population.FitnV', 'NIND'], {}), "('dup', population.FitnV, NIND)\n", (48600, 48631), True, 'import geatpy as ea\n'), ((50691, 50702), 'time.time', 'time.time', ([], {}), '()\n', (50700, 50702), False, 'import time\n'), ((50780, 50878), 'geatpy.moeaplot', 'ea.moeaplot', (['tempPop.ObjV', '"""objective values"""', '(False)', 'self.ax', 'self.currentGen'], {'gridFlag': '(True)'}), "(tempPop.ObjV, 'objective values', False, self.ax, self.\n currentGen, gridFlag=True)\n", (50791, 50878), True, 'import geatpy as ea\n'), ((51939, 51960), 'numpy.where', 'np.where', (['(levels == 1)'], {}), '(levels == 1)\n', (51947, 51960), True, 'import numpy as np\n'), ((54860, 54880), 'numpy.sum', 'np.sum', (['tempPop.ObjV'], {}), '(tempPop.ObjV)\n', (54866, 54880), True, 'import numpy as np\n'), ((55207, 55264), 'numpy.abs', 'np.abs', (['(self.preObjV - self.obj_trace[self.currentGen, 1])'], {}), '(self.preObjV - self.obj_trace[self.currentGen, 1])\n', (55213, 55264), True, 'import numpy as np\n'), ((55410, 55421), 'time.time', 'time.time', ([], {}), 
'()\n', (55419, 55421), False, 'import time\n'), ((55498, 55627), 'geatpy.soeaplot', 'ea.soeaplot', (['self.obj_trace[:, [1]]'], {'Label': '"""Objective Value"""', 'saveFlag': '(False)', 'ax': 'self.ax', 'gen': 'self.currentGen', 'gridFlag': '(False)'}), "(self.obj_trace[:, [1]], Label='Objective Value', saveFlag=False,\n ax=self.ax, gen=self.currentGen, gridFlag=False)\n", (55509, 55627), True, 'import geatpy as ea\n'), ((56714, 56738), 'numpy.isnan', 'np.isnan', (['self.obj_trace'], {}), '(self.obj_trace)\n', (56722, 56738), True, 'import numpy as np\n'), ((59527, 59547), 'geatpy.Xovpmx', 'ea.Xovpmx', ([], {'XOVR': 'XOVR'}), '(XOVR=XOVR)\n', (59536, 59547), True, 'import geatpy as ea\n'), ((59574, 59591), 'geatpy.Mutinv', 'ea.Mutinv', ([], {'Pm': '(0.5)'}), '(Pm=0.5)\n', (59583, 59591), True, 'import geatpy as ea\n'), ((59636, 59655), 'geatpy.Xovdp', 'ea.Xovdp', ([], {'XOVR': 'XOVR'}), '(XOVR=XOVR)\n', (59644, 59655), True, 'import geatpy as ea\n'), ((61879, 61929), 'geatpy.selecting', 'ea.selecting', (['self.selFunc', 'population.FitnV', 'NIND'], {}), '(self.selFunc, population.FitnV, NIND)\n', (61891, 61929), True, 'import geatpy as ea\n'), ((62521, 62564), 'geatpy.selecting', 'ea.selecting', (['"""dup"""', 'population.FitnV', 'NIND'], {}), "('dup', population.FitnV, NIND)\n", (62533, 62564), True, 'import geatpy as ea\n'), ((9847, 9866), 'numpy.sum', 'np.sum', (['index_array'], {}), '(index_array)\n', (9853, 9866), True, 'import numpy as np\n'), ((19855, 19884), 'numpy.expand_dims', 'np.expand_dims', (['individual', '(0)'], {}), '(individual, 0)\n', (19869, 19884), True, 'import numpy as np\n'), ((21355, 21376), 'numpy.cumsum', 'np.cumsum', (['lengthVars'], {}), '(lengthVars)\n', (21364, 21376), True, 'import numpy as np\n'), ((21489, 21507), 'numpy.sum', 'np.sum', (['lengthVars'], {}), '(lengthVars)\n', (21495, 21507), True, 'import numpy as np\n'), ((26980, 27009), 'numpy.expand_dims', 'np.expand_dims', (['individual', '(0)'], {}), '(individual, 0)\n', 
(26994, 27009), True, 'import numpy as np\n'), ((28662, 28683), 'numpy.cumsum', 'np.cumsum', (['lengthVars'], {}), '(lengthVars)\n', (28671, 28683), True, 'import numpy as np\n'), ((28796, 28814), 'numpy.sum', 'np.sum', (['lengthVars'], {}), '(lengthVars)\n', (28802, 28814), True, 'import numpy as np\n'), ((42684, 42700), 'numpy.isinf', 'np.isinf', (['pop.CV'], {}), '(pop.CV)\n', (42692, 42700), True, 'import numpy as np\n'), ((42719, 42835), 'warnings.warn', 'warnings.warn', (['"""Warning: Some elements of CV are Inf, please check the calculation of CV."""', 'RuntimeWarning'], {}), "(\n 'Warning: Some elements of CV are Inf, please check the calculation of CV.'\n , RuntimeWarning)\n", (42732, 42835), False, 'import warnings\n'), ((47111, 47173), 'geatpy.Mutbga', 'ea.Mutbga', ([], {'Pm': '(1 / self.problem.Dim)', 'MutShrink': '(0.5)', 'Gradient': '(20)'}), '(Pm=1 / self.problem.Dim, MutShrink=0.5, Gradient=20)\n', (47120, 47173), True, 'import geatpy as ea\n'), ((50398, 50427), 'numpy.all', 'np.all', (['(population.CV <= 0)', '(1)'], {}), '(population.CV <= 0, 1)\n', (50404, 50427), True, 'import numpy as np\n'), ((50958, 51058), 'geatpy.varplot', 'ea.varplot', (['tempPop.Phen', '"""decision variables"""', '(False)', 'self.ax', 'self.currentGen'], {'gridFlag': '(False)'}), "(tempPop.Phen, 'decision variables', False, self.ax, self.\n currentGen, gridFlag=False)\n", (50968, 51058), True, 'import geatpy as ea\n'), ((54576, 54605), 'numpy.all', 'np.all', (['(population.CV <= 0)', '(1)'], {}), '(population.CV <= 0, 1)\n', (54582, 54605), True, 'import numpy as np\n'), ((55724, 55846), 'geatpy.varplot', 'ea.varplot', (['tempPop.Phen'], {'Label': '"""decision variables"""', 'saveFlag': '(False)', 'ax': 'self.ax', 'gen': 'self.currentGen', 'gridFlag': '(False)'}), "(tempPop.Phen, Label='decision variables', saveFlag=False, ax=\n self.ax, gen=self.currentGen, gridFlag=False)\n", (55734, 55846), True, 'import geatpy as ea\n'), ((59739, 59757), 'geatpy.Mutbin', 'ea.Mutbin', 
([], {'Pm': 'None'}), '(Pm=None)\n', (59748, 59757), True, 'import geatpy as ea\n'), ((52033, 52057), 'numpy.all', 'np.all', (['(NDSet.CV <= 0)', '(1)'], {}), '(NDSet.CV <= 0, 1)\n', (52039, 52057), True, 'import numpy as np\n'), ((59843, 59905), 'geatpy.Mutbga', 'ea.Mutbga', ([], {'Pm': '(1 / self.problem.Dim)', 'MutShrink': '(0.5)', 'Gradient': '(20)'}), '(Pm=1 / self.problem.Dim, MutShrink=0.5, Gradient=20)\n', (59852, 59905), True, 'import geatpy as ea\n'), ((20919, 20945), 'math.pow', 'math.pow', (['(2)', '(var_e - j - 1)'], {}), '(2, var_e - j - 1)\n', (20927, 20945), False, 'import math\n'), ((28226, 28252), 'math.pow', 'math.pow', (['(2)', '(var_e - j - 1)'], {}), '(2, var_e - j - 1)\n', (28234, 28252), False, 'import math\n')] |
import unittest
import sys
sys.path.insert(0,'..')
import numpy as np
from parampy import Parameters
from qubricks import Operator
from qubricks.wall import SpinBasis, SimpleBasis
class TestBasis(unittest.TestCase):
    """Behavioral checks for a three-site SpinBasis (8-dimensional)."""

    def setUp(self):
        # Three spin-1/2 sites -> 2**3 = 8 basis states.
        self.b = SpinBasis(dim=2**3)

    def test_properties(self):
        basis = self.b
        self.assertEqual(basis.dim, 8)
        self.assertIsInstance(basis.operator, Operator)
        self.assertEqual(len(basis.states()), 8)

    def test_symbolic(self):
        self.skipTest("Symbolic functions are not yet fully baked.")

    def test_transform(self):
        # Transforming a basis vector with no target basis is the identity.
        vector = [1, 0, 0, 0, 0, 0, 0, 0]
        self.assertEqual(self.b.transform(vector).tolist(), vector)

    def test_repr(self):
        # String -> state round trips for the extremal basis states.
        for label, expected in (("|uuu>", [1, 0, 0, 0, 0, 0, 0, 0]),
                                ("|ddd>", [0, 0, 0, 0, 0, 0, 0, 1])):
            self.assertEqual(self.b.state_fromString(label).tolist(), expected)
        self.assertEqual(self.b.state_fromString("|uuu>+|ddd>").tolist(), [1., 0, 0, 0, 0, 0, 0, 1.])
        # State -> string direction.
        self.assertEqual(self.b.state_toString([1, 0, 0, 1, 0, 0, 0, 0]), "|uuu>+|udd>")
        self.assertEqual(self.b.state_toString([1, 0, 0, 0, 0, 0, 0, 0]), "|uuu>")
        self.assertEqual(self.b.state_toString([0, 0, 0, 0, 0, 0, 0, 1]), "|ddd>")

    def test_info(self):
        self.assertEqual(self.b.state_info([1, 0, 0, 0, 0, 0, 0, 0]), {'spin': 1.5})
class TestOperatorTransform(unittest.TestCase):
    """Check that Operators declared in different bases combine correctly."""

    def setUp(self):
        self.p = Parameters()
        identity = [[1, 0], [0, 1]]
        scaled_hadamard = np.sqrt(2) * np.array([[1, 1], [1, -1]])
        self.basis_z = SimpleBasis(parameters=self.p, operator=identity)
        self.basis_x = SimpleBasis(parameters=self.p, operator=scaled_hadamard)

    def test_auto_basis_transformation(self):
        op_z = Operator([[1, 0], [0, 1]], basis=self.basis_z, parameters=self.p)
        op_x = Operator([[0, 1], [1, 0]], basis=self.basis_x, parameters=self.p)
        # Adding operators from different bases should auto-transform them.
        combined = (op_z + op_x)()
        self.assertTrue(np.all(np.array([[2, 0], [0, 0]]) == combined))
| [
"sys.path.insert",
"qubricks.wall.SimpleBasis",
"numpy.sqrt",
"qubricks.wall.SpinBasis",
"qubricks.Operator",
"parampy.Parameters",
"numpy.array"
] | [((27, 51), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (42, 51), False, 'import sys\n'), ((249, 270), 'qubricks.wall.SpinBasis', 'SpinBasis', ([], {'dim': '(2 ** 3)'}), '(dim=2 ** 3)\n', (258, 270), False, 'from qubricks.wall import SpinBasis, SimpleBasis\n'), ((1291, 1303), 'parampy.Parameters', 'Parameters', ([], {}), '()\n', (1301, 1303), False, 'from parampy import Parameters\n'), ((1321, 1378), 'qubricks.wall.SimpleBasis', 'SimpleBasis', ([], {'parameters': 'self.p', 'operator': '[[1, 0], [0, 1]]'}), '(parameters=self.p, operator=[[1, 0], [0, 1]])\n', (1332, 1378), False, 'from qubricks.wall import SpinBasis, SimpleBasis\n'), ((1520, 1585), 'qubricks.Operator', 'Operator', (['[[1, 0], [0, 1]]'], {'basis': 'self.basis_z', 'parameters': 'self.p'}), '([[1, 0], [0, 1]], basis=self.basis_z, parameters=self.p)\n', (1528, 1585), False, 'from qubricks import Operator\n'), ((1591, 1656), 'qubricks.Operator', 'Operator', (['[[0, 1], [1, 0]]'], {'basis': 'self.basis_x', 'parameters': 'self.p'}), '([[0, 1], [1, 0]], basis=self.basis_x, parameters=self.p)\n', (1599, 1656), False, 'from qubricks import Operator\n'), ((1431, 1441), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1438, 1441), True, 'import numpy as np\n'), ((1442, 1469), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (1450, 1469), True, 'import numpy as np\n'), ((1681, 1707), 'numpy.array', 'np.array', (['[[2, 0], [0, 0]]'], {}), '([[2, 0], [0, 0]])\n', (1689, 1707), True, 'import numpy as np\n')] |
import numpy as np
from rubin_sim.photUtils import Bandpass
__all__ = ["getImsimFluxNorm"]
def getImsimFluxNorm(sed, magmatch):
    """
    Calculate the flux normalization of an SED in the imsim bandpass.

    Parameters
    -----------
    sed is the SED to be normalized

    magmatch is the desired magnitude in the imsim bandpass

    Returns
    --------
    The factor by which the flux of sed needs to be multiplied to achieve
    the desired magnitude.
    """
    # This method works based on the assumption that the imsim bandpass
    # is a delta function.  If that ever ceases to be true, the unit test
    # testSedUtils.py, which checks that the results of this method are
    # identical to calling Sed.calcFluxNorm and passing in the imsim bandpass,
    # will fail and we will know to modify this method.

    # Lazily determine (once per process) the single wavelength at which the
    # imsim bandpass is non-zero, and cache it on the function object.
    if not hasattr(getImsimFluxNorm, 'imsim_wavelen'):
        bp = Bandpass()
        bp.imsimBandpass()
        first_nonzero = np.where(bp.sb > 0.0)[0][0]
        getImsimFluxNorm.imsim_wavelen = bp.wavelen[first_nonzero]

    wavelen = getImsimFluxNorm.imsim_wavelen

    if sed.fnu is None:
        sed.flambdaTofnu()

    if wavelen < sed.wavelen.min() or wavelen > sed.wavelen.max():
        raise RuntimeError("Cannot normalize sed "
                           "at wavelength of %e nm\n" % wavelen
                           + "The SED does not cover that wavelength\n"
                           + "(Covers %e < lambda %e)" % (sed.wavelen.min(), sed.wavelen.max()))

    # Magnitude of the SED at the bandpass's single effective wavelength.
    fnu_at_wavelen = np.interp(wavelen, sed.wavelen, sed.fnu)
    mag = -2.5 * np.log10(fnu_at_wavelen) - sed.zp
    dmag = magmatch - mag
    return np.power(10, (-0.4 * dmag))
| [
"numpy.where",
"rubin_sim.photUtils.Bandpass",
"numpy.interp",
"numpy.power"
] | [((1672, 1697), 'numpy.power', 'np.power', (['(10)', '(-0.4 * dmag)'], {}), '(10, -0.4 * dmag)\n', (1680, 1697), True, 'import numpy as np\n'), ((897, 907), 'rubin_sim.photUtils.Bandpass', 'Bandpass', ([], {}), '()\n', (905, 907), False, 'from rubin_sim.photUtils import Bandpass\n'), ((958, 979), 'numpy.where', 'np.where', (['(bp.sb > 0.0)'], {}), '(bp.sb > 0.0)\n', (966, 979), True, 'import numpy as np\n'), ((1561, 1624), 'numpy.interp', 'np.interp', (['getImsimFluxNorm.imsim_wavelen', 'sed.wavelen', 'sed.fnu'], {}), '(getImsimFluxNorm.imsim_wavelen, sed.wavelen, sed.fnu)\n', (1570, 1624), True, 'import numpy as np\n')] |
from abc import ABC
from pathlib import Path
from collections import defaultdict
import random
import numpy as np
from enum import Enum
import torch
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
from plyfile import PlyData
import lib.transforms as t
from lib.dataloader import InfSampler
from lib.voxelizer import Voxelizer
class DatasetPhase(Enum):
  """Identifies which split of a dataset a loader should serve."""
  Train = 0
  Val = 1
  Val2 = 2
  TrainVal = 3
  Test = 4
def datasetphase_2str(arg):
  """Map a DatasetPhase member to its lowercase split name.

  Raises ValueError when `arg` is not a DatasetPhase member.
  """
  names = {
      DatasetPhase.Train: 'train',
      DatasetPhase.Val: 'val',
      DatasetPhase.Val2: 'val2',
      DatasetPhase.TrainVal: 'trainval',
      DatasetPhase.Test: 'test',
  }
  try:
    return names[arg]
  except (KeyError, TypeError):
    # TypeError covers unhashable arguments; both map to the same error.
    raise ValueError('phase must be one of dataset enum.')
def str2datasetphase_type(arg):
  """Parse a split-name string (case-insensitive) into a DatasetPhase member.

  Raises ValueError for unrecognized names.
  """
  phases = {
      'TRAIN': DatasetPhase.Train,
      'VAL': DatasetPhase.Val,
      'VAL2': DatasetPhase.Val2,
      'TRAINVAL': DatasetPhase.TrainVal,
      'TEST': DatasetPhase.Test,
  }
  key = arg.upper()
  if key in phases:
    return phases[key]
  raise ValueError('phase must be one of train/val/test')
def cache(func):
  """Decorator memoizing per-index results of a dataset loader method.

  When ``self.cache`` is truthy, results are stored in
  ``self.cache_dict[func.__name__]`` keyed by the first positional argument
  (assumed to be the index); otherwise the wrapped method is called directly.

  FIX: the wrapper now uses functools.wraps so the decorated method keeps
  its original __name__/__doc__ (the original decorator discarded them).
  """
  from functools import wraps

  @wraps(func)
  def wrapper(self, *args, **kwargs):
    # Assume that args[0] is index
    index = args[0]
    if not self.cache:
      return func(self, *args, **kwargs)
    memo = self.cache_dict[func.__name__]
    if index not in memo:
      memo[index] = func(self, *args, **kwargs)
    return memo[index]

  return wrapper
class DictDataset(Dataset, ABC):
  """Dataset backed by a sorted list of file paths with pluggable
  per-key loaders and optional transforms."""

  IS_FULL_POINTCLOUD_EVAL = False

  def __init__(self,
               data_paths,
               prevoxel_transform=None,
               input_transform=None,
               target_transform=None,
               cache=False,
               data_root='/'):
    """
    data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]]
    """
    Dataset.__init__(self)
    # Allows easier path concatenation
    self.data_root = data_root if isinstance(data_root, Path) else Path(data_root)
    self.data_paths = sorted(data_paths)

    self.prevoxel_transform = prevoxel_transform
    self.input_transform = input_transform
    self.target_transform = target_transform

    # Each key maps to its (loader, transform) pair.
    self.data_loader_dict = {
        'input': (self.load_input, self.input_transform),
        'target': (self.load_target, self.target_transform)
    }

    # For large dataset, do not cache
    self.cache = cache
    self.cache_dict = defaultdict(dict)
    self.loading_key_order = ['input', 'target']

  def load_input(self, index):
    raise NotImplementedError

  def load_target(self, index):
    raise NotImplementedError

  def get_classnames(self):
    pass

  def reorder_result(self, result):
    # Hook for subclasses; identity by default.
    return result

  def __getitem__(self, index):
    def _fetch(key):
      loader, transform = self.data_loader_dict[key]
      value = loader(index)
      return transform(value) if transform else value

    return [_fetch(key) for key in self.loading_key_order]

  def __len__(self):
    return len(self.data_paths)
class VoxelizationDatasetBase(DictDataset, ABC):
  """Common attributes and ply loading shared by voxelization datasets."""

  IS_TEMPORAL = False
  CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000)
  ROTATION_AXIS = None
  NUM_IN_CHANNEL = None
  NUM_LABELS = -1  # Number of labels in the dataset, including all ignore classes
  IGNORE_LABELS = None  # labels that are not evaluated

  def __init__(self,
               data_paths,
               prevoxel_transform=None,
               input_transform=None,
               target_transform=None,
               cache=False,
               data_root='/',
               ignore_mask=255,
               return_transformation=False,
               **kwargs):
    """
    ignore_mask: label value for ignore class. It will not be used as a class in the loss or evaluation.
    """
    DictDataset.__init__(
        self,
        data_paths,
        prevoxel_transform=prevoxel_transform,
        input_transform=input_transform,
        target_transform=target_transform,
        cache=cache,
        data_root=data_root)
    self.ignore_mask = ignore_mask
    self.return_transformation = return_transformation

  def __getitem__(self, index):
    raise NotImplementedError

  def load_ply(self, index):
    """Read one ply file and return (coords, feats, labels, center=None)."""
    path = self.data_root / self.data_paths[index]
    vertex = PlyData.read(path).elements[0].data
    coords = np.array([vertex['x'], vertex['y'], vertex['z']], dtype=np.float32).T
    feats = np.array([vertex['red'], vertex['green'], vertex['blue']], dtype=np.float32).T
    labels = np.array(vertex['label'], dtype=np.int32)
    return coords, feats, labels, None

  def __len__(self):
    return len(self.data_paths)
class VoxelizationDataset(VoxelizationDatasetBase):
  """This dataset loads RGB point clouds and their labels as a list of points
  and voxelizes the pointcloud with sufficient data augmentation.
  """
  # Voxelization arguments
  VOXEL_SIZE = 0.05  # 5cm

  # Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate
  # augmentation has to be done before voxelization
  SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
  ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6))
  TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2))
  ELASTIC_DISTORT_PARAMS = None

  # MISC.
  PREVOXELIZATION_VOXEL_SIZE = None

  # Augment coords to feats
  AUGMENT_COORDS_TO_FEATS = False

  def __init__(self,
               data_paths,
               prevoxel_transform=None,
               input_transform=None,
               target_transform=None,
               data_root='/',
               ignore_label=255,
               return_transformation=False,
               augment_data=False,
               config=None,
               cache=False,
               **kwargs):
    """
    ignore_label: label value mapped to the ignore mask; excluded from loss
      and evaluation.
    cache: enable per-index memoization of @cache-decorated loaders.

    BUG FIX: this constructor previously forwarded ``cache=cache`` where
    ``cache`` resolved to the module-level `cache` *decorator function*
    (always truthy), so caching was unconditionally enabled while any
    caller-supplied ``cache=`` keyword was silently swallowed by **kwargs.
    The flag is now an explicit parameter (default False, backward
    compatible) and forwarded correctly.
    """
    self.augment_data = augment_data
    self.config = config
    VoxelizationDatasetBase.__init__(
        self,
        data_paths,
        prevoxel_transform=prevoxel_transform,
        input_transform=input_transform,
        target_transform=target_transform,
        cache=cache,
        data_root=data_root,
        ignore_mask=ignore_label,
        return_transformation=return_transformation)

    # Prevoxel transformations
    self.voxelizer = Voxelizer(
        voxel_size=self.VOXEL_SIZE,
        clip_bound=self.CLIP_BOUND,
        use_augmentation=augment_data,
        scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
        rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
        translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
        ignore_label=ignore_label)

    # map labels not evaluated to ignore_label; evaluated labels are
    # renumbered contiguously from 0.
    label_map = {}
    n_used = 0
    for l in range(self.NUM_LABELS):
      if l in self.IGNORE_LABELS:
        label_map[l] = self.ignore_mask
      else:
        label_map[l] = n_used
        n_used += 1
    label_map[self.ignore_mask] = self.ignore_mask
    self.label_map = label_map
    self.NUM_LABELS -= len(self.IGNORE_LABELS)

  def _augment_coords_to_feats(self, coords, feats, labels=None):
    """Append mean-centered coordinates to the feature channels."""
    norm_coords = coords - coords.mean(0)
    # color must come first.
    if isinstance(coords, np.ndarray):
      feats = np.concatenate((feats, norm_coords), 1)
    else:
      feats = torch.cat((feats, norm_coords), 1)
    return coords, feats, labels

  def convert_mat2cfl(self, mat):
    # Generally, xyz,rgb,label
    return mat[:, :3], mat[:, 3:-1], mat[:, -1]

  def __getitem__(self, index):
    coords, feats, labels, center = self.load_ply(index)
    # Downsample the pointcloud with finer voxel size before transformation for memory and speed
    if self.PREVOXELIZATION_VOXEL_SIZE is not None:
      inds = ME.utils.sparse_quantize(
          coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
      coords = coords[inds]
      feats = feats[inds]
      labels = labels[inds]

    # Prevoxel transformations
    if self.prevoxel_transform is not None:
      coords, feats, labels = self.prevoxel_transform(coords, feats, labels)

    coords, feats, labels, transformation = self.voxelizer.voxelize(
        coords, feats, labels, center=center)

    # map labels not used for evaluation to ignore_label
    if self.input_transform is not None:
      coords, feats, labels = self.input_transform(coords, feats, labels)
    if self.target_transform is not None:
      coords, feats, labels = self.target_transform(coords, feats, labels)
    if self.IGNORE_LABELS is not None:
      # `int` instead of the removed NumPy alias `np.int` (same dtype).
      labels = np.array([self.label_map[x] for x in labels], dtype=int)

    # Use coordinate features if config is set
    if self.AUGMENT_COORDS_TO_FEATS:
      coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels)

    return_args = [coords, feats, labels]
    if self.return_transformation:
      return_args.append(transformation.astype(np.float32))
    return tuple(return_args)
class TemporalVoxelizationDataset(VoxelizationDataset):
  """Voxelization dataset over temporal windows of point cloud sequences.

  Each item is a window of `temporal_numseq` frames sampled from one
  sequence with spacing `temporal_dilation`, voxelized jointly.
  """

  IS_TEMPORAL = True

  def __init__(self,
               data_paths,
               prevoxel_transform=None,
               input_transform=None,
               target_transform=None,
               data_root='/',
               ignore_label=255,
               temporal_dilation=1,
               temporal_numseq=3,
               return_transformation=False,
               augment_data=False,
               config=None,
               **kwargs):
    VoxelizationDataset.__init__(
        self,
        data_paths,
        prevoxel_transform=prevoxel_transform,
        input_transform=input_transform,
        target_transform=target_transform,
        data_root=data_root,
        ignore_label=ignore_label,
        return_transformation=return_transformation,
        augment_data=augment_data,
        config=config,
        **kwargs)
    self.temporal_dilation = temporal_dilation
    self.temporal_numseq = temporal_numseq
    temporal_window = temporal_dilation * (temporal_numseq - 1) + 1
    # Number of valid window start offsets per sequence.
    self.numels = [len(p) - temporal_window + 1 for p in self.data_paths]
    if any([numel <= 0 for numel in self.numels]):
      raise ValueError('Your temporal window configuration is too wide for '
                       'this dataset. Please change the configuration.')

  def load_world_pointcloud(self, filename):
    raise NotImplementedError

  def __getitem__(self, index):
    # Map the flat index onto (sequence index, offset within sequence).
    for seq_idx, numel in enumerate(self.numels):
      if index >= numel:
        index -= numel
      else:
        break
    numseq = self.temporal_numseq
    if self.augment_data and self.config.temporal_rand_numseq:
      numseq = random.randrange(1, self.temporal_numseq + 1)
    dilations = [self.temporal_dilation for i in range(numseq - 1)]
    if self.augment_data and self.config.temporal_rand_dilation:
      dilations = [random.randrange(1, self.temporal_dilation + 1) for i in range(numseq - 1)]
    files = [self.data_paths[seq_idx][index + sum(dilations[:i])] for i in range(numseq)]

    world_pointclouds = [self.load_world_pointcloud(f) for f in files]
    ptcs, centers = zip(*world_pointclouds)

    # Downsample pointcloud for speed and memory
    if self.PREVOXELIZATION_VOXEL_SIZE is not None:
      new_ptcs = []
      for ptc in ptcs:
        inds = ME.utils.sparse_quantize(
            ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
        new_ptcs.append(ptc[inds])
      ptcs = new_ptcs

    # Apply prevoxel transformations
    # NOTE(review): unlike VoxelizationDataset.__getitem__, there is no
    # None-check on prevoxel_transform here — confirm callers always supply one.
    ptcs = [self.prevoxel_transform(ptc) for ptc in ptcs]

    coords, feats, labels = zip(*ptcs)
    outs = self.voxelizer.voxelize_temporal(
        coords, feats, labels, centers=centers, return_transformation=self.return_transformation)

    if self.return_transformation:
      coords_t, feats_t, labels_t, transformation_t = outs
    else:
      coords_t, feats_t, labels_t = outs

    # Concatenate all frames, tagging each point with its frame index as an
    # extra coordinate column.
    joint_coords = np.vstack([
        np.hstack((coords, np.ones((coords.shape[0], 1)) * i)) for i, coords in enumerate(coords_t)
    ])
    joint_feats = np.vstack(feats_t)
    joint_labels = np.hstack(labels_t)

    # map labels not used for evaluation to ignore_label
    if self.input_transform is not None:
      joint_coords, joint_feats, joint_labels = self.input_transform(joint_coords, joint_feats,
                                                                     joint_labels)
    if self.target_transform is not None:
      joint_coords, joint_feats, joint_labels = self.target_transform(joint_coords, joint_feats,
                                                                      joint_labels)
    if self.IGNORE_LABELS is not None:
      # `int` instead of the removed NumPy alias `np.int` (same dtype).
      joint_labels = np.array([self.label_map[x] for x in joint_labels], dtype=int)

    return_args = [joint_coords, joint_feats, joint_labels]
    if self.return_transformation:
      pointclouds = np.vstack([
          np.hstack((pointcloud[0][:, :6], np.ones((pointcloud[0].shape[0], 1)) * i))
          for i, pointcloud in enumerate(world_pointclouds)
      ])
      transformations = np.vstack(
          [np.hstack((transformation, [i])) for i, transformation in enumerate(transformation_t)])
      return_args.extend([pointclouds.astype(np.float32), transformations.astype(np.float32)])
    return tuple(return_args)

  def __len__(self):
    return sum(self.numels)
def initialize_data_loader(DatasetClass,
                           config,
                           phase,
                           num_workers,
                           shuffle,
                           repeat,
                           augment_data,
                           batch_size,
                           limit_numpoints,
                           input_transform=None,
                           target_transform=None):
  """Construct a DatasetClass instance and wrap it in a torch DataLoader.

  Builds the prevoxel/input transform pipelines (augmentations only when
  `augment_data` is set) and selects the collate function according to
  `config.return_transformation`.
  """
  if isinstance(phase, str):
    phase = str2datasetphase_type(phase)

  collate_factory = (t.cflt_collate_fn_factory
                     if config.return_transformation else t.cfl_collate_fn_factory)
  collate_fn = collate_factory(limit_numpoints)

  # Transforms applied before voxelization (coordinate-level augmentation).
  prevoxel_steps = []
  if augment_data:
    prevoxel_steps.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS))
  prevoxel_transforms = t.Compose(prevoxel_steps) if prevoxel_steps else None

  # Transforms applied after voxelization (feature-level augmentation).
  input_steps = list(input_transform) if input_transform is not None else []
  if augment_data:
    input_steps += [
        t.RandomDropout(0.2),
        t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL),
        t.ChromaticAutoContrast(),
        t.ChromaticTranslation(config.data_aug_color_trans_ratio),
        t.ChromaticJitter(config.data_aug_color_jitter_std),
        # t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
    ]
  input_transforms = t.Compose(input_steps) if input_steps else None

  dataset = DatasetClass(
      config,
      prevoxel_transform=prevoxel_transforms,
      input_transform=input_transforms,
      target_transform=target_transform,
      cache=config.cache_data,
      augment_data=augment_data,
      phase=phase)

  loader_kwargs = {
      'dataset': dataset,
      'num_workers': num_workers,
      'batch_size': batch_size,
      'collate_fn': collate_fn,
  }
  # An infinite sampler and shuffle are mutually exclusive in DataLoader.
  if repeat:
    loader_kwargs['sampler'] = InfSampler(dataset, shuffle)
  else:
    loader_kwargs['shuffle'] = shuffle

  return DataLoader(**loader_kwargs)
| [
"lib.transforms.ChromaticTranslation",
"lib.transforms.ChromaticJitter",
"numpy.hstack",
"torch.utils.data.DataLoader",
"numpy.array",
"lib.transforms.RandomDropout",
"lib.transforms.cflt_collate_fn_factory",
"lib.voxelizer.Voxelizer",
"lib.dataloader.InfSampler",
"pathlib.Path",
"numpy.vstack",... | [((15582, 15605), 'torch.utils.data.DataLoader', 'DataLoader', ([], {}), '(**data_args)\n', (15592, 15605), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2008, 2030), 'torch.utils.data.Dataset.__init__', 'Dataset.__init__', (['self'], {}), '(self)\n', (2024, 2030), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2621, 2638), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2632, 2638), False, 'from collections import defaultdict\n'), ((4475, 4497), 'plyfile.PlyData.read', 'PlyData.read', (['filepath'], {}), '(filepath)\n', (4487, 4497), False, 'from plyfile import PlyData\n'), ((4709, 4748), 'numpy.array', 'np.array', (["data['label']"], {'dtype': 'np.int32'}), "(data['label'], dtype=np.int32)\n", (4717, 4748), True, 'import numpy as np\n'), ((6437, 6777), 'lib.voxelizer.Voxelizer', 'Voxelizer', ([], {'voxel_size': 'self.VOXEL_SIZE', 'clip_bound': 'self.CLIP_BOUND', 'use_augmentation': 'augment_data', 'scale_augmentation_bound': 'self.SCALE_AUGMENTATION_BOUND', 'rotation_augmentation_bound': 'self.ROTATION_AUGMENTATION_BOUND', 'translation_augmentation_ratio_bound': 'self.TRANSLATION_AUGMENTATION_RATIO_BOUND', 'ignore_label': 'ignore_label'}), '(voxel_size=self.VOXEL_SIZE, clip_bound=self.CLIP_BOUND,\n use_augmentation=augment_data, scale_augmentation_bound=self.\n SCALE_AUGMENTATION_BOUND, rotation_augmentation_bound=self.\n ROTATION_AUGMENTATION_BOUND, translation_augmentation_ratio_bound=self.\n TRANSLATION_AUGMENTATION_RATIO_BOUND, ignore_label=ignore_label)\n', (6446, 6777), False, 'from lib.voxelizer import Voxelizer\n'), ((12141, 12159), 'numpy.vstack', 'np.vstack', (['feats_t'], {}), '(feats_t)\n', (12150, 12159), True, 'import numpy as np\n'), ((12179, 12198), 'numpy.hstack', 'np.hstack', (['labels_t'], {}), '(labels_t)\n', (12188, 12198), True, 'import numpy as np\n'), ((14013, 14055), 'lib.transforms.cflt_collate_fn_factory', 't.cflt_collate_fn_factory', (['limit_numpoints'], 
{}), '(limit_numpoints)\n', (14038, 14055), True, 'import lib.transforms as t\n'), ((14081, 14122), 'lib.transforms.cfl_collate_fn_factory', 't.cfl_collate_fn_factory', (['limit_numpoints'], {}), '(limit_numpoints)\n', (14105, 14122), True, 'import lib.transforms as t\n'), ((14336, 14371), 'lib.transforms.Compose', 't.Compose', (['prevoxel_transform_train'], {}), '(prevoxel_transform_train)\n', (14345, 14371), True, 'import lib.transforms as t\n'), ((14992, 15019), 'lib.transforms.Compose', 't.Compose', (['input_transforms'], {}), '(input_transforms)\n', (15001, 15019), True, 'import lib.transforms as t\n'), ((15493, 15521), 'lib.dataloader.InfSampler', 'InfSampler', (['dataset', 'shuffle'], {}), '(dataset, shuffle)\n', (15503, 15521), False, 'from lib.dataloader import InfSampler\n'), ((2129, 2144), 'pathlib.Path', 'Path', (['data_root'], {}), '(data_root)\n', (2133, 2144), False, 'from pathlib import Path\n'), ((4547, 4608), 'numpy.array', 'np.array', (["[data['x'], data['y'], data['z']]"], {'dtype': 'np.float32'}), "([data['x'], data['y'], data['z']], dtype=np.float32)\n", (4555, 4608), True, 'import numpy as np\n'), ((4623, 4693), 'numpy.array', 'np.array', (["[data['red'], data['green'], data['blue']]"], {'dtype': 'np.float32'}), "([data['red'], data['green'], data['blue']], dtype=np.float32)\n", (4631, 4693), True, 'import numpy as np\n'), ((7391, 7430), 'numpy.concatenate', 'np.concatenate', (['(feats, norm_coords)', '(1)'], {}), '((feats, norm_coords), 1)\n', (7405, 7430), True, 'import numpy as np\n'), ((7455, 7489), 'torch.cat', 'torch.cat', (['(feats, norm_coords)', '(1)'], {}), '((feats, norm_coords), 1)\n', (7464, 7489), False, 'import torch\n'), ((7889, 7978), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', (['(coords / self.PREVOXELIZATION_VOXEL_SIZE)'], {'return_index': '(True)'}), '(coords / self.PREVOXELIZATION_VOXEL_SIZE,\n return_index=True)\n', (7913, 7978), True, 'import MinkowskiEngine as ME\n'), ((8681, 8740), 
'numpy.array', 'np.array', (['[self.label_map[x] for x in labels]'], {'dtype': 'np.int'}), '([self.label_map[x] for x in labels], dtype=np.int)\n', (8689, 8740), True, 'import numpy as np\n'), ((10759, 10804), 'random.randrange', 'random.randrange', (['(1)', '(self.temporal_numseq + 1)'], {}), '(1, self.temporal_numseq + 1)\n', (10775, 10804), False, 'import random\n'), ((12760, 12825), 'numpy.array', 'np.array', (['[self.label_map[x] for x in joint_labels]'], {'dtype': 'np.int'}), '([self.label_map[x] for x in joint_labels], dtype=np.int)\n', (12768, 12825), True, 'import numpy as np\n'), ((14211, 14267), 'lib.transforms.ElasticDistortion', 't.ElasticDistortion', (['DatasetClass.ELASTIC_DISTORT_PARAMS'], {}), '(DatasetClass.ELASTIC_DISTORT_PARAMS)\n', (14230, 14267), True, 'import lib.transforms as t\n'), ((14564, 14584), 'lib.transforms.RandomDropout', 't.RandomDropout', (['(0.2)'], {}), '(0.2)\n', (14579, 14584), True, 'import lib.transforms as t\n'), ((14594, 14670), 'lib.transforms.RandomHorizontalFlip', 't.RandomHorizontalFlip', (['DatasetClass.ROTATION_AXIS', 'DatasetClass.IS_TEMPORAL'], {}), '(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL)\n', (14616, 14670), True, 'import lib.transforms as t\n'), ((14680, 14705), 'lib.transforms.ChromaticAutoContrast', 't.ChromaticAutoContrast', ([], {}), '()\n', (14703, 14705), True, 'import lib.transforms as t\n'), ((14715, 14772), 'lib.transforms.ChromaticTranslation', 't.ChromaticTranslation', (['config.data_aug_color_trans_ratio'], {}), '(config.data_aug_color_trans_ratio)\n', (14737, 14772), True, 'import lib.transforms as t\n'), ((14782, 14833), 'lib.transforms.ChromaticJitter', 't.ChromaticJitter', (['config.data_aug_color_jitter_std'], {}), '(config.data_aug_color_jitter_std)\n', (14799, 14833), True, 'import lib.transforms as t\n'), ((10957, 11004), 'random.randrange', 'random.randrange', (['(1)', '(self.temporal_dilation + 1)'], {}), '(1, self.temporal_dilation + 1)\n', (10973, 11004), False, 'import 
random\n'), ((11399, 11492), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', (['(ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE)'], {'return_index': '(True)'}), '(ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE,\n return_index=True)\n', (11423, 11492), True, 'import MinkowskiEngine as ME\n'), ((13155, 13187), 'numpy.hstack', 'np.hstack', (['(transformation, [i])'], {}), '((transformation, [i]))\n', (13164, 13187), True, 'import numpy as np\n'), ((12043, 12072), 'numpy.ones', 'np.ones', (['(coords.shape[0], 1)'], {}), '((coords.shape[0], 1))\n', (12050, 12072), True, 'import numpy as np\n'), ((12997, 13033), 'numpy.ones', 'np.ones', (['(pointcloud[0].shape[0], 1)'], {}), '((pointcloud[0].shape[0], 1))\n', (13004, 13033), True, 'import numpy as np\n')] |
import glob
import os
import numpy as np
from yt.data_objects.static_output import ParticleDataset
from yt.frontends.halo_catalog.data_structures import HaloCatalogFile
from yt.funcs import setdefaultattr
from yt.geometry.particle_geometry_handler import ParticleIndex
from yt.utilities import fortran_utils as fpu
from yt.utilities.cosmology import Cosmology
from .definitions import header_dt
from .fields import RockstarFieldInfo
class RockstarBinaryFile(HaloCatalogFile):
def __init__(self, ds, io, filename, file_id, range):
with open(filename, "rb") as f:
self.header = fpu.read_cattrs(f, header_dt, "=")
self._position_offset = f.tell()
f.seek(0, os.SEEK_END)
self._file_size = f.tell()
super(RockstarBinaryFile, self).__init__(ds, io, filename, file_id, range)
def _read_particle_positions(self, ptype, f=None):
"""
Read all particle positions in this file.
"""
if f is None:
close = True
f = open(self.filename, "rb")
else:
close = False
pcount = self.header["num_halos"]
pos = np.empty((pcount, 3), dtype="float64")
f.seek(self._position_offset, os.SEEK_SET)
halos = np.fromfile(f, dtype=self.io._halo_dt, count=pcount)
for i, ax in enumerate("xyz"):
pos[:, i] = halos[f"particle_position_{ax}"].astype("float64")
if close:
f.close()
return pos
class RockstarDataset(ParticleDataset):
_index_class = ParticleIndex
_file_class = RockstarBinaryFile
_field_info_class = RockstarFieldInfo
_suffix = ".bin"
def __init__(
self,
filename,
dataset_type="rockstar_binary",
units_override=None,
unit_system="cgs",
index_order=None,
index_filename=None,
):
super(RockstarDataset, self).__init__(
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
def _parse_parameter_file(self):
with open(self.parameter_filename, "rb") as f:
hvals = fpu.read_cattrs(f, header_dt)
hvals.pop("unused")
self.dimensionality = 3
self.refine_by = 2
prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
self.filename_template = f"{prefix}.%(num)s{self._suffix}"
self.file_count = len(glob.glob(prefix + ".*" + self._suffix))
# Now we can set up things we already know.
self.cosmological_simulation = 1
self.current_redshift = (1.0 / hvals["scale"]) - 1.0
self.hubble_constant = hvals["h0"]
self.omega_lambda = hvals["Ol"]
self.omega_matter = hvals["Om"]
cosmo = Cosmology(
hubble_constant=self.hubble_constant,
omega_matter=self.omega_matter,
omega_lambda=self.omega_lambda,
)
self.current_time = cosmo.lookback_time(self.current_redshift, 1e6).in_units(
"s"
)
self.periodicity = (True, True, True)
self.particle_types = "halos"
self.particle_types_raw = "halos"
self.domain_left_edge = np.array([0.0, 0.0, 0.0])
self.domain_right_edge = np.array([hvals["box_size"]] * 3)
self.domain_dimensions = np.ones(3, "int32")
self.parameters.update(hvals)
def _set_code_unit_attributes(self):
z = self.current_redshift
setdefaultattr(self, "length_unit", self.quan(1.0 / (1.0 + z), "Mpc / h"))
setdefaultattr(self, "mass_unit", self.quan(1.0, "Msun / h"))
setdefaultattr(self, "velocity_unit", self.quan(1.0, "km / s"))
setdefaultattr(self, "time_unit", self.length_unit / self.velocity_unit)
@classmethod
def _is_valid(cls, filename, *args, **kwargs):
if not filename.endswith(".bin"):
return False
with open(filename, mode="rb") as f:
header = fpu.read_cattrs(f, header_dt)
if header["magic"] == 18077126535843729616:
return True
return False
| [
"yt.utilities.fortran_utils.read_cattrs",
"numpy.fromfile",
"numpy.ones",
"numpy.array",
"numpy.empty",
"yt.utilities.cosmology.Cosmology",
"yt.funcs.setdefaultattr",
"glob.glob"
] | [((1160, 1198), 'numpy.empty', 'np.empty', (['(pcount, 3)'], {'dtype': '"""float64"""'}), "((pcount, 3), dtype='float64')\n", (1168, 1198), True, 'import numpy as np\n'), ((1266, 1318), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'self.io._halo_dt', 'count': 'pcount'}), '(f, dtype=self.io._halo_dt, count=pcount)\n', (1277, 1318), True, 'import numpy as np\n'), ((2800, 2916), 'yt.utilities.cosmology.Cosmology', 'Cosmology', ([], {'hubble_constant': 'self.hubble_constant', 'omega_matter': 'self.omega_matter', 'omega_lambda': 'self.omega_lambda'}), '(hubble_constant=self.hubble_constant, omega_matter=self.\n omega_matter, omega_lambda=self.omega_lambda)\n', (2809, 2916), False, 'from yt.utilities.cosmology import Cosmology\n'), ((3230, 3255), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3238, 3255), True, 'import numpy as np\n'), ((3289, 3322), 'numpy.array', 'np.array', (["([hvals['box_size']] * 3)"], {}), "([hvals['box_size']] * 3)\n", (3297, 3322), True, 'import numpy as np\n'), ((3357, 3376), 'numpy.ones', 'np.ones', (['(3)', '"""int32"""'], {}), "(3, 'int32')\n", (3364, 3376), True, 'import numpy as np\n'), ((3724, 3796), 'yt.funcs.setdefaultattr', 'setdefaultattr', (['self', '"""time_unit"""', '(self.length_unit / self.velocity_unit)'], {}), "(self, 'time_unit', self.length_unit / self.velocity_unit)\n", (3738, 3796), False, 'from yt.funcs import setdefaultattr\n'), ((605, 639), 'yt.utilities.fortran_utils.read_cattrs', 'fpu.read_cattrs', (['f', 'header_dt', '"""="""'], {}), "(f, header_dt, '=')\n", (620, 639), True, 'from yt.utilities import fortran_utils as fpu\n'), ((2176, 2205), 'yt.utilities.fortran_utils.read_cattrs', 'fpu.read_cattrs', (['f', 'header_dt'], {}), '(f, header_dt)\n', (2191, 2205), True, 'from yt.utilities import fortran_utils as fpu\n'), ((2465, 2504), 'glob.glob', 'glob.glob', (["(prefix + '.*' + self._suffix)"], {}), "(prefix + '.*' + self._suffix)\n", (2474, 2504), False, 'import glob\n'), 
((3999, 4028), 'yt.utilities.fortran_utils.read_cattrs', 'fpu.read_cattrs', (['f', 'header_dt'], {}), '(f, header_dt)\n', (4014, 4028), True, 'from yt.utilities import fortran_utils as fpu\n')] |
from typing import List, Tuple, Union, Any
import numpy as np
from collections import defaultdict
import itertools
import matplotlib.pyplot as plt
T_untokenized = Union[List[str], Tuple[List[str], List[Any]]]
def untokenize(raw: str, tokens: List[str],
return_mask: bool = False,
token_sym: Any = True,
untoken_sym: Any = False) -> T_untokenized:
"""Get between tokens symbols.
Args:
raw: Raw string.
tokens: List of tokens from raw string.
return_mask: Flag to return mask
for each new token. Format: list of
`token_sym`, `untoken_sym`.
token_sym: Object, denote token symbol.
untoken_sym: Object, denote untoken symbol.
Returns:
Tuple (full_tokens, tokens_mask) if `return_mask=True`,
else just list full_tokens.
"""
mask = []
untokenized = []
pos = raw.find(tokens[0])
if pos != 0:
untokenized.append(raw[:pos])
mask.append(untoken_sym)
raw = raw[pos:]
prev_token = tokens[0]
for token in tokens[1:]:
raw = raw[len(prev_token):]
pos = raw.find(token)
untokenized.append(prev_token)
mask.append(token_sym)
if pos:
mask.append(untoken_sym)
untokenized.append(raw[:pos])
prev_token = token
raw = raw[pos:]
untokenized.append(prev_token)
mask.append(token_sym)
cur = len(prev_token)
if cur != len(raw):
untokenized.append(raw[cur:])
mask.append(untoken_sym)
if return_mask:
return untokenized, mask
return untokenized
def find_positions(arr: List[str],
mask: List[bool]) -> List[int]:
"""Set positions and tokens.
Args:
tokens: List of tokens and untokens.
mask: Mask for tokens.
Returns:
List of positions of tokens.
"""
pos = []
for i, (token, istoken) in enumerate(zip(arr, mask)):
if istoken:
pos.append(i)
return pos
class IndexedString:
"""Indexed string."""
def __init__(self, raw_string: str, tokenizer: Any, force_order: bool = True):
"""
Args:
raw_string: Raw string.
tokenizer: Tokenizer class.
force_order: Save order, or use features as
bag-of-words.
"""
self.raw = raw_string
self.tokenizer = tokenizer
self.force_order = force_order
self.toks_ = self._tokenize(raw_string)
self.toks = [token.lower() for token in self.toks_]
self.as_list_, self.mask = untokenize(
self.raw, self.toks_, return_mask=True)
self.pos = find_positions(self.as_list_, self.mask)
self.as_np_ = np.array(self.as_list_)
self.inv = []
if not force_order:
pos = defaultdict(list)
self.vocab = {}
for token, cur in zip(self.toks, self.pos):
if token not in self.vocab:
self.vocab[token] = len(self.vocab)
self.inv.append(token)
idx = self.vocab[token]
pos[idx].append(cur)
self.pos = pos
else:
self.inv = self.toks_
def _tokenize(self, text: str) -> List[str]:
prep_text = self.tokenizer._tokenize(text)
tokens = self.tokenizer.tokenize_sentence(text)
return tokens
def word(self, idx: int) -> str:
"""Token by its index.
Args:
idx: Index of token.
Returns:
Token.
"""
return self.inv[idx]
def inverse_removing(self, to_del: Union[List[str], List[int]],
by_tokens=False) -> str:
"""Remove tokens.
Args:
to_del: Tokens (text of int) to del.
by_tokens: Flag if tokens are text or indexes.
Returns:
String without removed tokens.
"""
# todo: this type of mapping will be not use order,
# in case when we have not unique tokens.
assert (not self.force_order) or \
(self.force_order and not by_tokens)
if not self.force_order:
if by_tokens:
to_del = [self.t_i[token.lower()] for token in to_del]
to_del = np.array(to_del)
to_del = list(itertools.chain.from_iterable(
[self.pos[i] for i in to_del]))
else:
to_del = [self.pos[i] for i in to_del]
mask = np.ones_like(self.as_np_, dtype=bool)
mask[to_del] = False
new_str = ''.join(self.as_np_[mask])
return new_str
@property
def n_words(self) -> int:
"""Number of unique words."""
return len(self.pos)
def draw_html(tokens_and_weights: List[Tuple[str, float]],
cmap: Any = plt.get_cmap("bwr"),
token_template: str = """<span style="background-color: {color_hex}">{token}</span>""",
font_style: str = "font-size:14px;"
) -> str:
"""Get colored text in html format.
For color used gradient from cmap.
To normalize weights sigmoid is used.
Args:
tokens_and_weights: List of tokens.
cmap: ```matplotlib.colors.Colormap``` object.
token_template: Template for coloring the token.
font_style: Styling properties of html.
Returns:
HTML like string.
"""
def get_color_hex(weight):
rgba = cmap(1. / (1 + np.exp(weight)), bytes=True)
return '#%02X%02X%02X' % rgba[:3]
tokens_html = [
token_template.format(token=token, color_hex=get_color_hex(weight))
for token, weight in tokens_and_weights
]
raw_html = """<p style="{}">{}</p>""".format(font_style, ' '.join(tokens_html))
return raw_html
| [
"numpy.ones_like",
"numpy.exp",
"numpy.array",
"itertools.chain.from_iterable",
"collections.defaultdict",
"matplotlib.pyplot.get_cmap"
] | [((5128, 5147), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""bwr"""'], {}), "('bwr')\n", (5140, 5147), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2900), 'numpy.array', 'np.array', (['self.as_list_'], {}), '(self.as_list_)\n', (2885, 2900), True, 'import numpy as np\n'), ((4767, 4804), 'numpy.ones_like', 'np.ones_like', (['self.as_np_'], {'dtype': 'bool'}), '(self.as_np_, dtype=bool)\n', (4779, 4804), True, 'import numpy as np\n'), ((2969, 2986), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2980, 2986), False, 'from collections import defaultdict\n'), ((4561, 4577), 'numpy.array', 'np.array', (['to_del'], {}), '(to_del)\n', (4569, 4577), True, 'import numpy as np\n'), ((4604, 4664), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[self.pos[i] for i in to_del]'], {}), '([self.pos[i] for i in to_del])\n', (4633, 4664), False, 'import itertools\n'), ((5792, 5806), 'numpy.exp', 'np.exp', (['weight'], {}), '(weight)\n', (5798, 5806), True, 'import numpy as np\n')] |
"""
Build an electrophysiological dataset
=====================================
In Frites, a dataset is a structure for grouping the electrophysiological data
(e.g MEG / EEG / Intracranial) coming from multiple subjects. In addition,
some basic operations can also be performed (like slicing, smoothing etc.). In
this example we illutrate how to define a dataset using NumPy arrays.
"""
import numpy as np
from frites.dataset import DatasetEphy
import matplotlib.pyplot as plt
###############################################################################
# Create artificial data
# ----------------------
#
# We start by creating some random data for several subjects. To do that, each
# subject is going have a 3 dimensional array of shape
# (n_epochs, n_channels, n_times). Then, all of the arrays are grouped together
# in a list of length (n_subjects,)
n_subjects = 5
n_epochs = 10
n_channels = 5
n_times = 1000
sf = 512
x, ch = [], []
for k in range(n_subjects):
# generate single subject data
x_suj = np.random.rand(n_epochs, n_channels, n_times)
# generate some random channel names
ch_suj = np.array([f"ch_{r}" for r in range(n_channels)])
# concatenate in a list
x.append(x_suj)
ch.append(ch_suj)
# finally lets create a time vector
times = np.arange(n_times) / sf
###############################################################################
# Create the dataset
# ------------------
#
# The creation of the dataset is performed using the class
# :class:`frites.dataset.DatasetEphy`
dt = DatasetEphy(x.copy(), roi=ch, times=times)
print(dt)
plt.plot(dt.times, dt.x[0][:, 0, :].T)
plt.xlabel('Times')
plt.title('Electrophysiological data of the first subject, for the first '
'channel')
plt.show()
###############################################################################
# Data smoothing
# --------------
#
# If you have MNE-Python installed, you can also smooth the data using
# :class:`frites.dataset.DatasetEphy.savgol_filter`. One important thing is
# that operations are performed inplace, which means that once launched, the
# data are modified inside the dataset without copy
# high cut-off frequency at 4Hz
dt.savgol_filter(4)
plt.plot(dt.times, dt.x[0][:, 0, :].T)
plt.xlabel('Times')
plt.title('Smoothed dataset')
plt.show()
###############################################################################
# Data resampling
# ---------------
#
# Still using MNE-Python, you can also resample the dataset using
# :class:`frites.dataset.DatasetEphy.resample`
# resample the dataset using a new sampling rate of 256Hz
dt.resample(256)
plt.plot(dt.times, dt.x[0][:, 0, :].T)
plt.xlabel('Times')
plt.title('Resampled dataset')
plt.show()
###############################################################################
# Spatio-temporal slicing
# -----------------------
#
# The dataset also supports some basic slicing operations through time and
# space. Slicing is still performed inplace
# temporal selection between [0.25, 1.75]
dt[0.25:1.75, :] # the ':' symbol means that we are selecting every channel
# sphinx_gallery_thumbnail_number = 3
plt.plot(dt.times, dt.x[0][:, 0, :].T)
plt.xlabel('Times')
plt.title('Temporal slicing')
plt.show()
# spatial selection of channels ch_0 and ch_1
dt[:, ['ch_0', 'ch_1']]
print(dt.roi)
| [
"numpy.random.rand",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1592, 1630), 'matplotlib.pyplot.plot', 'plt.plot', (['dt.times', 'dt.x[0][:, 0, :].T'], {}), '(dt.times, dt.x[0][:, 0, :].T)\n', (1600, 1630), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1650), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Times"""'], {}), "('Times')\n", (1641, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1738), 'matplotlib.pyplot.title', 'plt.title', (['"""Electrophysiological data of the first subject, for the first channel"""'], {}), "(\n 'Electrophysiological data of the first subject, for the first channel')\n", (1660, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1755, 1757), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2243), 'matplotlib.pyplot.plot', 'plt.plot', (['dt.times', 'dt.x[0][:, 0, :].T'], {}), '(dt.times, dt.x[0][:, 0, :].T)\n', (2213, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2244, 2263), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Times"""'], {}), "('Times')\n", (2254, 2263), True, 'import matplotlib.pyplot as plt\n'), ((2264, 2293), 'matplotlib.pyplot.title', 'plt.title', (['"""Smoothed dataset"""'], {}), "('Smoothed dataset')\n", (2273, 2293), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2302, 2304), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2653), 'matplotlib.pyplot.plot', 'plt.plot', (['dt.times', 'dt.x[0][:, 0, :].T'], {}), '(dt.times, dt.x[0][:, 0, :].T)\n', (2623, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2673), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Times"""'], {}), "('Times')\n", (2664, 2673), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2704), 'matplotlib.pyplot.title', 'plt.title', (['"""Resampled dataset"""'], {}), "('Resampled dataset')\n", (2683, 2704), True, 'import matplotlib.pyplot as plt\n'), ((2705, 2715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2713, 
2715), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3167), 'matplotlib.pyplot.plot', 'plt.plot', (['dt.times', 'dt.x[0][:, 0, :].T'], {}), '(dt.times, dt.x[0][:, 0, :].T)\n', (3137, 3167), True, 'import matplotlib.pyplot as plt\n'), ((3168, 3187), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Times"""'], {}), "('Times')\n", (3178, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3217), 'matplotlib.pyplot.title', 'plt.title', (['"""Temporal slicing"""'], {}), "('Temporal slicing')\n", (3197, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3218, 3228), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3226, 3228), True, 'import matplotlib.pyplot as plt\n'), ((1023, 1068), 'numpy.random.rand', 'np.random.rand', (['n_epochs', 'n_channels', 'n_times'], {}), '(n_epochs, n_channels, n_times)\n', (1037, 1068), True, 'import numpy as np\n'), ((1286, 1304), 'numpy.arange', 'np.arange', (['n_times'], {}), '(n_times)\n', (1295, 1304), True, 'import numpy as np\n')] |
import os
import astropy.constants as const
import astropy.units as u
import numpy as np
from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun
from astropy.time import Time
from sora.config import input_tests
__all__ = ['plot_occ_map']
def xy2latlon(x, y, loncen, latcen, time):
    """Calculates the longitude and latitude given projected positions x and y.

    Parameters
    ----------
    x : `int`, `float`
        Projected position in x, in the GCRS, in meters.
    y : `int`, `float`
        Projected position in y, in the GCRS, in meters.
    loncen : `int`, `float`
        Center longitude of projection, in degrees.
    latcen : `int`, `float`
        Center latitude of projection, in degrees.
    time : `astropy.time.Time`
        Time of referred projection.

    Returns
    -------
    lon, lat : `list`
        Longitude and Latitude whose projection at loncen, latcen results
        in x, y (deg). Entries whose projected point does not intersect
        the Earth's surface are flagged with the sentinel value 1e+31.
    """
    r = const.R_earth.to(u.m).value
    # Build the projection center: an Earth site at (loncen, latcen) expressed
    # in the GCRS at the requested time; it becomes the origin of the offset frame.
    site_cen = EarthLocation(loncen*u.deg, latcen*u.deg)
    itrs_cen = site_cen.get_itrs(obstime=time)
    gcrs_cen = itrs_cen.transform_to(GCRS(obstime=time))
    # Map the projected-plane inputs onto the offset frame's (y, z) axes; the
    # frame's x axis is the line of sight toward the projection center
    # (mirrors the (y, z) -> (x, y) mapping used by latlon2xy).
    z = np.array(y, ndmin=1)
    y = np.array(x, ndmin=1)
    # Solve x^2 = R^2 - y^2 - z^2: only points with a real solution fall on
    # the visible Earth disk and can be inverted back to a ground location.
    x2 = r*r-y*y-z*z
    a = np.where(x2 >= 0.0)
    # Positive root: the hemisphere facing the observer.
    x = np.sqrt(x2[a])
    y = y[a]
    z = z[a]
    # Pre-fill outputs with the off-disk sentinel; only indices in `a` get real values.
    lon = np.repeat(1e+31, len(x2))
    lat = np.repeat(1e+31, len(x2))
    center_frame = SkyOffsetFrame(origin=gcrs_cen)
    if len(x) > 0:
        n = 0
        # When `time` is an array matching the inputs, keep only the epochs of
        # the on-disk points so array shapes stay aligned below.
        if not time.isscalar and len(time) == len(x2):
            time = time[a]
        while True:
            n += 1
            # Cartesian point in the sky-offset frame -> GCRS -> ITRS -> geodetic site.
            new_pos = SkyCoord(x*u.m, y*u.m, z*u.m, representation_type='cartesian', frame=center_frame[a])
            n_coord = new_pos.transform_to(GCRS(obstime=time))
            n_itrs = n_coord.transform_to(ITRS(obstime=time))
            n_site = n_itrs.earth_location
            # Drop the site to sea level (height=0) before re-projecting.
            n_site = EarthLocation(n_site.lon, n_site.lat, 0)
            itrs_site = n_site.get_itrs(obstime=time)
            gcrs_site = itrs_site.transform_to(GCRS(obstime=time))
            target1 = gcrs_site.transform_to(center_frame[a])
            if n == 4:
                # Fixed-point iteration: 4 passes are taken as converged —
                # presumably tuned empirically; TODO confirm accuracy is sufficient.
                lon[a] = n_site.lon.deg
                lat[a] = n_site.lat.deg
                break
            # Refine the line-of-sight coordinate from the re-projected target;
            # (y, z) keep their original projected values.
            x = target1.cartesian.x.to(u.m).value
    return lon, lat
def latlon2xy(lon, lat, loncen, latcen):
    """Project geodetic coordinates onto the plane of the (loncen, latcen) direction.

    Parameters
    ----------
    lon : `int`, `float`
        Longitude to calculate projection.
    lat : `int`, `float`
        Latitude to calculate projection.
    loncen : `int`, `float`
        Center longitude of projection, in degrees.
    latcen : `int`, `float`
        Center latitude of projection, in degrees.

    Returns
    -------
    x, y : `list`
        Projection of lon, lat at loncen, latcen, in the ITRS (meters).
        Sites on the far side of the Earth are flagged with 1e+31.
    """
    # Offset frame anchored at the projection center on the Earth's surface.
    center_itrs = EarthLocation(loncen * u.deg, latcen * u.deg).get_itrs()
    lon_arr = np.array(lon, ndmin=1)
    lat_arr = np.array(lat, ndmin=1)
    # Sites at sea level for every requested (lon, lat) pair.
    sites = EarthLocation(lon_arr * u.deg, lat_arr * u.deg, height=0 * u.m)
    offset = sites.get_itrs().transform_to(SkyOffsetFrame(origin=center_itrs))
    cart = offset.cartesian
    # The offset frame's (y, z) axes span the projection plane.
    x_proj = cart.y.to(u.m).value
    y_proj = cart.z.to(u.m).value
    # Negative x means the site lies behind the limb (not visible): flag it.
    hidden = np.where(cart.x.to(u.m).value < 0.0)
    x_proj[hidden] = 1e+31
    y_proj[hidden] = 1e+31
    return x_proj, y_proj
def plot_occ_map(name, radius, coord, time, ca, pa, vel, dist, mag=0, longi=0, **kwargs):
"""Plots the map of the occultation.
Parameters
----------
name : `str`
Name of the object.
radius : `int`, `float`
Radius of the object, in km.
coord : `str`, `astropy.coordinates.SkyCoord`
Coordinates of the star (``"hh mm ss.sss dd mm ss.sss"`` or
``"hh.hhhhhhhh dd.dddddddd"``).
time : `str`, `astropy.time.Time`
Instant of Closest Approach (iso or isot format).
ca : `int`, `float`
Closest Approach Distance, in arcsec.
pa : `int`, `float`
Position Angle at C/A, in degrees.
vel : `int`, `float`
Velocity of the event, in km/s.
dist : `int`, `float`
Object distance at C/A, in AU.
mag : `int`, `float`, default=0
Mag* = Normalized magnitude to vel=20km/s.
longi : `int`, `float`, default=0
East longitude of sub-planet point, deg, positive towards East.
nameimg : `str`
Change the name of the imaged saved.
path : `str`
Path to a directory where to save map.
resolution : `int`, default=2
Cartopy feature resolution.\n
- ``1`` means a resolution of "10m";\n
- ``2`` a resolution of "50m";\n
- ``3`` a resolution of "100m".
states : `bool`
If True, plots the states borders of the countries. The states
of some countries will only be shown depending on the resolution.
zoom : `int`, `float`
Zooms in or out of the map.
centermap_geo : `list`, default=None
Center the map given coordinates in longitude and latitude. It must be
a list with two numbers.
centermap_delta : `list`, default=None
Displace the center of the map given displacement in X and Y, in km.
It must be a list with two numbers.
centerproj : `list`
Rotates the Earth to show occultation with the center projected at a
given longitude and latitude. It must be a list with two numbers.
labels : `bool`, default=True
Plots text above and below the map with the occultation parameters.
meridians : `int`, default=30
Plots lines representing the meridians for given interval, in degrees.
parallels : `int`, default=30
Plots lines representing the parallels for given interval, in degrees.
sites : `dict`
Plots site positions in map. It must be a python dictionary where the
key is the `name` of the site, and the value is a list with `longitude`,
`latitude`, `delta_x`, `delta_y` and `color`. `delta_x` and `delta_y`
are displacement, in km, from the point position of the site in the map
and the `name`. `color` is the color of the point.
site_name : `bool`
If True, it prints the name of the sites given, else it plots only the points.
site_box_alpha : `int`, `float`, default=0
Sets the transparency of a box surrounding each station name. 0 equals to
transparent, and 1 equals to opaque.
countries : `dict`
Plots the names of countries. It must be a python dictionary where the
key is the name of the country and the value is a list with longitude
and latitude of the lower left part of the text.
offset : `list`
Applies an offset to the ephemeris, calculating new CA and instant of
CA. It is a pair of `delta_RA*cosDEC` and `delta_DEC`.
mapstyle : `int`, default=1
Define the color style of the map. ``'1'`` is the default black
and white scale. ``'2'`` is a colored map.
error : `int`, `float`
Ephemeris error in mas. It plots a dashed line representing radius + error.
ercolor : `str`
Changes the color of the lines of the error bar.
ring : `int`, `float`
Plots a dashed line representing the location of a ring. It is given
in km, from the center.
rncolor : `str`
Changes the color of ring lines.
atm : `int`, `float`
Plots a dashed line representing the location of an atmosphere. It is
given in km, from the center.
atcolor : `str`
Changes the color of atm lines.
chord_delta : `list`
List with distances from center to plot chords.
chord_geo : `2d-list`
List with pairs of coordinates to plot chords.
chcolor : `str`, default='grey'
Color of the line of the chords.
heights : `list`
It plots a circular dashed line showing the locations where the observer
would observe the occultation at a given height above the horizons.
This must be a list.
hcolor : `str`
Changes the color of the height lines.
mapsize : `list`, default= [46.0, 38.0]
The size of figure, in cm. It must be a list with two values.
cpoints : `int`, `float`, default=60
Interval for the small points marking the center of shadow, in seconds.
ptcolor : `str`
Change the color of the center points.
alpha : `float`, default=0.2
The transparency of the night shade, where 0.0 is full transparency and
1.0 is full black.
fmt : `str`, default:'png'
The format to save the image. It is parsed directly by `matplotlib.pyplot`.
dpi : `int`, default=100
Resolution in "dots per inch". It defines the quality of the image.
lncolor : `str`
Changes the color of the line that represents the limits of the shadow
over Earth.
outcolor :`str`
Changes the color of the lines that represents the limits of the shadow
outside Earth.
scale : `int`, `float`
Arbitrary scale for the size of the name of the site.
cscale : `int`, `float`
Arbitrary scale for the name of the country.
sscale : `int`, `float`
Arbitrary scale for the size of point of the site.
pscale : `int`, `float`
Arbitrary scale for the size of the points that represent the center of
the shadow.
arrow : `bool`
If True, it plots the arrow with the occultation direction.
Important
---------
Required parameters to plot an occultation map: 'name', 'radius', 'coord',
'time', 'ca', 'pa', 'vel', and 'dist'.
Note
----
The parameters 'mag' and 'longi' are optional and only printed in label.
All other remaining parameters can be used to further customize the Map
configuration.
When producing the map, only one of 'centermap_geo' or 'centermap_delta'
options can be used at a time.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
allowed_kwargs = ['alpha', 'arrow', 'atcolor', 'atm', 'centermap_delta', 'centermap_geo', 'centerproj',
'chcolor', 'chord_delta', 'chord_geo', 'countries', 'cpoints', 'cscale', 'dpi', 'ercolor',
'error', 'fmt', 'hcolor', 'heights', 'labels', 'lncolor', 'mapsize', 'mapstyle', 'meridians',
'nameimg', 'nscale', 'offset', 'outcolor', 'parallels', 'path', 'pscale', 'ptcolor',
'resolution', 'ring', 'rncolor', 'site_name', 'sites', 'sscale', 'states', 'zoom',
'site_box_alpha']
    # Reject any kwarg not in the allowed list before anything else runs.
    input_tests.check_kwargs(kwargs, allowed_kwargs=allowed_kwargs)
    if not type(name) == str:
        raise TypeError('name keyword must be a string')
    radius = radius*u.km
    # Collect the occultation parameters into one dict, attaching astropy units.
    occs = {}
    try:
        occs['stars'] = SkyCoord(coord, frame='icrs', unit=(u.hourangle, u.degree))
    # NOTE(review): bare except hides the real parsing error; consider `except ValueError`.
    except:
        raise KeyError('"star" keyword is not in the format: "hh mm ss.sss dd mm ss.sss" or "hh.hhhhhhhh dd.dddddddd"')
    try:
        occs['datas'] = Time(time)
    # NOTE(review): bare except — same remark as above.
    except:
        raise KeyError('"time" keyword is not a iso or isot time format')
    occs['ca'] = ca*u.arcsec  # closest approach distance
    occs['posa'] = pa*u.deg  # position angle
    occs['vel'] = vel*(u.km/u.s)  # shadow velocity
    occs['dist'] = dist*u.AU  # object distance
    occs['magG'] = mag  # star magnitude (only printed in the label)
    occs['longi'] = longi  # East longitude of sub-planet point (label only)
    mapstyle = kwargs.get('mapstyle', 1)
    if mapstyle not in [1, 2]:
        # NOTE(review): stray ']' at the end of this error message.
        raise ValueError('mapstyle must be 1 or 2]')
    resolution = kwargs.get('resolution', 2)
    if resolution not in [1, 2, 3]:
        raise TypeError('resolution keyword must be one of these: [1, 2, 3] where 1=10m, 2=50m and 3=100m')
    # Map the 1/2/3 choice to cartopy Natural Earth resolution strings.
    # NOTE(review): option 3 is actually '110m', while the error message above says "3=100m".
    res = ['10m', '50m', '110m']
    resolution = res[resolution-1]
nameimg = kwargs.get('nameimg', '{}_{}'.format(name, occs['datas'].isot.replace(':', '_')))
fmt = kwargs.get('fmt', 'png')
dpi = kwargs.get('dpi', 100)
step = kwargs.get('step', 1)
mapsize = kwargs.get('mapsize', [46.0, 38.0])*u.cm
erro = kwargs.get('error', None)
ring = kwargs.get('ring', None)
atm = kwargs.get('atm', None)
cpoints = kwargs.get('cpoints', 60)
states = kwargs.get('states', True)
labels = kwargs.get('labels', True)
meridians = kwargs.get('meridians', 30)
parallels = kwargs.get('parallels', 30)
nscale = kwargs.get('nscale', 1)
cscale = kwargs.get('cscale', 1)
sscale = kwargs.get('sscale', 1)
pscale = kwargs.get('pscale', 1)
heights = np.array(kwargs.get('heights'), None)
alpha = kwargs.get('alpha', 0.2)
site_box_alpha = kwargs.get('site_box_alpha', 0.0)
centermap_geo = kwargs.get('centermap_geo', None)
centermap_delta = kwargs.get('centermap_delta', None)
if 'centermap_geo' in kwargs and 'centermap_delta' in kwargs:
raise ValueError('User must give "centermap_geo" OR "centermap_delta"')
zoom = kwargs.get('zoom', 1)
if zoom <= 0:
raise ValueError('zoom can not be equal or smaller than 0.')
off_ra, off_de = kwargs.get('offset', [0.0, 0.0])*u.mas
arrow = kwargs.get('arrow', True)
site_name = kwargs.get('site_name', True)
path = kwargs.get('path', '.')
if not os.path.exists(path):
raise IOError('Path does not exists')
chord_delta = np.array(kwargs.get('chord_delta', []), ndmin=1)*u.km
chord_geo = kwargs.get('chord_geo', [])
if len(chord_geo) > 0:
try:
b = np.array(chord_geo, ndmin=2)
chord_geo = b.reshape(len(b), 2)
except:
raise ValueError('chord_geo must a set of pairs with longitude and latitude')
chord_geo = EarthLocation(*chord_geo.T)
sites = {}
if 'sites' in kwargs.keys():
if type(kwargs['sites']) == str and os.path.isfile(kwargs['sites']):
data = np.loadtxt(kwargs['sites'], dtype={'names': ('name', 'lon', 'lat', 'offx', 'offy', 'color'),
'formats': ('S30', 'f8', 'f8', 'f8', 'f8', 'S30')},
delimiter=',', ndmin=1)
for i, s in enumerate(data):
sites[s['name'].strip().decode()] = [s['lon'], s['lat'], s['offx'], s['offy'], s['color'].strip().decode()]
elif type(kwargs['sites']) == dict:
sites = kwargs['sites']
else:
raise TypeError('sites keyword must be a file or a dictionary')
countries = {}
if 'countries' in kwargs.keys():
if type(kwargs['countries']) == str and os.path.isfile(kwargs['countries']):
data = np.loadtxt(kwargs['countries'], dtype={'names': ('name', 'lon', 'lat'), 'formats': ('S30', 'f8', 'f8')},
delimiter=',', ndmin=1)
for i, c in enumerate(data):
countries[c['name'].strip().decode()] = [c['lon'], c['lat']]
elif type(kwargs['countries']) == dict:
countries = kwargs['countries']
else:
raise TypeError('country keyword must be a file or a dictionary')
    # calculates offsets
    # Decompose the (off_ra, off_de) ephemeris offset by the position angle:
    # dca shifts the closest-approach distance, dt shifts the central instant
    # (along-track offset divided by the shadow velocity).
    dca = off_ra*np.sin(occs['posa']) + off_de*np.cos(occs['posa'])
    dt = (-(off_ra * np.cos(occs['posa']) - off_de * np.sin(occs['posa'])).to(u.rad) * occs['dist'].to(u.km) / np.absolute(
        occs['vel'])).value * u.s
    ca1 = occs['ca'] + dca
    data = occs['datas'] + dt
    # define map parameters
    # Geocentric direction of the star at the (offset-corrected) central time;
    # its sub-point on Earth becomes the default projection center.
    center_gcrs = GCRS(occs['stars'].ra, occs['stars'].dec, 1*u.R_earth, obstime=data)
    center_itrs = center_gcrs.transform_to(ITRS(obstime=data))
    center_map = center_itrs.earth_location
    centert = True
    # Optional user override of the projection center: an EarthLocation or a
    # (lon, lat) pair. Overriding also disables center-based clipping (centert).
    if 'centerproj' in kwargs.keys():
        if type(kwargs['centerproj']) == EarthLocation:
            center_map = kwargs['centerproj']
        elif np.array(kwargs['centerproj']).shape == (2,):
            center_map = EarthLocation.from_geodetic(*kwargs['centerproj'], 0.0)
        else:
            raise TypeError('centerproj must be an Astropy EarthLocation Object or an array with Longitude and Latitude only')
        centert = False
fig = plt.figure(figsize=(mapsize.to(u.imperial.inch).value),facecolor='w')
projection = ccrs.Orthographic(central_longitude=center_map.lon.value, central_latitude=center_map.lat.value)
if labels:
axf = plt.axes(projection=projection)
else:
axf = plt.axes([-0.001, -0.001, 1.002, 1.002], projection=projection)
axf.set_global()
# calculates regions for zoom
limits = None
r = const.R_earth.to(u.m).value
if centermap_geo is not None:
cx, cy = latlon2xy(centermap_geo[0], centermap_geo[1], center_map.lon.value, center_map.lat.value)
limits = [cx/1000.0, cy/1000.0]
if np.any(np.absolute(limits) > r):
raise ValueError('Coordinates for centermap_geo are outside the visible range.')
elif centermap_delta is not None:
limits = centermap_delta
elif zoom != 1:
limits = [0, 0]
if limits is not None:
dr = r/zoom
l0 = (limits[0]*u.km).to(u.m).value
l1 = (limits[1]*u.km).to(u.m).value
dmsize = mapsize[0]/mapsize[1]
if mapsize[1] < mapsize[0]:
lx = l0 - dr*dmsize
ux = l0 + dr*dmsize
ly = l1 - dr
uy = l1 + dr
else:
lx = l0 - dr
ux = l0 + dr
ly = l1 - dr/dmsize
uy = l1 + dr/dmsize
axf.set_xlim(lx, ux)
axf.set_ylim(ly, uy)
if labels and zoom > 1:
centert = False
# plots features
axf.coastlines(resolution=resolution, color='0.3')
ocean = cfeature.NaturalEarthFeature('physical', 'ocean', resolution)
land = cfeature.NaturalEarthFeature('physical', 'land', resolution)
border = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries', resolution)
if mapstyle == 1:
axf.add_feature(ocean, zorder=0, color='0.9')
axf.add_feature(land, zorder=0, edgecolor='None', color='1.0')
axf.add_feature(border, zorder=0.1, edgecolor='0.4', facecolor='None')
axf.add_feature(cfeature.RIVERS, zorder=0, edgecolor='0.7')
axf.add_feature(cfeature.LAKES, zorder=0, color='0.7')
ptcolor = 'black'
lncolor = 'blue'
ercolor = 'blue'
rncolor = 'blue'
atcolor = 'blue'
outcolor = 'red'
hcolor = 'black'
chcolor = 'gray'
elif mapstyle == 2:
axf.add_feature(ocean, zorder=0, facecolor=cfeature.COLORS['water'])
axf.add_feature(land, zorder=0, edgecolor='None', facecolor=cfeature.COLORS['land'])
axf.add_feature(border, zorder=0, edgecolor='0.5', facecolor=cfeature.COLORS['land'])
axf.add_feature(border, zorder=0.1, edgecolor='0.5', facecolor='None')
axf.add_feature(cfeature.RIVERS, zorder=0)
axf.add_feature(cfeature.LAKES, zorder=0)
ptcolor = 'red'
lncolor = 'blue'
ercolor = 'red'
rncolor = 'black'
atcolor = 'black'
outcolor = 'red'
hcolor = 'black'
chcolor = 'gray'
if states:
states_r = cfeature.NaturalEarthFeature('cultural', 'admin_1_states_provinces', resolution)
axf.add_feature(states_r, zorder=0, edgecolor='0.6', facecolor='None')
gl = axf.gridlines(xlocs=np.arange(-180, 180.001, meridians), ylocs=np.arange(-90, 90.001, parallels))
gl.n_steps = 180
sun = get_sun(data)
sun_lat = sun.dec
sun_lon = sun.ra - data.sidereal_time('mean', 'greenwich')
pole_lon = sun_lon.deg
pole_lat = sun_lat.deg
proj_sun = ccrs.Orthographic(central_longitude=pole_lon+180, central_latitude=-pole_lat)
bordx = r*np.cos(np.arange(0, 361, 0.5)*u.deg)
bordy = r*np.sin(np.arange(0, 361, 0.5)*u.deg)
axf.fill(bordx, bordy, transform=proj_sun, linewidth=0, color='black', alpha=alpha)
axf.fill(bordx*np.cos(18*u.deg), bordy*np.cos(18*u.deg), transform=proj_sun, linewidth=0, color='black', alpha=alpha)
ptcolor = kwargs.get('ptcolor', ptcolor)
lncolor = kwargs.get('lncolor', lncolor)
ercolor = kwargs.get('ercolor', ercolor)
rncolor = kwargs.get('rncolor', rncolor)
atcolor = kwargs.get('atcolor', atcolor)
outcolor = kwargs.get('outcolor', outcolor)
hcolor = kwargs.get('hcolor', hcolor)
chcolor = kwargs.get('chcolor', chcolor)
    # calculates path
    # Time samples spanning +/- (8000 km / |vel|) seconds around the central
    # instant, i.e. roughly the time for the shadow to cross ~8000 km.
    vec = np.arange(0, int(8000/(np.absolute(occs['vel'].value))), step)
    vec = np.sort(np.concatenate((vec, -vec[1:]), axis=0))
    pa = Angle(occs['posa'])
    pa.wrap_at('180d', inplace=True)
    # paplus folds the position angle into [-90 deg, +90 deg] so the
    # along-/cross-track decomposition below keeps a consistent orientation.
    if pa > 90*u.deg:
        paplus = pa - 180*u.deg
    elif pa < -90*u.deg:
        paplus = pa + 180*u.deg
    else:
        paplus = pa
    deltatime = vec*u.s
datas1 = data + deltatime
centers_gcrs = GCRS(np.repeat(occs['stars'].ra, len(datas1)), np.repeat(occs['stars'].dec, len(datas1)),
1*u.R_earth, obstime=datas1)
centers_itrs = centers_gcrs.transform_to(ITRS(obstime=datas1))
centers = centers_itrs.earth_location
dista = (occs['dist'].to(u.km)*ca1.to(u.rad)).value*u.km
ax = dista*np.sin(pa) + (deltatime*occs['vel'])*np.cos(paplus)
by = dista*np.cos(pa) - (deltatime*occs['vel'])*np.sin(paplus)
ax2 = ax - radius * np.sin(paplus)
by2 = by - radius * np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=lncolor)
j = np.where(lon1 > 1e+30)
if 'centerproj' not in kwargs:
plt.plot(ax2[j].to(u.m).value, by2[j].to(u.m).value, color=outcolor, clip_on=(not centert), zorder=-0.2)
ax3 = ax + radius * np.sin(paplus)
by3 = by + radius * np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], transform=ccrs.Geodetic(), color=lncolor)
j = np.where(lon2 > 1e+30)
if 'centerproj' not in kwargs:
plt.plot(ax3[j].to(u.m).value, by3[j].to(u.m).value, color=outcolor, clip_on=(not centert), zorder=-0.2)
# plots chords_delta
for val in chord_delta:
ax2 = ax + val*np.sin(paplus)
by2 = by + val*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=chcolor)
# plots chords_geo
for coord_geo in chord_geo:
xt, yt = latlon2xy(coord_geo.lon.deg, coord_geo.lat.deg, centers.lon.value, centers.lat.value)*u.m
val = np.sqrt((xt-ax)**2 + (yt-by)**2)
k = val.argmin()
ang = np.arctan2((yt-by)[k], (xt-ax)[k])
val = np.sign(np.sin(ang))*val[k]
ax2 = ax + val*np.sin(paplus)
by2 = by + val*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], transform=ccrs.Geodetic(), color=chcolor)
# plots error
if erro is not None:
err = erro*u.mas
errd = (occs['dist'].to(u.km)*err.to(u.rad)).value*u.km
ax2 = ax - errd*np.sin(paplus) - radius*np.sin(paplus)
by2 = by - errd*np.cos(paplus) - radius*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], '--', transform=ccrs.Geodetic(), color=ercolor)
ax3 = ax + errd*np.sin(paplus) + radius*np.sin(paplus)
by3 = by + errd*np.cos(paplus) + radius*np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], '--', transform=ccrs.Geodetic(), color=ercolor)
    # plots ring
    # Draw the two ring-limit tracks as dashed lines, offset by +/- ring km
    # perpendicular to the shadow path (same construction as the body limits).
    if ring is not None:
        rng = ring*u.km
        ax2 = ax - rng*np.sin(paplus)
        by2 = by - rng*np.cos(paplus)
        lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
        # Values >= 1e+30 appear to be the sentinel for points that do not
        # project onto the globe — TODO confirm against xy2latlon.
        j = np.where(lon1 < 1e+30)
        axf.plot(lon1[j], lat1[j], '--', transform=ccrs.Geodetic(), color=rncolor)
        ax3 = ax + rng*np.sin(paplus)
        by3 = by + rng*np.cos(paplus)
        lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
        j = np.where(lon2 < 1e+30)
        axf.plot(lon2[j], lat2[j], '--', transform=ccrs.Geodetic(), color=rncolor)
# plots atm
if atm is not None:
atmo = atm*u.km
ax2 = ax - atmo*np.sin(paplus)
by2 = by - atmo*np.cos(paplus)
lon1, lat1 = xy2latlon(ax2.to(u.m).value, by2.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon1 < 1e+30)
axf.plot(lon1[j], lat1[j], '--', transform=ccrs.Geodetic(), color=atcolor)
ax3 = ax + atmo*np.sin(paplus)
by3 = by + atmo*np.cos(paplus)
lon2, lat2 = xy2latlon(ax3.to(u.m).value, by3.to(u.m).value, centers.lon.value, centers.lat.value, datas1)
j = np.where(lon2 < 1e+30)
axf.plot(lon2[j], lat2[j], '--', transform=ccrs.Geodetic(), color=atcolor)
# plots center points
vec = np.arange(0, int(8000/(np.absolute(occs['vel'].value))), cpoints)
deltatime = np.sort(np.concatenate((vec, -vec[1:]), axis=0))*u.s
axc = dista*np.sin(pa) + (deltatime*occs['vel'])*np.cos(paplus)
byc = dista*np.cos(pa) - (deltatime*occs['vel'])*np.sin(paplus)
plt.plot(axc.to(u.m).value, byc.to(u.m).value, 'o', color=ptcolor, clip_on=(not centert),
markersize=mapsize[0].value*pscale*8.0/46.0, zorder=-0.2)
datas2 = data + deltatime
centers_p_gcrs = GCRS(np.repeat(occs['stars'].ra, len(datas2)), np.repeat(occs['stars'].dec, len(datas2)),
1*u.R_earth, obstime=datas2)
centers_p_itrs = centers_p_gcrs.transform_to(ITRS(obstime=datas2))
centers_p = centers_p_itrs.earth_location
clon1, clat1 = xy2latlon(axc.to(u.m).value, byc.to(u.m).value, centers_p.lon.value, centers_p.lat.value, datas2)
j = np.where(clon1 < 1e+30)
axf.plot(clon1[j], clat1[j], 'o', transform=ccrs.Geodetic(), color=ptcolor, clip_on=True,
markersize=mapsize[0].value*pscale*8.0/46.0)
datas1 = data + deltatime
center_gcrs = GCRS(np.repeat(occs['stars'].ra, 1), np.repeat(occs['stars'].dec, 1),
1*u.R_earth, obstime=data)
center_itrs = center_gcrs.transform_to(ITRS(obstime=data))
center = center_itrs.earth_location
xp = [(dista.to(u.m)*np.sin(pa)).value]
yp = [(dista.to(u.m)*np.cos(pa)).value]
loncen, latcen = xy2latlon(xp, yp, center.lon.value, center.lat.value, data)
j = np.where(loncen < 1e+30)
if len(j) > 0:
axf.plot(loncen[j], latcen[j], 'o', transform=ccrs.Geodetic(), color=ptcolor, clip_on=True,
markersize=mapsize[0].value*pscale*24.0/46.0)
elif not centert:
plt.plot(xp, yp, 'o', color=ptcolor, clip_on=False, markersize=mapsize[0].value*pscale*24.0/46.0)
# plots the heights
if 'heights' in kwargs.keys():
for h in heights:
lonb, latb = xy2latlon(bordx * np.cos(h * u.deg), bordy * np.cos(h * u.deg), center.lon.value,
center.lat.value, data)
axf.plot(lonb, latb, transform=ccrs.Geodetic(), linestyle='dotted', color=hcolor)
# plots the the direction arrow
if arrow:
if limits is None:
dx = 1000000*(np.sin(paplus+90*u.deg)*np.sign(occs['vel'])).value
dy = 1000000*(np.cos(paplus+90*u.deg)*np.sign(occs['vel'])).value
plt.annotate('', xy=(5500000+dx, -5500000+dy), xycoords='data',
xytext=(5500000, -5500000), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top', annotation_clip=False
)
else:
dx = (1000000/zoom) * (np.sin(paplus + 90 * u.deg) * np.sign(occs['vel'])).value
dy = (1000000/zoom) * (np.cos(paplus + 90 * u.deg) * np.sign(occs['vel'])).value
plt.annotate('', xy=(lx + (ux-lx)*0.9 + dx, ly + (uy-ly)*0.1 + dy), xycoords='data',
xytext=(lx + (ux-lx)*0.9, ly + (uy-ly)*0.1), textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top', annotation_clip=False
)
# plots the countries names
for country in countries.keys():
plt.text(countries[country][0], countries[country][1], country, transform=ccrs.Geodetic(),
weight='bold', color='grey', fontsize=30*cscale, family='monospace')
# plots the sites
for site in sites.keys():
s = EarthLocation.from_geodetic(sites[site][0], sites[site][1], 0.0*u.km)
axf.plot(s.lon.deg, s.lat.deg, 'o', transform=ccrs.Geodetic(),
markersize=mapsize[0].value*sscale*10.0/46.0, color=sites[site][4])
if site_name:
xt, yt = latlon2xy(s.lon.deg, s.lat.deg, center_map.lon.value, center_map.lat.value)
axf.text(xt + sites[site][2]*1000, yt+sites[site][3]*1000, site, weight='bold',
fontsize=25*nscale, family='monospace',
bbox={'facecolor': 'white', 'alpha': site_box_alpha, 'pad': 2, 'edgecolor':'none'})
# Define the title and label of the output
title = ('Object Diam Tmax dots <> ra_offset_dec\n'
'{:10s} {:4.0f} km {:5.1f}s {:02d} s <>{:+6.1f} {:+6.1f} \n'.
format(name, 2*radius.value, (2*radius/np.absolute(occs['vel'])).value,
cpoints, off_ra.value, off_de.value))
labelx = ("\n year-m-d h:m:s UT ra__dec__J2000__candidate C/A P/A vel Delta G* long\n"
"{} {:02d} {:02d} {:07.4f} {:+03d} {:02d} {:06.3f} {:6.3f} {:6.2f} {:6.2f} {:5.2f} {:5.1f} {:3.0f}".
format(data.iso, int(occs['stars'].ra.hms.h), int(occs['stars'].ra.hms.m), occs['stars'].ra.hms.s,
int(occs['stars'].dec.dms.d), np.absolute(int(occs['stars'].dec.dms.m)),
np.absolute(occs['stars'].dec.dms.s), ca1.value, occs['posa'].value,
occs['vel'].value, occs['dist'].value, occs['magG'], occs['longi']))
# plots the map
if labels:
axf.set_title(title, family='monospace', weight='bold', fontsize=22)
axf.text(0.5, -0.1, labelx, va='bottom', ha='center', rotation='horizontal', rotation_mode='anchor',
transform=axf.transAxes, family='monospace', weight='bold', fontsize=22)
filepath = os.path.join(path, '{}.{}'.format(nameimg, fmt))
plt.savefig(filepath, format=fmt, dpi=dpi)
print('{}.{} generated'.format(nameimg, fmt))
plt.clf()
plt.close()
| [
"astropy.coordinates.EarthLocation",
"numpy.sqrt",
"astropy.coordinates.GCRS",
"astropy.coordinates.get_sun",
"sora.config.input_tests.check_kwargs",
"numpy.array",
"numpy.arctan2",
"astropy.constants.R_earth.to",
"numpy.sin",
"numpy.arange",
"os.path.exists",
"numpy.repeat",
"astropy.coordi... | [((1032, 1077), 'astropy.coordinates.EarthLocation', 'EarthLocation', (['(loncen * u.deg)', '(latcen * u.deg)'], {}), '(loncen * u.deg, latcen * u.deg)\n', (1045, 1077), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((1187, 1207), 'numpy.array', 'np.array', (['y'], {'ndmin': '(1)'}), '(y, ndmin=1)\n', (1195, 1207), True, 'import numpy as np\n'), ((1216, 1236), 'numpy.array', 'np.array', (['x'], {'ndmin': '(1)'}), '(x, ndmin=1)\n', (1224, 1236), True, 'import numpy as np\n'), ((1267, 1286), 'numpy.where', 'np.where', (['(x2 >= 0.0)'], {}), '(x2 >= 0.0)\n', (1275, 1286), True, 'import numpy as np\n'), ((1295, 1309), 'numpy.sqrt', 'np.sqrt', (['x2[a]'], {}), '(x2[a])\n', (1302, 1309), True, 'import numpy as np\n'), ((1428, 1459), 'astropy.coordinates.SkyOffsetFrame', 'SkyOffsetFrame', ([], {'origin': 'gcrs_cen'}), '(origin=gcrs_cen)\n', (1442, 1459), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((2935, 2980), 'astropy.coordinates.EarthLocation', 'EarthLocation', (['(loncen * u.deg)', '(latcen * u.deg)'], {}), '(loncen * u.deg, latcen * u.deg)\n', (2948, 2980), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((3023, 3045), 'numpy.array', 'np.array', (['lon'], {'ndmin': '(1)'}), '(lon, ndmin=1)\n', (3031, 3045), True, 'import numpy as np\n'), ((3056, 3078), 'numpy.array', 'np.array', (['lat'], {'ndmin': '(1)'}), '(lat, ndmin=1)\n', (3064, 3078), True, 'import numpy as np\n'), ((3090, 3145), 'astropy.coordinates.EarthLocation', 'EarthLocation', (['(lon * u.deg)', '(lat * u.deg)'], {'height': '(0 * u.m)'}), '(lon * u.deg, lat * u.deg, height=0 * u.m)\n', (3103, 3145), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((10728, 10791), 'sora.config.input_tests.check_kwargs', 
'input_tests.check_kwargs', (['kwargs'], {'allowed_kwargs': 'allowed_kwargs'}), '(kwargs, allowed_kwargs=allowed_kwargs)\n', (10752, 10791), False, 'from sora.config import input_tests\n'), ((15436, 15506), 'astropy.coordinates.GCRS', 'GCRS', (["occs['stars'].ra", "occs['stars'].dec", '(1 * u.R_earth)'], {'obstime': 'data'}), "(occs['stars'].ra, occs['stars'].dec, 1 * u.R_earth, obstime=data)\n", (15440, 15506), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((16173, 16274), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', ([], {'central_longitude': 'center_map.lon.value', 'central_latitude': 'center_map.lat.value'}), '(central_longitude=center_map.lon.value, central_latitude=\n center_map.lat.value)\n', (16190, 16274), True, 'import cartopy.crs as ccrs\n'), ((17621, 17682), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', (['"""physical"""', '"""ocean"""', 'resolution'], {}), "('physical', 'ocean', resolution)\n", (17649, 17682), True, 'import cartopy.feature as cfeature\n'), ((17694, 17754), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', (['"""physical"""', '"""land"""', 'resolution'], {}), "('physical', 'land', resolution)\n", (17722, 17754), True, 'import cartopy.feature as cfeature\n'), ((17768, 17841), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', (['"""cultural"""', '"""admin_0_countries"""', 'resolution'], {}), "('cultural', 'admin_0_countries', resolution)\n", (17796, 17841), True, 'import cartopy.feature as cfeature\n'), ((19401, 19414), 'astropy.coordinates.get_sun', 'get_sun', (['data'], {}), '(data)\n', (19408, 19414), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((19569, 19648), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', ([], {'central_longitude': '(pole_lon + 180)', 'central_latitude': '(-pole_lat)'}), '(central_longitude=pole_lon + 180, 
central_latitude=-pole_lat)\n', (19586, 19648), True, 'import cartopy.crs as ccrs\n'), ((20484, 20503), 'astropy.coordinates.Angle', 'Angle', (["occs['posa']"], {}), "(occs['posa'])\n", (20489, 20503), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((21401, 21423), 'numpy.where', 'np.where', (['(lon1 < 1e+30)'], {}), '(lon1 < 1e+30)\n', (21409, 21423), True, 'import numpy as np\n'), ((21505, 21527), 'numpy.where', 'np.where', (['(lon1 > 1e+30)'], {}), '(lon1 > 1e+30)\n', (21513, 21527), True, 'import numpy as np\n'), ((21874, 21896), 'numpy.where', 'np.where', (['(lon2 < 1e+30)'], {}), '(lon2 < 1e+30)\n', (21882, 21896), True, 'import numpy as np\n'), ((21978, 22000), 'numpy.where', 'np.where', (['(lon2 > 1e+30)'], {}), '(lon2 > 1e+30)\n', (21986, 22000), True, 'import numpy as np\n'), ((26273, 26296), 'numpy.where', 'np.where', (['(clon1 < 1e+30)'], {}), '(clon1 < 1e+30)\n', (26281, 26296), True, 'import numpy as np\n'), ((26898, 26922), 'numpy.where', 'np.where', (['(loncen < 1e+30)'], {}), '(loncen < 1e+30)\n', (26906, 26922), True, 'import numpy as np\n'), ((31007, 31049), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {'format': 'fmt', 'dpi': 'dpi'}), '(filepath, format=fmt, dpi=dpi)\n', (31018, 31049), True, 'import matplotlib.pyplot as plt\n'), ((31104, 31113), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (31111, 31113), True, 'import matplotlib.pyplot as plt\n'), ((31118, 31129), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (31127, 31129), True, 'import matplotlib.pyplot as plt\n'), ((989, 1010), 'astropy.constants.R_earth.to', 'const.R_earth.to', (['u.m'], {}), '(u.m)\n', (1005, 1010), True, 'import astropy.constants as const\n'), ((1158, 1176), 'astropy.coordinates.GCRS', 'GCRS', ([], {'obstime': 'time'}), '(obstime=time)\n', (1162, 1176), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), 
((3209, 3240), 'astropy.coordinates.SkyOffsetFrame', 'SkyOffsetFrame', ([], {'origin': 'itrs_cen'}), '(origin=itrs_cen)\n', (3223, 3240), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((10954, 11013), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['coord'], {'frame': '"""icrs"""', 'unit': '(u.hourangle, u.degree)'}), "(coord, frame='icrs', unit=(u.hourangle, u.degree))\n", (10962, 11013), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((11180, 11190), 'astropy.time.Time', 'Time', (['time'], {}), '(time)\n', (11184, 11190), False, 'from astropy.time import Time\n'), ((13257, 13277), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (13271, 13277), False, 'import os\n'), ((13697, 13724), 'astropy.coordinates.EarthLocation', 'EarthLocation', (['*chord_geo.T'], {}), '(*chord_geo.T)\n', (13710, 13724), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((15548, 15566), 'astropy.coordinates.ITRS', 'ITRS', ([], {'obstime': 'data'}), '(obstime=data)\n', (15552, 15566), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((16299, 16330), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'projection'}), '(projection=projection)\n', (16307, 16330), True, 'import matplotlib.pyplot as plt\n'), ((16355, 16418), 'matplotlib.pyplot.axes', 'plt.axes', (['[-0.001, -0.001, 1.002, 1.002]'], {'projection': 'projection'}), '([-0.001, -0.001, 1.002, 1.002], projection=projection)\n', (16363, 16418), True, 'import matplotlib.pyplot as plt\n'), ((16501, 16522), 'astropy.constants.R_earth.to', 'const.R_earth.to', (['u.m'], {}), '(u.m)\n', (16517, 16522), True, 'import astropy.constants as const\n'), ((19102, 19187), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', (['"""cultural"""', 
'"""admin_1_states_provinces"""', 'resolution'], {}), "('cultural', 'admin_1_states_provinces', resolution\n )\n", (19130, 19187), True, 'import cartopy.feature as cfeature\n'), ((20434, 20473), 'numpy.concatenate', 'np.concatenate', (['(vec, -vec[1:])'], {'axis': '(0)'}), '((vec, -vec[1:]), axis=0)\n', (20448, 20473), True, 'import numpy as np\n'), ((20943, 20963), 'astropy.coordinates.ITRS', 'ITRS', ([], {'obstime': 'datas1'}), '(obstime=datas1)\n', (20947, 20963), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((22406, 22428), 'numpy.where', 'np.where', (['(lon1 < 1e+30)'], {}), '(lon1 < 1e+30)\n', (22414, 22428), True, 'import numpy as np\n'), ((22683, 22723), 'numpy.sqrt', 'np.sqrt', (['((xt - ax) ** 2 + (yt - by) ** 2)'], {}), '((xt - ax) ** 2 + (yt - by) ** 2)\n', (22690, 22723), True, 'import numpy as np\n'), ((22755, 22793), 'numpy.arctan2', 'np.arctan2', (['(yt - by)[k]', '(xt - ax)[k]'], {}), '((yt - by)[k], (xt - ax)[k])\n', (22765, 22793), True, 'import numpy as np\n'), ((23035, 23057), 'numpy.where', 'np.where', (['(lon1 < 1e+30)'], {}), '(lon1 < 1e+30)\n', (23043, 23057), True, 'import numpy as np\n'), ((23521, 23543), 'numpy.where', 'np.where', (['(lon1 < 1e+30)'], {}), '(lon1 < 1e+30)\n', (23529, 23543), True, 'import numpy as np\n'), ((23881, 23903), 'numpy.where', 'np.where', (['(lon2 < 1e+30)'], {}), '(lon2 < 1e+30)\n', (23889, 23903), True, 'import numpy as np\n'), ((24257, 24279), 'numpy.where', 'np.where', (['(lon1 < 1e+30)'], {}), '(lon1 < 1e+30)\n', (24265, 24279), True, 'import numpy as np\n'), ((24567, 24589), 'numpy.where', 'np.where', (['(lon2 < 1e+30)'], {}), '(lon2 < 1e+30)\n', (24575, 24589), True, 'import numpy as np\n'), ((24943, 24965), 'numpy.where', 'np.where', (['(lon1 < 1e+30)'], {}), '(lon1 < 1e+30)\n', (24951, 24965), True, 'import numpy as np\n'), ((25255, 25277), 'numpy.where', 'np.where', (['(lon2 < 1e+30)'], {}), '(lon2 < 1e+30)\n', (25263, 25277), True, 
'import numpy as np\n'), ((26080, 26100), 'astropy.coordinates.ITRS', 'ITRS', ([], {'obstime': 'datas2'}), '(obstime=datas2)\n', (26084, 26100), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((26503, 26533), 'numpy.repeat', 'np.repeat', (["occs['stars'].ra", '(1)'], {}), "(occs['stars'].ra, 1)\n", (26512, 26533), True, 'import numpy as np\n'), ((26535, 26566), 'numpy.repeat', 'np.repeat', (["occs['stars'].dec", '(1)'], {}), "(occs['stars'].dec, 1)\n", (26544, 26566), True, 'import numpy as np\n'), ((26661, 26679), 'astropy.coordinates.ITRS', 'ITRS', ([], {'obstime': 'data'}), '(obstime=data)\n', (26665, 26679), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((29071, 29142), 'astropy.coordinates.EarthLocation.from_geodetic', 'EarthLocation.from_geodetic', (['sites[site][0]', 'sites[site][1]', '(0.0 * u.km)'], {}), '(sites[site][0], sites[site][1], 0.0 * u.km)\n', (29098, 29142), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((30468, 30504), 'numpy.absolute', 'np.absolute', (["occs['stars'].dec.dms.s"], {}), "(occs['stars'].dec.dms.s)\n", (30479, 30504), True, 'import numpy as np\n'), ((1636, 1732), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(x * u.m)', '(y * u.m)', '(z * u.m)'], {'representation_type': '"""cartesian"""', 'frame': 'center_frame[a]'}), "(x * u.m, y * u.m, z * u.m, representation_type='cartesian', frame=\n center_frame[a])\n", (1644, 1732), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((1911, 1951), 'astropy.coordinates.EarthLocation', 'EarthLocation', (['n_site.lon', 'n_site.lat', '(0)'], {}), '(n_site.lon, n_site.lat, 0)\n', (1924, 1951), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((13497, 13525), 'numpy.array', 
'np.array', (['chord_geo'], {'ndmin': '(2)'}), '(chord_geo, ndmin=2)\n', (13505, 13525), True, 'import numpy as np\n'), ((13818, 13849), 'os.path.isfile', 'os.path.isfile', (["kwargs['sites']"], {}), "(kwargs['sites'])\n", (13832, 13849), False, 'import os\n'), ((13870, 14046), 'numpy.loadtxt', 'np.loadtxt', (["kwargs['sites']"], {'dtype': "{'names': ('name', 'lon', 'lat', 'offx', 'offy', 'color'), 'formats': (\n 'S30', 'f8', 'f8', 'f8', 'f8', 'S30')}", 'delimiter': '""","""', 'ndmin': '(1)'}), "(kwargs['sites'], dtype={'names': ('name', 'lon', 'lat', 'offx',\n 'offy', 'color'), 'formats': ('S30', 'f8', 'f8', 'f8', 'f8', 'S30')},\n delimiter=',', ndmin=1)\n", (13880, 14046), True, 'import numpy as np\n'), ((14563, 14598), 'os.path.isfile', 'os.path.isfile', (["kwargs['countries']"], {}), "(kwargs['countries'])\n", (14577, 14598), False, 'import os\n'), ((14619, 14751), 'numpy.loadtxt', 'np.loadtxt', (["kwargs['countries']"], {'dtype': "{'names': ('name', 'lon', 'lat'), 'formats': ('S30', 'f8', 'f8')}", 'delimiter': '""","""', 'ndmin': '(1)'}), "(kwargs['countries'], dtype={'names': ('name', 'lon', 'lat'),\n 'formats': ('S30', 'f8', 'f8')}, delimiter=',', ndmin=1)\n", (14629, 14751), True, 'import numpy as np\n'), ((15123, 15143), 'numpy.sin', 'np.sin', (["occs['posa']"], {}), "(occs['posa'])\n", (15129, 15143), True, 'import numpy as np\n'), ((15153, 15173), 'numpy.cos', 'np.cos', (["occs['posa']"], {}), "(occs['posa'])\n", (15159, 15173), True, 'import numpy as np\n'), ((19292, 19327), 'numpy.arange', 'np.arange', (['(-180)', '(180.001)', 'meridians'], {}), '(-180, 180.001, meridians)\n', (19301, 19327), True, 'import numpy as np\n'), ((19335, 19368), 'numpy.arange', 'np.arange', (['(-90)', '(90.001)', 'parallels'], {}), '(-90, 90.001, parallels)\n', (19344, 19368), True, 'import numpy as np\n'), ((19856, 19874), 'numpy.cos', 'np.cos', (['(18 * u.deg)'], {}), '(18 * u.deg)\n', (19862, 19874), True, 'import numpy as np\n'), ((19880, 19898), 'numpy.cos', 'np.cos', 
(['(18 * u.deg)'], {}), '(18 * u.deg)\n', (19886, 19898), True, 'import numpy as np\n'), ((21084, 21094), 'numpy.sin', 'np.sin', (['pa'], {}), '(pa)\n', (21090, 21094), True, 'import numpy as np\n'), ((21121, 21135), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (21127, 21135), True, 'import numpy as np\n'), ((21151, 21161), 'numpy.cos', 'np.cos', (['pa'], {}), '(pa)\n', (21157, 21161), True, 'import numpy as np\n'), ((21188, 21202), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (21194, 21202), True, 'import numpy as np\n'), ((21228, 21242), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (21234, 21242), True, 'import numpy as np\n'), ((21267, 21281), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (21273, 21281), True, 'import numpy as np\n'), ((21465, 21480), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (21478, 21480), True, 'import cartopy.crs as ccrs\n'), ((21701, 21715), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (21707, 21715), True, 'import numpy as np\n'), ((21740, 21754), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (21746, 21754), True, 'import numpy as np\n'), ((21938, 21953), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (21951, 21953), True, 'import cartopy.crs as ccrs\n'), ((25488, 25527), 'numpy.concatenate', 'np.concatenate', (['(vec, -vec[1:])'], {'axis': '(0)'}), '((vec, -vec[1:]), axis=0)\n', (25502, 25527), True, 'import numpy as np\n'), ((25549, 25559), 'numpy.sin', 'np.sin', (['pa'], {}), '(pa)\n', (25555, 25559), True, 'import numpy as np\n'), ((25586, 25600), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (25592, 25600), True, 'import numpy as np\n'), ((25617, 25627), 'numpy.cos', 'np.cos', (['pa'], {}), '(pa)\n', (25623, 25627), True, 'import numpy as np\n'), ((25654, 25668), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (25660, 25668), True, 'import numpy as np\n'), ((26345, 26360), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', 
(26358, 26360), True, 'import cartopy.crs as ccrs\n'), ((27135, 27243), 'matplotlib.pyplot.plot', 'plt.plot', (['xp', 'yp', '"""o"""'], {'color': 'ptcolor', 'clip_on': '(False)', 'markersize': '(mapsize[0].value * pscale * 24.0 / 46.0)'}), "(xp, yp, 'o', color=ptcolor, clip_on=False, markersize=mapsize[0].\n value * pscale * 24.0 / 46.0)\n", (27143, 27243), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1783), 'astropy.coordinates.GCRS', 'GCRS', ([], {'obstime': 'time'}), '(obstime=time)\n', (1769, 1783), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((1827, 1845), 'astropy.coordinates.ITRS', 'ITRS', ([], {'obstime': 'time'}), '(obstime=time)\n', (1831, 1845), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((2053, 2071), 'astropy.coordinates.GCRS', 'GCRS', ([], {'obstime': 'time'}), '(obstime=time)\n', (2057, 2071), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((15285, 15309), 'numpy.absolute', 'np.absolute', (["occs['vel']"], {}), "(occs['vel'])\n", (15296, 15309), True, 'import numpy as np\n'), ((15855, 15910), 'astropy.coordinates.EarthLocation.from_geodetic', 'EarthLocation.from_geodetic', (["*kwargs['centerproj']", '(0.0)'], {}), "(*kwargs['centerproj'], 0.0)\n", (15882, 15910), False, 'from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun\n'), ((16728, 16747), 'numpy.absolute', 'np.absolute', (['limits'], {}), '(limits)\n', (16739, 16747), True, 'import numpy as np\n'), ((19668, 19690), 'numpy.arange', 'np.arange', (['(0)', '(361)', '(0.5)'], {}), '(0, 361, 0.5)\n', (19677, 19690), True, 'import numpy as np\n'), ((19719, 19741), 'numpy.arange', 'np.arange', (['(0)', '(361)', '(0.5)'], {}), '(0, 361, 0.5)\n', (19728, 19741), True, 'import numpy as np\n'), ((20376, 20406), 'numpy.absolute', 'np.absolute', 
(["occs['vel'].value"], {}), "(occs['vel'].value)\n", (20387, 20406), True, 'import numpy as np\n'), ((22226, 22240), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (22232, 22240), True, 'import numpy as np\n'), ((22264, 22278), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (22270, 22278), True, 'import numpy as np\n'), ((22474, 22489), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (22487, 22489), True, 'import cartopy.crs as ccrs\n'), ((22812, 22823), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (22818, 22823), True, 'import numpy as np\n'), ((22855, 22869), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (22861, 22869), True, 'import numpy as np\n'), ((22893, 22907), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (22899, 22907), True, 'import numpy as np\n'), ((23103, 23118), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (23116, 23118), True, 'import cartopy.crs as ccrs\n'), ((23316, 23330), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (23322, 23330), True, 'import numpy as np\n'), ((23379, 23393), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (23385, 23393), True, 'import numpy as np\n'), ((23595, 23610), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (23608, 23610), True, 'import cartopy.crs as ccrs\n'), ((23676, 23690), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (23682, 23690), True, 'import numpy as np\n'), ((23739, 23753), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (23745, 23753), True, 'import numpy as np\n'), ((23955, 23970), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (23968, 23970), True, 'import cartopy.crs as ccrs\n'), ((24077, 24091), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (24083, 24091), True, 'import numpy as np\n'), ((24115, 24129), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (24121, 24129), True, 'import numpy as np\n'), ((24331, 24346), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), 
'()\n', (24344, 24346), True, 'import cartopy.crs as ccrs\n'), ((24387, 24401), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (24393, 24401), True, 'import numpy as np\n'), ((24425, 24439), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (24431, 24439), True, 'import numpy as np\n'), ((24641, 24656), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (24654, 24656), True, 'import cartopy.crs as ccrs\n'), ((24762, 24776), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (24768, 24776), True, 'import numpy as np\n'), ((24801, 24815), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (24807, 24815), True, 'import numpy as np\n'), ((25017, 25032), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (25030, 25032), True, 'import cartopy.crs as ccrs\n'), ((25074, 25088), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (25080, 25088), True, 'import numpy as np\n'), ((25113, 25127), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (25119, 25127), True, 'import numpy as np\n'), ((25329, 25344), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (25342, 25344), True, 'import cartopy.crs as ccrs\n'), ((25421, 25451), 'numpy.absolute', 'np.absolute', (["occs['vel'].value"], {}), "(occs['vel'].value)\n", (25432, 25451), True, 'import numpy as np\n'), ((26746, 26756), 'numpy.sin', 'np.sin', (['pa'], {}), '(pa)\n', (26752, 26756), True, 'import numpy as np\n'), ((26790, 26800), 'numpy.cos', 'np.cos', (['pa'], {}), '(pa)\n', (26796, 26800), True, 'import numpy as np\n'), ((26996, 27011), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (27009, 27011), True, 'import cartopy.crs as ccrs\n'), ((28903, 28918), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (28916, 28918), True, 'import cartopy.crs as ccrs\n'), ((29195, 29210), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (29208, 29210), True, 'import cartopy.crs as ccrs\n'), ((29919, 29943), 'numpy.absolute', 'np.absolute', 
(["occs['vel']"], {}), "(occs['vel'])\n", (29930, 29943), True, 'import numpy as np\n'), ((15784, 15814), 'numpy.array', 'np.array', (["kwargs['centerproj']"], {}), "(kwargs['centerproj'])\n", (15792, 15814), True, 'import numpy as np\n'), ((23292, 23306), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (23298, 23306), True, 'import numpy as np\n'), ((23355, 23369), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (23361, 23369), True, 'import numpy as np\n'), ((23652, 23666), 'numpy.sin', 'np.sin', (['paplus'], {}), '(paplus)\n', (23658, 23666), True, 'import numpy as np\n'), ((23715, 23729), 'numpy.cos', 'np.cos', (['paplus'], {}), '(paplus)\n', (23721, 23729), True, 'import numpy as np\n'), ((27362, 27379), 'numpy.cos', 'np.cos', (['(h * u.deg)'], {}), '(h * u.deg)\n', (27368, 27379), True, 'import numpy as np\n'), ((27389, 27406), 'numpy.cos', 'np.cos', (['(h * u.deg)'], {}), '(h * u.deg)\n', (27395, 27406), True, 'import numpy as np\n'), ((27528, 27543), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (27541, 27543), True, 'import cartopy.crs as ccrs\n'), ((27683, 27710), 'numpy.sin', 'np.sin', (['(paplus + 90 * u.deg)'], {}), '(paplus + 90 * u.deg)\n', (27689, 27710), True, 'import numpy as np\n'), ((27707, 27727), 'numpy.sign', 'np.sign', (["occs['vel']"], {}), "(occs['vel'])\n", (27714, 27727), True, 'import numpy as np\n'), ((27761, 27788), 'numpy.cos', 'np.cos', (['(paplus + 90 * u.deg)'], {}), '(paplus + 90 * u.deg)\n', (27767, 27788), True, 'import numpy as np\n'), ((27785, 27805), 'numpy.sign', 'np.sign', (["occs['vel']"], {}), "(occs['vel'])\n", (27792, 27805), True, 'import numpy as np\n'), ((28212, 28239), 'numpy.sin', 'np.sin', (['(paplus + 90 * u.deg)'], {}), '(paplus + 90 * u.deg)\n', (28218, 28239), True, 'import numpy as np\n'), ((28242, 28262), 'numpy.sign', 'np.sign', (["occs['vel']"], {}), "(occs['vel'])\n", (28249, 28262), True, 'import numpy as np\n'), ((28305, 28332), 'numpy.cos', 'np.cos', (['(paplus + 90 * 
u.deg)'], {}), '(paplus + 90 * u.deg)\n', (28311, 28332), True, 'import numpy as np\n'), ((28335, 28355), 'numpy.sign', 'np.sign', (["occs['vel']"], {}), "(occs['vel'])\n", (28342, 28355), True, 'import numpy as np\n'), ((15195, 15215), 'numpy.cos', 'np.cos', (["occs['posa']"], {}), "(occs['posa'])\n", (15201, 15215), True, 'import numpy as np\n'), ((15227, 15247), 'numpy.sin', 'np.sin', (["occs['posa']"], {}), "(occs['posa'])\n", (15233, 15247), True, 'import numpy as np\n')] |
# Build swarm plots of VAS scores versus stimulation velocity for three
# stimulation conditions: neck (AxiDraw), forearm (AxiDraw) and forearm
# (tactor).  For each subject and velocity, per-trial VAS scores are pooled
# in groups of `trials` and summarized by their mean and median.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Spreadsheet layout: each subject owns `fields` consecutive columns, laid
# out as (score, velocity) pairs for the three stimulation conditions.
path = '../data/data_sub.xlsx'
dataFrame = pd.read_excel(path, header=2, sheet_name='trials_noTime')
headers = dataFrame.columns

trials = 5    # scores pooled per (velocity) group before summarizing
fields = 6    # columns per subject: 3 conditions x (score, velocity)
subjects = 9
SPEEDS = (3, 10, 30, 50, 100, 200)  # stimulation velocities tested


def _summarize_condition(score_offset, speed_offset):
    """Pool VAS scores for one stimulation condition.

    score_offset / speed_offset select, within each subject's column group,
    the VAS-score column and the stimulation-velocity column.  Every time
    `trials` scores have accumulated for a velocity, their mean and median
    are recorded and the buffer is emptied.  Buffers deliberately carry
    over between subjects (a partially filled group continues with the
    next subject's rows), matching the original accumulation behaviour.

    Returns (means, medians, velocities) as parallel lists.
    """
    means, medians, velocities = [], [], []
    buffers = {speed: [] for speed in SPEEDS}
    for t in range(subjects):
        score_col = headers[t * fields + score_offset]
        speed_col = headers[t * fields + speed_offset]
        for u in range(len(dataFrame)):
            speed = dataFrame[speed_col][u]
            if speed in buffers:
                buffers[speed].append(dataFrame[score_col][u])
                if len(buffers[speed]) == trials:
                    means.append(np.mean(buffers[speed]))
                    medians.append(np.median(buffers[speed]))
                    velocities.append(int(speed))
                    buffers[speed] = []
    return means, medians, velocities


# Column offsets per condition: (score column, velocity column).
meanNeck, medianNeck, speedNeck = _summarize_condition(0, 1)
meanForearm, medianForearm, speedForearm = _summarize_condition(2, 3)
meanTactor, medianTactor, speedTactor = _summarize_condition(4, 5)

# Debug output for the forearm/AxiDraw condition.
print(speedForearm)
print(meanForearm)

fig2 = plt.figure(2)
plot2 = pd.DataFrame({'stimulation velocity': speedForearm, 'VAS score': medianForearm})
sns.swarmplot(x='stimulation velocity', y='VAS score', data=plot2, size=6, color='k')
plt.title('VAS score vs AxiDraw Forearm Stimulation')
plt.yticks((-10, -5, 0, 5, 10))
plt.ylabel("VAS score", labelpad=-5)
fig2.show()

fig3 = plt.figure(3)
plot3 = pd.DataFrame({'stimulation velocity': speedTactor, 'VAS score': medianTactor})
sns.swarmplot(x='stimulation velocity', y='VAS score', data=plot3, size=6, color='k')
plt.title('VAS score vs Tactor Forearm Stimulation')
plt.yticks((-10, -5, 0, 5, 10))
plt.ylabel("VAS score", labelpad=-5)
fig3.show()
| [
"numpy.mean",
"numpy.median",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"pandas.read_excel",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"seaborn.swarmplot"
] | [((264, 321), 'pandas.read_excel', 'pd.read_excel', (['path'], {'header': '(2)', 'sheet_name': '"""trials_noTime"""'}), "(path, header=2, sheet_name='trials_noTime')\n", (277, 321), True, 'import pandas as pd\n'), ((10273, 10286), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (10283, 10286), True, 'import matplotlib.pyplot as plt\n'), ((10295, 10380), 'pandas.DataFrame', 'pd.DataFrame', (["{'stimulation velocity': speedForearm, 'VAS score': medianForearm}"], {}), "({'stimulation velocity': speedForearm, 'VAS score': medianForearm}\n )\n", (10307, 10380), True, 'import pandas as pd\n'), ((10376, 10465), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""stimulation velocity"""', 'y': '"""VAS score"""', 'data': 'plot2', 'size': '(6)', 'color': '"""k"""'}), "(x='stimulation velocity', y='VAS score', data=plot2, size=6,\n color='k')\n", (10389, 10465), True, 'import seaborn as sns\n'), ((10462, 10515), 'matplotlib.pyplot.title', 'plt.title', (['"""VAS score vs AxiDraw Forearm Stimulation"""'], {}), "('VAS score vs AxiDraw Forearm Stimulation')\n", (10471, 10515), True, 'import matplotlib.pyplot as plt\n'), ((10516, 10547), 'matplotlib.pyplot.yticks', 'plt.yticks', (['(-10, -5, 0, 5, 10)'], {}), '((-10, -5, 0, 5, 10))\n', (10526, 10547), True, 'import matplotlib.pyplot as plt\n'), ((10548, 10584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""VAS score"""'], {'labelpad': '(-5)'}), "('VAS score', labelpad=-5)\n", (10558, 10584), True, 'import matplotlib.pyplot as plt\n'), ((10605, 10618), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (10615, 10618), True, 'import matplotlib.pyplot as plt\n'), ((10627, 10705), 'pandas.DataFrame', 'pd.DataFrame', (["{'stimulation velocity': speedTactor, 'VAS score': medianTactor}"], {}), "({'stimulation velocity': speedTactor, 'VAS score': medianTactor})\n", (10639, 10705), True, 'import pandas as pd\n'), ((10706, 10795), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""stimulation 
velocity"""', 'y': '"""VAS score"""', 'data': 'plot3', 'size': '(6)', 'color': '"""k"""'}), "(x='stimulation velocity', y='VAS score', data=plot3, size=6,\n color='k')\n", (10719, 10795), True, 'import seaborn as sns\n'), ((10792, 10844), 'matplotlib.pyplot.title', 'plt.title', (['"""VAS score vs Tactor Forearm Stimulation"""'], {}), "('VAS score vs Tactor Forearm Stimulation')\n", (10801, 10844), True, 'import matplotlib.pyplot as plt\n'), ((10845, 10876), 'matplotlib.pyplot.yticks', 'plt.yticks', (['(-10, -5, 0, 5, 10)'], {}), '((-10, -5, 0, 5, 10))\n', (10855, 10876), True, 'import matplotlib.pyplot as plt\n'), ((10877, 10913), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""VAS score"""'], {'labelpad': '(-5)'}), "('VAS score', labelpad=-5)\n", (10887, 10913), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1532), 'numpy.mean', 'np.mean', (['temp1'], {}), '(temp1)\n', (1525, 1532), True, 'import numpy as np\n'), ((1568, 1584), 'numpy.median', 'np.median', (['temp1'], {}), '(temp1)\n', (1577, 1584), True, 'import numpy as np\n'), ((2054, 2068), 'numpy.mean', 'np.mean', (['temp2'], {}), '(temp2)\n', (2061, 2068), True, 'import numpy as np\n'), ((2104, 2120), 'numpy.median', 'np.median', (['temp2'], {}), '(temp2)\n', (2113, 2120), True, 'import numpy as np\n'), ((2591, 2605), 'numpy.mean', 'np.mean', (['temp3'], {}), '(temp3)\n', (2598, 2605), True, 'import numpy as np\n'), ((2641, 2657), 'numpy.median', 'np.median', (['temp3'], {}), '(temp3)\n', (2650, 2657), True, 'import numpy as np\n'), ((3128, 3142), 'numpy.mean', 'np.mean', (['temp4'], {}), '(temp4)\n', (3135, 3142), True, 'import numpy as np\n'), ((3178, 3194), 'numpy.median', 'np.median', (['temp4'], {}), '(temp4)\n', (3187, 3194), True, 'import numpy as np\n'), ((3666, 3680), 'numpy.mean', 'np.mean', (['temp5'], {}), '(temp5)\n', (3673, 3680), True, 'import numpy as np\n'), ((3716, 3732), 'numpy.median', 'np.median', (['temp5'], {}), '(temp5)\n', (3725, 3732), True, 'import numpy as np\n'), ((4205, 
4219), 'numpy.mean', 'np.mean', (['temp6'], {}), '(temp6)\n', (4212, 4219), True, 'import numpy as np\n'), ((4255, 4271), 'numpy.median', 'np.median', (['temp6'], {}), '(temp6)\n', (4264, 4271), True, 'import numpy as np\n'), ((4729, 4743), 'numpy.mean', 'np.mean', (['temp1'], {}), '(temp1)\n', (4736, 4743), True, 'import numpy as np\n'), ((4782, 4798), 'numpy.median', 'np.median', (['temp1'], {}), '(temp1)\n', (4791, 4798), True, 'import numpy as np\n'), ((5177, 5191), 'numpy.mean', 'np.mean', (['temp2'], {}), '(temp2)\n', (5184, 5191), True, 'import numpy as np\n'), ((5230, 5246), 'numpy.median', 'np.median', (['temp2'], {}), '(temp2)\n', (5239, 5246), True, 'import numpy as np\n'), ((5626, 5640), 'numpy.mean', 'np.mean', (['temp3'], {}), '(temp3)\n', (5633, 5640), True, 'import numpy as np\n'), ((5679, 5695), 'numpy.median', 'np.median', (['temp3'], {}), '(temp3)\n', (5688, 5695), True, 'import numpy as np\n'), ((6075, 6089), 'numpy.mean', 'np.mean', (['temp4'], {}), '(temp4)\n', (6082, 6089), True, 'import numpy as np\n'), ((6128, 6144), 'numpy.median', 'np.median', (['temp4'], {}), '(temp4)\n', (6137, 6144), True, 'import numpy as np\n'), ((6525, 6539), 'numpy.mean', 'np.mean', (['temp5'], {}), '(temp5)\n', (6532, 6539), True, 'import numpy as np\n'), ((6578, 6594), 'numpy.median', 'np.median', (['temp5'], {}), '(temp5)\n', (6587, 6594), True, 'import numpy as np\n'), ((6976, 6990), 'numpy.mean', 'np.mean', (['temp6'], {}), '(temp6)\n', (6983, 6990), True, 'import numpy as np\n'), ((7029, 7045), 'numpy.median', 'np.median', (['temp6'], {}), '(temp6)\n', (7038, 7045), True, 'import numpy as np\n'), ((7505, 7519), 'numpy.mean', 'np.mean', (['temp1'], {}), '(temp1)\n', (7512, 7519), True, 'import numpy as np\n'), ((7557, 7573), 'numpy.median', 'np.median', (['temp1'], {}), '(temp1)\n', (7566, 7573), True, 'import numpy as np\n'), ((7946, 7960), 'numpy.mean', 'np.mean', (['temp2'], {}), '(temp2)\n', (7953, 7960), True, 'import numpy as np\n'), ((7998, 8014), 
'numpy.median', 'np.median', (['temp2'], {}), '(temp2)\n', (8007, 8014), True, 'import numpy as np\n'), ((8388, 8402), 'numpy.mean', 'np.mean', (['temp3'], {}), '(temp3)\n', (8395, 8402), True, 'import numpy as np\n'), ((8440, 8456), 'numpy.median', 'np.median', (['temp3'], {}), '(temp3)\n', (8449, 8456), True, 'import numpy as np\n'), ((8830, 8844), 'numpy.mean', 'np.mean', (['temp4'], {}), '(temp4)\n', (8837, 8844), True, 'import numpy as np\n'), ((8882, 8898), 'numpy.median', 'np.median', (['temp4'], {}), '(temp4)\n', (8891, 8898), True, 'import numpy as np\n'), ((9273, 9287), 'numpy.mean', 'np.mean', (['temp5'], {}), '(temp5)\n', (9280, 9287), True, 'import numpy as np\n'), ((9325, 9341), 'numpy.median', 'np.median', (['temp5'], {}), '(temp5)\n', (9334, 9341), True, 'import numpy as np\n'), ((9717, 9731), 'numpy.mean', 'np.mean', (['temp6'], {}), '(temp6)\n', (9724, 9731), True, 'import numpy as np\n'), ((9769, 9785), 'numpy.median', 'np.median', (['temp6'], {}), '(temp6)\n', (9778, 9785), True, 'import numpy as np\n')] |
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""SketchRNN training."""
import json
import os
import time
import zipfile
import model as sketch_rnn_model
import utils
import numpy as np
import requests
import six
from six.moves.urllib.request import urlretrieve
import tensorflow.compat.v1 as tf
# Emit INFO-level logs so dataset/training progress messages are visible.
tf.logging.set_verbosity(tf.logging.INFO)

# Command-line flags controlling data location, checkpoint directory,
# resume behaviour and hyperparameter overrides.
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string(
    'data_dir',
    'https://github.com/hardmaru/sketch-rnn-datasets/raw/master/aaron_sheep',
    'The directory in which to find the dataset specified in model hparams. '
    'If data_dir starts with "http://" or "https://", the file will be fetched '
    'remotely.')
tf.app.flags.DEFINE_string(
    'log_root', '/tmp/sketch_rnn/models/default',
    'Directory to store model checkpoints, tensorboard.')
tf.app.flags.DEFINE_boolean('resume_training', False,
                            'Set to true to load previous checkpoint')
tf.app.flags.DEFINE_string(
    'hparams', '', 'Pass in comma-separated key=value pairs such as '
    '\'save_every=40,decay_rate=0.99\' '
    '(no whitespace) to be read into the HParams object defined in model.py')

# Default remote location of the published pretrained sketch-rnn models
# (used by download_pretrained_models below).
PRETRAINED_MODELS_URL = ('http://download.magenta.tensorflow.org/models/'
                         'sketch_rnn.zip')
def reset_graph():
  """Closes the current default session and resets the graph."""
  active_session = tf.get_default_session()
  if active_session is not None:
    active_session.close()
  tf.reset_default_graph()
def load_env(data_dir, model_dir):
  """Loads environment for inference mode, used in jupyter notebook."""
  hps = sketch_rnn_model.get_default_hparams()
  config_path = os.path.join(model_dir, 'model_config.json')
  with tf.gfile.Open(config_path, 'r') as config_file:
    hps.parse_json(config_file.read())
  return load_dataset(data_dir, hps, inference_mode=True)
def load_model(model_dir):
  """Loads model for inference mode, used in jupyter notebook."""
  config_path = os.path.join(model_dir, 'model_config.json')
  hps = sketch_rnn_model.get_default_hparams()
  with tf.gfile.Open(config_path, 'r') as config_file:
    hps.parse_json(config_file.read())
  hps.batch_size = 1  # only sample one at a time

  # Evaluation copy: every dropout flavor disabled, training mode off.
  eval_hps = sketch_rnn_model.copy_hparams(hps)
  for attr in ('use_input_dropout', 'use_recurrent_dropout',
               'use_output_dropout', 'is_training'):
    setattr(eval_hps, attr, 0)

  # Sampling copy: generate a single point per step.
  sample_hps = sketch_rnn_model.copy_hparams(eval_hps)
  sample_hps.max_seq_len = 1
  return [hps, eval_hps, sample_hps]
def download_pretrained_models(models_root_dir='/tmp/sketch_rnn/models',
                               pretrained_models_url=PRETRAINED_MODELS_URL):
  """Download pretrained models to a temporary directory."""
  tf.gfile.MakeDirs(models_root_dir)
  archive_name = os.path.basename(pretrained_models_url)
  archive_path = os.path.join(models_root_dir, archive_name)
  # Only hit the network when no cached archive is present.
  if not os.path.isfile(archive_path):
    tf.logging.info('Downloading pretrained models from %s...',
                    pretrained_models_url)
    urlretrieve(pretrained_models_url, archive_path)
    tf.logging.info('Download complete.')
  else:
    tf.logging.info('%s already exists, using cached copy', archive_path)
  tf.logging.info('Unzipping %s...', archive_path)
  with zipfile.ZipFile(archive_path) as archive:
    archive.extractall(models_root_dir)
  tf.logging.info('Unzipping complete.')
def load_dataset(data_dir, model_params, inference_mode=False):
  """Loads the .npz file, and splits the set into train/valid/test.

  The x/y offsets are normalized with a scale factor computed from the
  training set only; the same factor is applied to the validation and
  test sets.

  Args:
    data_dir: local directory or http(s) URL holding the .npz dataset
      file(s) named by `model_params.data_set`.
    model_params: HParams for the model. Its `max_seq_len` field is
      overwritten here with the longest stroke sequence found.
    inference_mode: if True, the eval/sample HParams use batch size 1 and
      turn training mode off.

  Returns:
    A list [train_set, valid_set, test_set, model_params,
    eval_model_params, sample_model_params].
  """
  if isinstance(model_params.data_set, list):
    datasets = model_params.data_set
  else:
    datasets = [model_params.data_set]

  train_strokes = None
  valid_strokes = None
  test_strokes = None

  for dataset in datasets:
    if data_dir.startswith('http://') or data_dir.startswith('https://'):
      data_filepath = '/'.join([data_dir, dataset])
      tf.logging.info('Downloading %s', data_filepath)
      response = requests.get(data_filepath)
      # allow_pickle=True is required here just like in the local branch:
      # the stroke data are object arrays, and np.load has defaulted to
      # allow_pickle=False since NumPy 1.16.3, so this call used to fail.
      data = np.load(six.BytesIO(response.content), encoding='latin1',
                     allow_pickle=True)
    else:
      data_filepath = os.path.join(data_dir, dataset)
      data = np.load(data_filepath, encoding='latin1', allow_pickle=True)
    tf.logging.info('Loaded {}/{}/{} from {}'.format(
        len(data['train']), len(data['valid']), len(data['test']),
        dataset))
    if train_strokes is None:
      train_strokes = data['train']
      valid_strokes = data['valid']
      test_strokes = data['test']
    else:
      # Multiple datasets are simply concatenated split-by-split.
      train_strokes = np.concatenate((train_strokes, data['train']))
      valid_strokes = np.concatenate((valid_strokes, data['valid']))
      test_strokes = np.concatenate((test_strokes, data['test']))

  all_strokes = np.concatenate((train_strokes, valid_strokes, test_strokes))
  num_points = 0
  for stroke in all_strokes:
    num_points += len(stroke)
  avg_len = num_points / len(all_strokes)
  tf.logging.info('Dataset combined: {} ({}/{}/{}), avg len {}'.format(
      len(all_strokes), len(train_strokes), len(valid_strokes),
      len(test_strokes), int(avg_len)))

  # calculate the max strokes we need.
  max_seq_len = utils.get_max_len(all_strokes)
  # overwrite the hps with this calculation.
  model_params.max_seq_len = max_seq_len

  tf.logging.info('model_params.max_seq_len %i.', model_params.max_seq_len)

  # Evaluation copy of the hparams: all dropout disabled.
  eval_model_params = sketch_rnn_model.copy_hparams(model_params)
  eval_model_params.use_input_dropout = 0
  eval_model_params.use_recurrent_dropout = 0
  eval_model_params.use_output_dropout = 0
  eval_model_params.is_training = 1

  if inference_mode:
    eval_model_params.batch_size = 1
    eval_model_params.is_training = 0

  sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)
  sample_model_params.batch_size = 1  # only sample one at a time
  sample_model_params.max_seq_len = 1  # sample one point at a time

  train_set = utils.DataLoader(
      train_strokes,
      model_params.batch_size,
      max_seq_length=model_params.max_seq_len,
      random_scale_factor=model_params.random_scale_factor,
      augment_stroke_prob=model_params.augment_stroke_prob)

  # The scale factor is derived from the training set only, then reused.
  normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()
  train_set.normalize(normalizing_scale_factor)

  valid_set = utils.DataLoader(valid_strokes,
                               eval_model_params.batch_size,
                               max_seq_length=eval_model_params.max_seq_len,
                               random_scale_factor=0.0,
                               augment_stroke_prob=0.0)
  valid_set.normalize(normalizing_scale_factor)

  test_set = utils.DataLoader(test_strokes,
                              eval_model_params.batch_size,
                              max_seq_length=eval_model_params.max_seq_len,
                              random_scale_factor=0.0,
                              augment_stroke_prob=0.0)
  test_set.normalize(normalizing_scale_factor)

  tf.logging.info('normalizing_scale_factor %4.4f.',
                  normalizing_scale_factor)

  result = [
      train_set, valid_set, test_set, model_params, eval_model_params,
      sample_model_params
  ]
  return result
def evaluate_model(sess, model, data_set):
  """Returns the average weighted cost, reconstruction cost and KL cost.

  Runs the model once over every batch of `data_set` and averages the
  three cost tensors across batches.

  Args:
    sess: an active TF session holding the model variables.
    model: model object exposing input/cost tensors and `sequence_lengths`.
    data_set: loader exposing `num_batches` and `get_batch(i)`.

  Returns:
    Tuple `(avg_cost, avg_r_cost, avg_kl_cost)`.
  """
  fetches = [model.cost, model.r_cost, model.kl_cost]
  num_batches = data_set.num_batches
  sums = [0.0, 0.0, 0.0]
  for batch_index in range(num_batches):
    _, x, s = data_set.get_batch(batch_index)
    feed = {model.input_data: x, model.sequence_lengths: s}
    for slot, value in enumerate(sess.run(fetches, feed)):
      sums[slot] += value
  # Average each accumulated cost over the number of batches.
  return tuple(total / num_batches for total in sums)
def load_checkpoint(sess, checkpoint_path):
  """Restore the latest checkpoint found in `checkpoint_path` into `sess`."""
  restorer = tf.train.Saver(tf.global_variables())
  checkpoint_state = tf.train.get_checkpoint_state(checkpoint_path)
  tf.logging.info('Loading model %s.', checkpoint_state.model_checkpoint_path)
  restorer.restore(sess, checkpoint_state.model_checkpoint_path)
def save_model(sess, model_save_path, global_step):
  """Save a checkpoint of all global variables under `model_save_path`."""
  # Checkpoint files are written with the 'vector' prefix, suffixed by step.
  prefix = os.path.join(model_save_path, 'vector')
  tf.logging.info('saving model %s.', prefix)
  tf.logging.info('global_step %i.', global_step)
  tf.train.Saver(tf.global_variables()).save(
      sess, prefix, global_step=global_step)
def _scalar_summary(tag, value):
  """Build a one-value Summary proto mapping `tag` -> float(`value`)."""
  summ = tf.summary.Summary()
  summ.value.add(tag=tag, simple_value=float(value))
  return summ


def train(sess, model, eval_model, train_set, valid_set, test_set):
  """Train a sketch-rnn model.

  Runs `model.hps.num_steps` optimization steps on `train_set`. Every 20
  steps the training costs are logged and written as TensorBoard summaries;
  every `hps.save_every` steps the model is evaluated on `valid_set`, and
  whenever the validation cost improves, a checkpoint is written and the
  model is additionally scored on `test_set`.

  Args:
    sess: a TF session with all variables initialized.
    model: training model (provides `hps`, cost tensors and `train_op`).
    eval_model: model sharing weights with `model`, used for evaluation.
    train_set: utils.DataLoader with the training strokes.
    valid_set: utils.DataLoader with the validation strokes.
    test_set: utils.DataLoader with the test strokes.
  """
  # Setup summary writer.
  summary_writer = tf.summary.FileWriter(FLAGS.log_root)

  # Log the trainable-parameter count once, and record it as a summary.
  count_t_vars = 0
  for var in tf.trainable_variables():
    num_param = np.prod(var.get_shape().as_list())
    count_t_vars += num_param
    tf.logging.info('%s %s %i', var.name, str(var.get_shape()), num_param)
  tf.logging.info('Total trainable variables %i.', count_t_vars)
  summary_writer.add_summary(
      _scalar_summary('Num_Trainable_Params', count_t_vars), 0)
  summary_writer.flush()

  best_valid_cost = 100000000.0  # large init value so the first eval wins

  # main train loop
  hps = model.hps
  start = time.time()
  for _ in range(hps.num_steps):
    step = sess.run(model.global_step)

    # Exponentially decayed learning rate and annealed KL weight.
    curr_learning_rate = ((hps.learning_rate - hps.min_learning_rate) *
                          hps.decay_rate**step + hps.min_learning_rate)
    curr_kl_weight = (hps.kl_weight -
                      (hps.kl_weight - hps.kl_weight_start) *
                      hps.kl_decay_rate**step)

    _, x, s = train_set.random_batch()
    feed = {
        model.input_data: x,
        model.sequence_lengths: s,
        model.lr: curr_learning_rate,
        model.kl_weight: curr_kl_weight
    }
    (train_cost, r_cost, kl_cost, _, train_step, _) = sess.run([
        model.cost, model.r_cost, model.kl_cost, model.final_state,
        model.global_step, model.train_op
    ], feed)

    if step % 20 == 0 and step > 0:
      time_taken = time.time() - start
      tf.logging.info(
          'step: %d, lr: %.6f, klw: %0.4f, cost: %.4f, '
          'recon: %.4f, kl: %.4f, train_time_taken: %.4f',
          step, curr_learning_rate, curr_kl_weight,
          train_cost, r_cost, kl_cost, time_taken)
      for tag, value in (('Train_Cost', train_cost),
                         ('Train_Reconstr_Cost', r_cost),
                         ('Train_KL_Cost', kl_cost),
                         ('Learning_Rate', curr_learning_rate),
                         ('KL_Weight', curr_kl_weight),
                         ('Time_Taken_Train', time_taken)):
        summary_writer.add_summary(_scalar_summary(tag, value), train_step)
      summary_writer.flush()
      start = time.time()

    if step % hps.save_every == 0 and step > 0:
      (valid_cost, valid_r_cost,
       valid_kl_cost) = evaluate_model(sess, eval_model, valid_set)
      time_taken_valid = time.time() - start
      start = time.time()
      tf.logging.info(
          'best_valid_cost: %0.4f, valid_cost: %.4f, valid_recon: '
          '%.4f, valid_kl: %.4f, valid_time_taken: %.4f',
          min(best_valid_cost, valid_cost), valid_cost,
          valid_r_cost, valid_kl_cost, time_taken_valid)
      for tag, value in (('Valid_Cost', valid_cost),
                         ('Valid_Reconstr_Cost', valid_r_cost),
                         ('Valid_KL_Cost', valid_kl_cost),
                         ('Time_Taken_Valid', time_taken_valid)):
        summary_writer.add_summary(_scalar_summary(tag, value), train_step)
      summary_writer.flush()

      if valid_cost < best_valid_cost:
        # New best validation cost: checkpoint, then score the test set.
        best_valid_cost = valid_cost
        save_model(sess, FLAGS.log_root, step)
        time_taken_save = time.time() - start
        start = time.time()
        tf.logging.info('time_taken_save %4.4f.', time_taken_save)
        summary_writer.add_summary(
            _scalar_summary('Best_Valid_Cost', best_valid_cost), train_step)
        summary_writer.flush()

        (eval_cost, eval_r_cost,
         eval_kl_cost) = evaluate_model(sess, eval_model, test_set)
        time_taken_eval = time.time() - start
        start = time.time()
        tf.logging.info(
            'eval_cost: %.4f, eval_recon: %.4f, '
            'eval_kl: %.4f, eval_time_taken: %.4f',
            eval_cost, eval_r_cost, eval_kl_cost, time_taken_eval)
        for tag, value in (('Eval_Cost', eval_cost),
                           ('Eval_Reconstr_Cost', eval_r_cost),
                           ('Eval_KL_Cost', eval_kl_cost),
                           ('Time_Taken_Eval', time_taken_eval)):
          summary_writer.add_summary(_scalar_summary(tag, value), train_step)
        summary_writer.flush()
def trainer(model_params):
  """Train a sketch-rnn model."""
  np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)

  tf.logging.info('sketch-rnn')
  tf.logging.info('Hyperparams:')
  tf.logging.info('Loading data files.')
  # load_dataset returns the (possibly updated) hparams along with the data;
  # the trailing sample_model_params entry is unused here.
  (train_set, valid_set, test_set,
   model_params, eval_model_params, *_) = load_dataset(FLAGS.data_dir,
                                                       model_params)

  reset_graph()
  model = sketch_rnn_model.Model(model_params)
  eval_model = sketch_rnn_model.Model(eval_model_params, reuse=True)

  sess = tf.InteractiveSession()
  sess.run(tf.global_variables_initializer())

  if FLAGS.resume_training:
    load_checkpoint(sess, FLAGS.log_root)

  # Write config file to json file.
  tf.gfile.MakeDirs(FLAGS.log_root)
  config_path = os.path.join(FLAGS.log_root, 'model_config.json')
  with tf.gfile.Open(config_path, 'w') as f:
    json.dump(list(model_params.values()), f, indent=True)

  train(sess, model, eval_model, train_set, valid_set, test_set)
def main(unused_argv):
  """Load model params, save config file and start trainer."""
  hparams = sketch_rnn_model.get_default_hparams()
  if FLAGS.hparams:
    # Command-line overrides, e.g. --hparams="save_every=40,decay_rate=0.99".
    hparams.parse(FLAGS.hparams)
  trainer(hparams)
def console_entry_point():
  """Entry point used by the console-script wrapper."""
  # This script is written against the TF1 graph/session API.
  tf.disable_v2_behavior()
  tf.app.run(main)


if __name__ == '__main__':
  console_entry_point()
| [
"model.Model",
"tensorflow.compat.v1.disable_v2_behavior",
"zipfile.ZipFile",
"model.copy_hparams",
"tensorflow.compat.v1.get_default_session",
"tensorflow.compat.v1.summary.Summary",
"tensorflow.compat.v1.global_variables_initializer",
"model.get_default_hparams",
"tensorflow.compat.v1.logging.set_... | [((857, 898), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (881, 898), True, 'import tensorflow.compat.v1 as tf\n'), ((928, 1211), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""https://github.com/hardmaru/sketch-rnn-datasets/raw/master/aaron_sheep"""', '"""The directory in which to find the dataset specified in model hparams. If data_dir starts with "http://" or "https://", the file will be fetched remotely."""'], {}), '(\'data_dir\',\n \'https://github.com/hardmaru/sketch-rnn-datasets/raw/master/aaron_sheep\',\n \'The directory in which to find the dataset specified in model hparams. If data_dir starts with "http://" or "https://", the file will be fetched remotely.\'\n )\n', (954, 1211), True, 'import tensorflow.compat.v1 as tf\n'), ((1226, 1356), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""log_root"""', '"""/tmp/sketch_rnn/models/default"""', '"""Directory to store model checkpoints, tensorboard."""'], {}), "('log_root', '/tmp/sketch_rnn/models/default',\n 'Directory to store model checkpoints, tensorboard.')\n", (1252, 1356), True, 'import tensorflow.compat.v1 as tf\n'), ((1362, 1462), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""resume_training"""', '(False)', '"""Set to true to load previous checkpoint"""'], {}), "('resume_training', False,\n 'Set to true to load previous checkpoint')\n", (1389, 1462), True, 'import tensorflow.compat.v1 as tf\n'), ((1487, 1691), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""hparams"""', '""""""', '"""Pass in comma-separated key=value pairs such as \'save_every=40,decay_rate=0.99\' (no whitespace) to be read into the HParams object defined in model.py"""'], {}), '(\'hparams\', \'\',\n "Pass in comma-separated key=value pairs such as 
\'save_every=40,decay_rate=0.99\' (no whitespace) to be read into the HParams object defined in model.py"\n )\n', (1513, 1691), True, 'import tensorflow.compat.v1 as tf\n'), ((1921, 1945), 'tensorflow.compat.v1.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (1943, 1945), True, 'import tensorflow.compat.v1 as tf\n'), ((1984, 2008), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2006, 2008), True, 'import tensorflow.compat.v1 as tf\n'), ((2139, 2177), 'model.get_default_hparams', 'sketch_rnn_model.get_default_hparams', ([], {}), '()\n', (2175, 2177), True, 'import model as sketch_rnn_model\n'), ((2485, 2523), 'model.get_default_hparams', 'sketch_rnn_model.get_default_hparams', ([], {}), '()\n', (2521, 2523), True, 'import model as sketch_rnn_model\n'), ((2732, 2775), 'model.copy_hparams', 'sketch_rnn_model.copy_hparams', (['model_params'], {}), '(model_params)\n', (2761, 2775), True, 'import model as sketch_rnn_model\n'), ((2977, 3025), 'model.copy_hparams', 'sketch_rnn_model.copy_hparams', (['eval_model_params'], {}), '(eval_model_params)\n', (3006, 3025), True, 'import model as sketch_rnn_model\n'), ((3381, 3415), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['models_root_dir'], {}), '(models_root_dir)\n', (3398, 3415), True, 'import tensorflow.compat.v1 as tf\n'), ((3537, 3561), 'os.path.isfile', 'os.path.isfile', (['zip_path'], {}), '(zip_path)\n', (3551, 3561), False, 'import os\n'), ((3865, 3909), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Unzipping %s..."""', 'zip_path'], {}), "('Unzipping %s...', zip_path)\n", (3880, 3909), True, 'import tensorflow.compat.v1 as tf\n'), ((4011, 4049), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Unzipping complete."""'], {}), "('Unzipping complete.')\n", (4026, 4049), True, 'import tensorflow.compat.v1 as tf\n'), ((5599, 5659), 'numpy.concatenate', 'np.concatenate', (['(train_strokes, valid_strokes, test_strokes)'], 
{}), '((train_strokes, valid_strokes, test_strokes))\n', (5613, 5659), True, 'import numpy as np\n'), ((6030, 6060), 'utils.get_max_len', 'utils.get_max_len', (['all_strokes'], {}), '(all_strokes)\n', (6047, 6060), False, 'import utils\n'), ((6156, 6229), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""model_params.max_seq_len %i."""', 'model_params.max_seq_len'], {}), "('model_params.max_seq_len %i.', model_params.max_seq_len)\n", (6171, 6229), True, 'import tensorflow.compat.v1 as tf\n'), ((6255, 6298), 'model.copy_hparams', 'sketch_rnn_model.copy_hparams', (['model_params'], {}), '(model_params)\n', (6284, 6298), True, 'import model as sketch_rnn_model\n'), ((6609, 6657), 'model.copy_hparams', 'sketch_rnn_model.copy_hparams', (['eval_model_params'], {}), '(eval_model_params)\n', (6638, 6657), True, 'import model as sketch_rnn_model\n'), ((6813, 7028), 'utils.DataLoader', 'utils.DataLoader', (['train_strokes', 'model_params.batch_size'], {'max_seq_length': 'model_params.max_seq_len', 'random_scale_factor': 'model_params.random_scale_factor', 'augment_stroke_prob': 'model_params.augment_stroke_prob'}), '(train_strokes, model_params.batch_size, max_seq_length=\n model_params.max_seq_len, random_scale_factor=model_params.\n random_scale_factor, augment_stroke_prob=model_params.augment_stroke_prob)\n', (6829, 7028), False, 'import utils\n'), ((7206, 7371), 'utils.DataLoader', 'utils.DataLoader', (['valid_strokes', 'eval_model_params.batch_size'], {'max_seq_length': 'eval_model_params.max_seq_len', 'random_scale_factor': '(0.0)', 'augment_stroke_prob': '(0.0)'}), '(valid_strokes, eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len, random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n', (7222, 7371), False, 'import utils\n'), ((7562, 7727), 'utils.DataLoader', 'utils.DataLoader', (['test_strokes', 'eval_model_params.batch_size'], {'max_seq_length': 'eval_model_params.max_seq_len', 'random_scale_factor': '(0.0)', 
'augment_stroke_prob': '(0.0)'}), '(test_strokes, eval_model_params.batch_size, max_seq_length\n =eval_model_params.max_seq_len, random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n', (7578, 7727), False, 'import utils\n'), ((7901, 7977), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""normalizing_scale_factor %4.4f."""', 'normalizing_scale_factor'], {}), "('normalizing_scale_factor %4.4f.', normalizing_scale_factor)\n", (7916, 7977), True, 'import tensorflow.compat.v1 as tf\n'), ((8977, 9023), 'tensorflow.compat.v1.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_path'], {}), '(checkpoint_path)\n', (9006, 9023), True, 'import tensorflow.compat.v1 as tf\n'), ((9028, 9092), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Loading model %s."""', 'ckpt.model_checkpoint_path'], {}), "('Loading model %s.', ckpt.model_checkpoint_path)\n", (9043, 9092), True, 'import tensorflow.compat.v1 as tf\n'), ((9271, 9310), 'os.path.join', 'os.path.join', (['model_save_path', '"""vector"""'], {}), "(model_save_path, 'vector')\n", (9283, 9310), False, 'import os\n'), ((9315, 9367), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""saving model %s."""', 'checkpoint_path'], {}), "('saving model %s.', checkpoint_path)\n", (9330, 9367), True, 'import tensorflow.compat.v1 as tf\n'), ((9372, 9419), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""global_step %i."""', 'global_step'], {}), "('global_step %i.', global_step)\n", (9387, 9419), True, 'import tensorflow.compat.v1 as tf\n'), ((9638, 9675), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['FLAGS.log_root'], {}), '(FLAGS.log_root)\n', (9659, 9675), True, 'import tensorflow.compat.v1 as tf\n'), ((9724, 9748), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (9746, 9748), True, 'import tensorflow.compat.v1 as tf\n'), ((9965, 10027), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', 
(['"""Total trainable variables %i."""', 'count_t_vars'], {}), "('Total trainable variables %i.', count_t_vars)\n", (9980, 10027), True, 'import tensorflow.compat.v1 as tf\n'), ((10045, 10065), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (10063, 10065), True, 'import tensorflow.compat.v1 as tf\n'), ((10412, 10423), 'time.time', 'time.time', ([], {}), '()\n', (10421, 10423), False, 'import time\n'), ((17326, 17401), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'edgeitems': '(6)', 'linewidth': '(200)', 'suppress': '(True)'}), '(precision=8, edgeitems=6, linewidth=200, suppress=True)\n', (17345, 17401), True, 'import numpy as np\n'), ((17407, 17436), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""sketch-rnn"""'], {}), "('sketch-rnn')\n", (17422, 17436), True, 'import tensorflow.compat.v1 as tf\n'), ((17441, 17472), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Hyperparams:"""'], {}), "('Hyperparams:')\n", (17456, 17472), True, 'import tensorflow.compat.v1 as tf\n'), ((17477, 17515), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Loading data files."""'], {}), "('Loading data files.')\n", (17492, 17515), True, 'import tensorflow.compat.v1 as tf\n'), ((17756, 17792), 'model.Model', 'sketch_rnn_model.Model', (['model_params'], {}), '(model_params)\n', (17778, 17792), True, 'import model as sketch_rnn_model\n'), ((17810, 17863), 'model.Model', 'sketch_rnn_model.Model', (['eval_model_params'], {'reuse': '(True)'}), '(eval_model_params, reuse=True)\n', (17832, 17863), True, 'import model as sketch_rnn_model\n'), ((17876, 17899), 'tensorflow.compat.v1.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (17897, 17899), True, 'import tensorflow.compat.v1 as tf\n'), ((18068, 18101), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.log_root'], {}), '(FLAGS.log_root)\n', (18085, 18101), True, 'import tensorflow.compat.v1 as tf\n'), 
((18450, 18488), 'model.get_default_hparams', 'sketch_rnn_model.get_default_hparams', ([], {}), '()\n', (18486, 18488), True, 'import model as sketch_rnn_model\n'), ((18612, 18636), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (18634, 18636), True, 'import tensorflow.compat.v1 as tf\n'), ((18641, 18657), 'tensorflow.compat.v1.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (18651, 18657), True, 'import tensorflow.compat.v1 as tf\n'), ((3489, 3528), 'os.path.basename', 'os.path.basename', (['pretrained_models_url'], {}), '(pretrained_models_url)\n', (3505, 3528), False, 'import os\n'), ((3571, 3636), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""%s already exists, using cached copy"""', 'zip_path'], {}), "('%s already exists, using cached copy', zip_path)\n", (3586, 3636), True, 'import tensorflow.compat.v1 as tf\n'), ((3655, 3741), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Downloading pretrained models from %s..."""', 'pretrained_models_url'], {}), "('Downloading pretrained models from %s...',\n pretrained_models_url)\n", (3670, 3741), True, 'import tensorflow.compat.v1 as tf\n'), ((3770, 3814), 'six.moves.urllib.request.urlretrieve', 'urlretrieve', (['pretrained_models_url', 'zip_path'], {}), '(pretrained_models_url, zip_path)\n', (3781, 3814), False, 'from six.moves.urllib.request import urlretrieve\n'), ((3823, 3860), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Download complete."""'], {}), "('Download complete.')\n", (3838, 3860), True, 'import tensorflow.compat.v1 as tf\n'), ((3919, 3944), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path'], {}), '(zip_path)\n', (3934, 3944), False, 'import zipfile\n'), ((8943, 8964), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (8962, 8964), True, 'import tensorflow.compat.v1 as tf\n'), ((9226, 9247), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (9245, 
9247), True, 'import tensorflow.compat.v1 as tf\n'), ((17913, 17946), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (17944, 17946), True, 'import tensorflow.compat.v1 as tf\n'), ((2201, 2245), 'os.path.join', 'os.path.join', (['model_dir', '"""model_config.json"""'], {}), "(model_dir, 'model_config.json')\n", (2213, 2245), False, 'import os\n'), ((2547, 2591), 'os.path.join', 'os.path.join', (['model_dir', '"""model_config.json"""'], {}), "(model_dir, 'model_config.json')\n", (2559, 2591), False, 'import os\n'), ((4704, 4752), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Downloading %s"""', 'data_filepath'], {}), "('Downloading %s', data_filepath)\n", (4719, 4752), True, 'import tensorflow.compat.v1 as tf\n'), ((4776, 4803), 'requests.get', 'requests.get', (['data_filepath'], {}), '(data_filepath)\n', (4788, 4803), False, 'import requests\n'), ((4923, 4954), 'os.path.join', 'os.path.join', (['data_dir', 'dataset'], {}), '(data_dir, dataset)\n', (4935, 4954), False, 'import os\n'), ((4974, 5034), 'numpy.load', 'np.load', (['data_filepath'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(data_filepath, encoding='latin1', allow_pickle=True)\n", (4981, 5034), True, 'import numpy as np\n'), ((5386, 5432), 'numpy.concatenate', 'np.concatenate', (["(train_strokes, data['train'])"], {}), "((train_strokes, data['train']))\n", (5400, 5432), True, 'import numpy as np\n'), ((5461, 5507), 'numpy.concatenate', 'np.concatenate', (["(valid_strokes, data['valid'])"], {}), "((valid_strokes, data['valid']))\n", (5475, 5507), True, 'import numpy as np\n'), ((5535, 5579), 'numpy.concatenate', 'np.concatenate', (["(test_strokes, data['test'])"], {}), "((test_strokes, data['test']))\n", (5549, 5579), True, 'import numpy as np\n'), ((11314, 11325), 'time.time', 'time.time', ([], {}), '()\n', (11323, 11325), False, 'import time\n'), ((11388, 11408), 'tensorflow.compat.v1.summary.Summary', 
'tf.summary.Summary', ([], {}), '()\n', (11406, 11408), True, 'import tensorflow.compat.v1 as tf\n'), ((11551, 11571), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (11569, 11571), True, 'import tensorflow.compat.v1 as tf\n'), ((11721, 11741), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (11739, 11741), True, 'import tensorflow.compat.v1 as tf\n'), ((11844, 11864), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (11862, 11864), True, 'import tensorflow.compat.v1 as tf\n'), ((12015, 12035), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (12033, 12035), True, 'import tensorflow.compat.v1 as tf\n'), ((12187, 12207), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (12205, 12207), True, 'import tensorflow.compat.v1 as tf\n'), ((12692, 12719), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['output_log'], {}), '(output_log)\n', (12707, 12719), True, 'import tensorflow.compat.v1 as tf\n'), ((13153, 13164), 'time.time', 'time.time', ([], {}), '()\n', (13162, 13164), False, 'import time\n'), ((13351, 13362), 'time.time', 'time.time', ([], {}), '()\n', (13360, 13362), False, 'import time\n'), ((13426, 13437), 'time.time', 'time.time', ([], {}), '()\n', (13435, 13437), False, 'import time\n'), ((13469, 13489), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (13487, 13489), True, 'import tensorflow.compat.v1 as tf\n'), ((13650, 13670), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (13668, 13670), True, 'import tensorflow.compat.v1 as tf\n'), ((13844, 13864), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (13862, 13864), True, 'import tensorflow.compat.v1 as tf\n'), ((14023, 14043), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (14041, 14043), True, 'import 
tensorflow.compat.v1 as tf\n'), ((14570, 14597), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['output_log'], {}), '(output_log)\n', (14585, 14597), True, 'import tensorflow.compat.v1 as tf\n'), ((18125, 18174), 'os.path.join', 'os.path.join', (['FLAGS.log_root', '"""model_config.json"""'], {}), "(FLAGS.log_root, 'model_config.json')\n", (18137, 18174), False, 'import os\n'), ((4831, 4860), 'six.BytesIO', 'six.BytesIO', (['response.content'], {}), '(response.content)\n', (4842, 4860), False, 'import six\n'), ((15078, 15089), 'time.time', 'time.time', ([], {}), '()\n', (15087, 15089), False, 'import time\n'), ((15160, 15171), 'time.time', 'time.time', ([], {}), '()\n', (15169, 15171), False, 'import time\n'), ((15189, 15247), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""time_taken_save %4.4f."""', 'time_taken_save'], {}), "('time_taken_save %4.4f.', time_taken_save)\n", (15204, 15247), True, 'import tensorflow.compat.v1 as tf\n'), ((15288, 15308), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (15306, 15308), True, 'import tensorflow.compat.v1 as tf\n'), ((15695, 15706), 'time.time', 'time.time', ([], {}), '()\n', (15704, 15706), False, 'import time\n'), ((15777, 15788), 'time.time', 'time.time', ([], {}), '()\n', (15786, 15788), False, 'import time\n'), ((15823, 15843), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (15841, 15843), True, 'import tensorflow.compat.v1 as tf\n'), ((16011, 16031), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (16029, 16031), True, 'import tensorflow.compat.v1 as tf\n'), ((16212, 16232), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (16230, 16232), True, 'import tensorflow.compat.v1 as tf\n'), ((16398, 16418), 'tensorflow.compat.v1.summary.Summary', 'tf.summary.Summary', ([], {}), '()\n', (16416, 16418), True, 'import tensorflow.compat.v1 as tf\n'), ((16903, 16930), 
'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['output_log'], {}), '(output_log)\n', (16918, 16930), True, 'import tensorflow.compat.v1 as tf\n')] |
"""
Aggregate tools
===============
"""
import sys
import numpy
from .._lib.hashmap import factorize
from ..compat import tqdm
from ..ds.scaling import linearscaling
from .arrays import first, lexsort_uint32_pair, to_structured
def igroupby(ids, values, n=None, logging_prefix=None, assume_sorted=False,
             find_next_hint=512):
    """
    Efficiently converts two arrays representing a relation
    (the ``ids`` and the associated ``values``) to an iterable ``(id, values_associated)``.

    The ``values`` are grouped by ``ids`` and a sequence of tuples is generated.
    The ``i`` th tuple generated is ``(id_i, values[ids == id_i])``,
    ``id_i`` being the ``i`` th element of the ``ids`` array, once sorted in ascending order.

    :param array ids: ``(>=n,) dtype array``
    :param array values: ``(>=n, *shape) uint32 array``
    :param int? n: length of array to consider,
        applying igroupby to ``(ids[:n], values[:n])``. Uses full array when not set.
    :param string? logging_prefix: prefix to include while logging progress.
        ``(default:`` Does not log``)``.
    :param bool? assume_sorted: whether ids is sorted. ``(default: False)``
    :param int? find_next_hint: hint for find_next_lookup. ``(default: 512)``
    :generates: tuple(id:int, values_associated:``(m, *shape) array slice``)

    Example
    _______
    >>> ids = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> gen = igroupby(ids, values)
    >>> next(gen)
    (0, array([0, 1, 2, 3, 4]))
    >>> next(gen)
    (1, array([0, 2, 4, 6]))
    >>> next(gen)
    (3, array([0, 4, 6]))

    Example with strings as ids:

    >>> ids = numpy.array(["alpha", "alpha", "beta", "omega", "alpha", "gamma", "beta"])
    >>> values = numpy.array([1, 2, 10, 100, 3, 1000, 20])
    >>> gen = igroupby(ids, values)
    >>> next(gen)
    ('alpha', array([1, 2, 3]))
    >>> next(gen)
    ('beta', array([10, 20]))
    >>> next(gen)
    ('gamma', array([1000]))
    >>> next(gen)
    ('omega', array([100]))
    """
    # convert to numpy arrays
    ids = numpy.asarray(ids)
    values = numpy.asarray(values)
    # check input shape
    assert len(ids.shape) == 1
    if n is None:
        n = ids.shape[0]
    assert ids.shape[0] >= n and values.shape[0] >= n, values.shape
    # sort if needed; only the first n entries take part in the grouping
    if not assume_sorted:
        ids = ids[:n]
        values = values[:n]
        asort = numpy.argsort(ids)
        ids = ids[asort]
        values = values[asort]
    # init
    start_block = 0
    find_next_lookup = find_next_hint
    # search next change block by block
    disable = logging_prefix is None
    with tqdm(total=n, desc=logging_prefix, disable=disable, file=sys.stdout) as pbar:
        while start_block < n:
            # find all items having id by block boundaries
            current_id = ids[start_block]
            try:
                # `first` appears to scan forward from `offset` in chunks of
                # `batch_size` for the first element breaking the predicate,
                # raising StopIteration when none is found — TODO confirm
                # against lib.arrays.first.
                end_block = first(ids,
                                  lambda x: x != current_id,
                                  offset=start_block,
                                  batch_size=find_next_lookup)
                # adapt the look-ahead to twice the size of the last group,
                # never dropping below the caller-provided hint
                find_next_lookup = max(
                    find_next_hint, 2 * (end_block - start_block))
            except StopIteration:
                # no further id change: the last group runs to the end
                end_block = n
            current_id_values = values[start_block:end_block]
            assert (ids[start_block:end_block] == current_id).all()
            pbar.update(end_block - start_block)
            start_block = end_block
            yield current_id, current_id_values
def ufunc_group_by_idx(idx, values, ufunc, init, minlength=None):
    """
    Apply a numpy ``ufunc`` to groups of ``values`` sharing the same index.

    Produces ``out`` such that ``out[i]`` is the reduction by ``ufunc`` of
    ``values[idx == i]``, seeded with ``init``.
    (See available ufuncs `here <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_).

    Warning: ``init`` is not merely a filling value for missing indexes.
    If index ``i`` is missing, then ``out[i] = init``, but ``init`` also
    seeds the reduction of every present group: with ``ufunc = numpy.add``
    and ``init = -1``, each group sum comes out decreased by one.

    :param array idx: ``(n,) int array``
    :param array values: ``(n,) dtype array``
    :param numpy.ufunc ufunc: universal function applied to the groups of ``values``
    :param dtype init: initialization value
    :param int? minlength: ``(default: idx.max() + 1)``
    :returns: (min-length,) dtype array, such that ``out[i] = ufunc(values[idx==i])``

    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> ufunc_group_by_idx(idx, values, numpy.maximum, -1)
    array([ 4,  6, -1,  6])
    >>> ufunc_group_by_idx(idx, values, numpy.add, -1)
    array([ 9, 11, -1,  9])
    """
    # output covers all indexes present in idx, extended to minlength if larger
    out_size = idx.max() + 1
    if minlength is not None and minlength > out_size:
        out_size = minlength
    # the output dtype is inferred from `init` by numpy.full
    out = numpy.full(out_size, init)
    # unbuffered in-place application: repeated indexes accumulate correctly
    ufunc.at(out, idx, values)
    return out
def min_by_idx(idx, values, minlength=None, fill=None):
    """
    Given array of indexes ``idx`` and array ``values``,
    outputs the min value by idx, aligned on ``arange(idx.max() + 1)``.

    See also ``argmin_by_idx`` and ``value_at_argmin_by_idx``.

    :param array idx: (n,) int array
    :param array values: (n,) float array
    :param int? minlength: (default: idx.max() + 1)
    :param float? fill: filling value for missing idx (default: +inf)
    :returns: (min-length,) float array, such that out[i] = min(values[idx==i])

    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([1, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> min_by_idx(idx, values, fill=100)
    array([  1,   0, 100,   0])
    >>> min_by_idx(idx, values)
    array([                  1,                   0, 9223372036854775807,
                             0])
    """
    assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
        'Can only use get_xx_by_idx with integer idx, where (idx >= 0).all()')
    if fill is None:
        # +inf (or the integer dtype's max) is the neutral element of `minimum`
        fill = numpy.inf if values.dtype.kind == 'f' else numpy.iinfo(values.dtype).max
    else:
        # a caller-provided fill must not undercut any group minimum,
        # otherwise it would corrupt the results
        assert fill >= values.max()
    return ufunc_group_by_idx(idx, values, numpy.minimum, fill, minlength=minlength)
def max_by_idx(idx, values, minlength=None, fill=None):
    """
    Reduce ``values`` with ``numpy.maximum`` grouped by ``idx``,
    aligned on ``arange(idx.max() + 1)``.
    See also ``argmax_by_idx`` and ``value_at_argmax_by_idx``.
    :param array idx: (n,) int array
    :param array values: (n,) float array
    :param int? minlength: (default: idx.max() + 1)
    :param float? fill: filling value for missing idx
        (default: -inf, or the dtype's min for integer values)
    :returns: (min-length,) array, such that out[i] = max(values[idx==i])
    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> max_by_idx(idx, values, fill=-1)
    array([ 4,  6, -1,  6])
    >>> max_by_idx(idx, values, minlength=10, fill=-1)
    array([ 4,  6, -1,  6, -1, -1, -1, -1, -1, -1])
    >>> max_by_idx(idx, values)
    array([ 4,  6, -9223372036854775808,  6])
    """
    has_valid_idx = idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all())
    assert has_valid_idx, (
        'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
    if fill is None:
        # neutral element of maximum: -inf for floats, the dtype's min for ints
        if values.dtype.kind == 'f':
            fill = -numpy.inf
        else:
            fill = numpy.iinfo(values.dtype).min
    else:
        # a custom fill must not shadow any real maximum
        assert fill <= values.min()
    return ufunc_group_by_idx(idx, values, numpy.maximum, fill, minlength=minlength)
def argmin_by_idx(idx, values, minlength=None, fill=None):
    """
    For each group of ``idx``, return the flat-input position of the smallest
    value, aligned on ``arange(idx.max() + 1)``.
    See also ``min_by_idx`` and ``value_at_argmin_by_idx``.
    :param array idx: (n,) int array
    :param array values: (n,) float array
    :param int? minlength: (default: idx.max() + 1)
    :param float? fill: filling value for missing idx (default: -1)
    :returns: (min-length,) int array, such that
        out[i] = argmin_{idx}(values[idx] : idx[idx] == i)
    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> argmin_by_idx(idx, values, fill=-1)
    array([ 0,  5, -1,  9])
    >>> argmin_by_idx(idx, values, minlength=10, fill=-1)
    array([ 0,  5, -1,  9, -1, -1, -1, -1, -1, -1])
    """
    has_valid_idx = idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all())
    assert has_valid_idx, (
        'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
    fill = -1 if fill is None else fill
    group_minima = min_by_idx(idx, values, minlength)  # (n-idx,)
    # positions whose value equals their group's minimum
    at_minimum = values == group_minima[idx]
    out = numpy.full(group_minima.size, fill)
    # for duplicated minima within a group, an arbitrary position wins
    out[idx[at_minimum]] = numpy.where(at_minimum)[0]
    return out
# TODO: improve test
def value_at_argmin_by_idx(idx, sorting_values, fill, output_values=None, minlength=None):
    """
    For every group of ``idx``, gather the entry of ``output_values`` located
    where ``sorting_values`` reaches its group minimum.
    Wrapper around ``argmin_by_idx`` plus a gather step; ``fill`` is written
    for unseen indexes and is never compared against ``sorting_values``.
    :param array idx: (n,) uint array with values < max_idx
    :param array sorting_values: (n,) array used to locate the minimum
    :param fill: filling value for output[i] if there is no idx == i
    :param array? output_values: (n,) dtype array
        Useful if you want to select the min based on one array,
        and get the value on another array
    :param int? minlength: minimum shape for the output array.
    :returns array: (max_idx+1,), dtype array such that
        out[i] = min(values[idx==i])
    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> value_at_argmin_by_idx(idx, values, fill=-1)
    array([ 0,  0, -1,  0])
    >>> value_at_argmin_by_idx(idx, values, minlength=10, fill=-1)
    array([ 0,  0, -1,  0, -1, -1, -1, -1, -1, -1])
    """
    has_valid_idx = idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all())
    assert has_valid_idx, (
        'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
    out_length = max(idx.max() + 1, minlength or 0)
    if output_values is None:
        output_values = sorting_values
    result = numpy.full(out_length, fill, dtype=output_values.dtype)
    argmin = argmin_by_idx(idx, sorting_values, minlength=minlength)
    # -1 marks groups with no entry; keep `fill` there
    found = argmin != -1
    result[:found.size][found] = output_values[argmin[found]]
    return result
def argmax_by_idx(idx, values, minlength=None, fill=None):
    """
    For each group of ``idx``, return the flat-input position of the largest
    value, aligned on ``arange(idx.max() + 1)``.
    See also ``max_by_idx`` and ``value_at_argmax_by_idx``.
    :param array idx: (n,) int array
    :param array values: (n,) float array
    :param int? minlength: (default: idx.max() + 1)
    :param float? fill: filling value for missing idx (default: -1)
    :returns: (min-length,) int array, such that
        out[i] = argmax_{idx}(values[idx] : idx[idx] == i)
    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> argmax_by_idx(idx, values, fill=-1)
    array([ 4,  8, -1, 11])
    >>> argmax_by_idx(idx, values, minlength=10, fill=-1)
    array([ 4,  8, -1, 11, -1, -1, -1, -1, -1, -1])
    """
    has_valid_idx = idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all())
    assert has_valid_idx, (
        'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
    fill = -1 if fill is None else fill
    group_maxima = max_by_idx(idx, values, minlength)  # (n-idx,)
    # positions whose value equals their group's maximum
    at_maximum = values == group_maxima[idx]
    out = numpy.full(group_maxima.size, fill)
    # for duplicated maxima within a group, an arbitrary position wins
    out[idx[at_maximum]] = numpy.where(at_maximum)[0]
    return out
# TODO: improve test
def value_at_argmax_by_idx(idx, sorting_values, fill, output_values=None, minlength=None):
    """
    For every group of ``idx``, gather the entry of ``output_values`` located
    where ``sorting_values`` reaches its group maximum.
    Wrapper around ``argmax_by_idx`` plus a gather step; ``fill`` is written
    for unseen indexes and is never compared against ``sorting_values``.
    :param array idx: (n,) uint array with values < max_idx
    :param array sorting_values: (n,) array used to locate the maximum
    :param fill: filling value for output[i] if there is no idx == i
    :param array? output_values: (n,) dtype array
        Useful if you want to select the max based on one array,
        and get the value on another array
    :param int? minlength: minimum shape for the output array.
    :returns array: (max_idx+1,), dtype array such that
        out[i] = max(values[idx==i])
    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> value_at_argmax_by_idx(idx, values, fill=-1)
    array([ 4,  6, -1,  6])
    >>> value_at_argmax_by_idx(idx, values, minlength=10, fill=-1)
    array([ 4,  6, -1,  6, -1, -1, -1, -1, -1, -1])
    """
    has_valid_idx = idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all())
    assert has_valid_idx, (
        'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
    out_length = max(idx.max() + 1, minlength or 0)
    if output_values is None:
        output_values = sorting_values
    result = numpy.full(out_length, fill, dtype=output_values.dtype)
    argmax = argmax_by_idx(idx, sorting_values, minlength=minlength)
    # -1 marks groups with no entry; keep `fill` there
    found = argmax != -1
    result[:found.size][found] = output_values[argmax[found]]
    return result
def connect_adjacents_in_groups(group_ids, values, max_gap):
    """
    For each group_id in ``group_ids``, connect values that are closer than ``max_gap`` together.
    Return an array mapping the values to the indexes of
    the newly formed connected components they belong to.
    Two values that don't have the same input group_id can't be connected in the same
    connected component.
    ``connect_adjacents_in_groups`` is faster when an array of indexes is provided as ``group_ids``,
    but also accepts other types of ids.
    :param array group_ids: ``(n,) dtype array``
    :param array values: ``(n,) float array``
    :param float max_gap: maximum distance between a value and the nearest value in the same group.
    :returns: ``(n,) uint array``,
        such that ``out[s[i]]==out[s[i+1]]`` :math:`\iff`
        ``group_ids[s[i]]==group_ids[s[i+1]]`` & ``|values[s[i]]-values[s[i+1]]| <= max_gap``
        where ``s[i]`` is the ``i`` -th index when sorting by id and value
    Example
    _______
    >>> group_ids = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3, 3])
    >>> values = numpy.array([ 0, 35, 20, 25, 30,  0,  5, 10, 20,  0,  5, 10, 15])
    >>> connect_adjacents_in_groups(group_ids, values, max_gap = 5)
    array([0, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4], dtype=uint32)
    Example with string ``group_ids``:
    >>> group_ids = numpy.array(['alpha', 'alpha', 'alpha', 'alpha', 'alpha', 'beta', 'beta', 'beta', 'beta', 'gamma', 'gamma', 'gamma', 'gamma'])
    >>> values = numpy.array([ 0, 35, 20, 25, 30,  0,  5, 10, 20,  0,  5, 10, 15])
    >>> connected_components_ids = connect_adjacents_in_groups(group_ids, values, max_gap = 5)
    The function does not require the ``group_ids`` or the ``values`` to be sorted:
    >>> shuffler = numpy.random.permutation(len(group_ids))
    >>> group_ids_shuffled = group_ids[shuffler]
    >>> values_shuffled = values[shuffler]
    >>> connect_adjacents_in_groups(group_ids_shuffled, values_shuffled, max_gap = 5)
    array([2, 1, 0, 2, 4, 1, 1, 4, 1, 4, 3, 2, 4], dtype=uint32)
    >>> connected_components_ids[shuffler]
    array([2, 1, 0, 2, 4, 1, 1, 4, 1, 4, 3, 2, 4], dtype=uint32)
    """
    # fast path is only taken for small non-negative integer ids
    as_idx = False
    if group_ids.dtype.kind in 'ui':
        if min(group_ids) >= 0 and max(group_ids) < (1 << 6) * len(group_ids):
            as_idx = True
    # FIXME: add old max and old min for it to work with pandas DataFrames
    if as_idx:
        # rescale values into the uint32 range so the (id, value) pairs can
        # be ordered by the project's lexsort_uint32_pair helper
        # (presumably faster than numpy.lexsort -- see that helper)
        values_for_uint32 = linearscaling(
            values, 1, (1 << 32) - float(1 << 8) - 1)
        args = lexsort_uint32_pair(group_ids, values_for_uint32)
    else:
        # generic path: sort by group id first, then by value within the group
        args = numpy.lexsort((values, group_ids))
    group_ids = group_ids[args]  # e.g. 1 1 1 1 1 1 1 1 1 2 2 2 2
    values = values[args]  # e.g. 1 1 1 2 2 3 3 9 9 1 2 2 9
    # to_split  e.g. 0 0 0 0 0 0 1 0 1 0 0 1
    # start a new component where the id changes or the gap is exceeded
    to_split = ((group_ids[1:] != group_ids[:-1])
                | ((values[1:] - values[:-1]) > max_gap))
    # group_idx  e.g. 0 0 0 0 0 0 0 1 1 2 2 2 3
    # NOTE(review): assumes non-empty input; group_idx[0] raises on size 0
    group_idx = numpy.empty(group_ids.size, dtype='uint32')
    group_idx[0] = 0
    # running count of split points == component index in sorted order
    numpy.cumsum(to_split, out=group_idx[1:])
    # reverse argsort: map component ids back to the original input order
    aligned_group_idx = numpy.empty_like(group_idx)
    aligned_group_idx[args] = group_idx
    return aligned_group_idx
# TODO: improve test
def get_value_by_idx(idx, values, default, check_unique=True, minlength=None):
    """
    Scatter ``values`` into an array aligned on ``arange(idx.max() + 1)``
    so that ``out[i] = values[idx==i]``, filling missing indexes with
    ``default``.
    When every index in ``idx`` is unique this is equivalent to sorting the
    ``values`` by their ``idx``. If indexes repeat and ``check_unique`` is
    ``False``, an arbitrary value among the candidates is kept (last write
    wins); with ``check_unique=True`` repeated indexes raise.
    :param array idx: ``(n,) uint array`` with values < max_idx
    :param array values: ``(n,) dtype array``
    :param dtype default: filling value for ``output[i]`` if there is no ``idx == i``
    :param bool check_unique: if ``True``, will check that ``idx`` are unique
    :param int? minlength: minimum shape for the output array (``default: idx.max() + 1``).
    :returns array: (max_idx+1,), dtype array such that
        ``out[i] = values[idx==i]``.
    Example
    _______
    >>> idx = numpy.array([8,2,4,7])
    >>> values = numpy.array([100, 200, 300, 400])
    >>> get_value_by_idx(idx, values, -1, check_unique=False, minlength=None)
    array([ -1,  -1, 200,  -1, 300,  -1,  -1, 400, 100])
    Example with non-unique elements in ``idx``:
    >>> idx = numpy.array([2,2,4,7])
    >>> values = numpy.array([100, 200, 300, 400])
    >>> get_value_by_idx(idx, values, -1, check_unique=False, minlength=None)
    array([ -1,  -1, 200,  -1, 300,  -1,  -1, 400])
    """
    has_valid_idx = idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all())
    assert has_valid_idx, (
        'Can only use get_xx_by_idx with integer indexes in `idx`, where (idx >= 0).all()')
    if check_unique:
        assert numpy.unique(idx).shape == idx.shape, "indexes in `idx` should be unique"
    out_length = max(idx.max() + 1, minlength or 0)
    aligned = numpy.full(out_length, default, dtype=values.dtype)
    aligned[idx] = values
    return aligned
# TODO: improve test and add example in doc
def get_most_common_by_idx(idx, values, fill, minlength=None):
    """
    Given array of indexes ``idx`` and array ``values``,
    outputs the most common value by idx.
    :param array idx: (n,) uint array with values < max_idx
    :param array values: (n,) non-float, dtype array
    :param fill: filling value for output[i] if there is no idx == i
    :param minlength: minimum shape for the output array.
    :returns: (max_idx+1,), dtype array such that
        out[i] = the most common value such that (values[idx==i])
    """
    assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
        'Can only use get_xx_by_idx with integer idx, where all idx >= 0')
    # floats are rejected because exact equality is needed to count values
    assert values.dtype.kind != 'f', ('values dtype is float - Please convert to other dtype'
                                      'or digitize values before using get_most_common_by_idx')
    length = max(idx.max() + 1, minlength or 0)
    # pack each (idx, value) pair into one structured row so identical pairs
    # can be counted together (to_structured is a project helper)
    sessions_uv = to_structured([
        ('idx', idx),
        ('value', values),
    ])
    # factorize (project helper) maps each distinct pair to a dense id;
    # presumably returns (codes, uniques, ...) -- confirm in its docs
    sessions_uv_idx, uv_idx2id, _ = factorize(sessions_uv)
    uv_idx2id = numpy.asarray(
        uv_idx2id, [('idx', 'uint32'), ('value', values.dtype)])  # cast to struct
    # occurrence count of every distinct (idx, value) pair
    count_by_uv_idx = numpy.bincount(sessions_uv_idx)
    # for each idx, the pair id with the highest count (-1 if idx unseen)
    top_uv_by_id = argmax_by_idx(
        uv_idx2id['idx'], count_by_uv_idx, minlength=length)
    out = uv_idx2id[top_uv_by_id]['value']
    out[top_uv_by_id == -1] = fill
    return out
def average_by_idx(idx, values, weights=None, minlength=None, fill=0, dtype='float64'):
    """
    Compute average-by-idx given array of indexes ``idx``, ``values``, and optional ``weights``
    :param array idx: (n,) int array
    :param array values: (n,) float array
    :param array? weights: (n,) float array
    :param int? minlength: (default: idx.max() + 1)
    :param float? fill: filling value for missing idx (default: 0)
    :param str? dtype: (default: 'float64')
    :returns: (min-length,) float array, such that out[i] = mean(values[idx==i])
    Example
    _______
    >>> idx = numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 3, 3])
    >>> values = numpy.array([0, 1, 2, 3, 4, 0, 2, 4, 6, 0, 4, 6])
    >>> average_by_idx(idx, values, fill=0)
    array([ 2.        ,  3.        ,  0.        ,  3.33333333])
    >>> weights = numpy.array([0, 1, 0, 0, 0, 1, 2, 3, 4, 1, 1, 0])
    >>> average_by_idx(idx, values, weights=weights, fill=0)
    array([ 1.,  4.,  0.,  2.])
    """
    assert idx.dtype.kind == 'u' or (idx.dtype.kind == 'i' and (idx >= 0).all()), (
        'Can only use get_xx_by_idx with integer idx, where (idx >= 0).all()')
    # FIXME: define dtype whitelist instead
    assert values.dtype.kind not in 'USOb', ('values dtype not supported')
    # numpy.bincount rejects minlength=None since NumPy 1.14; normalize to 0
    minlength = minlength or 0
    # denominator: per-idx count (or sum of weights when provided)
    norm_by_idx = numpy.bincount(
        idx, weights, minlength=minlength).astype(dtype)
    if weights is not None:
        values = values * weights
    sum_by_idx = numpy.bincount(idx, values, minlength=minlength).astype(dtype)
    # silence divide-by-zero / 0-over-0 for empty groups: those slots get
    # `fill` anyway (numpy.errstate replaces the removed numpy.warnings alias)
    with numpy.errstate(divide='ignore', invalid='ignore'):
        return numpy.where(norm_by_idx > 0, sum_by_idx / norm_by_idx, fill)
| [
"numpy.unique",
"numpy.where",
"numpy.asarray",
"numpy.warnings.filterwarnings",
"numpy.iinfo",
"numpy.argsort",
"numpy.lexsort",
"numpy.warnings.catch_warnings",
"numpy.empty_like",
"numpy.empty",
"numpy.cumsum",
"numpy.full",
"numpy.bincount"
] | [((2130, 2148), 'numpy.asarray', 'numpy.asarray', (['ids'], {}), '(ids)\n', (2143, 2148), False, 'import numpy\n'), ((2162, 2183), 'numpy.asarray', 'numpy.asarray', (['values'], {}), '(values)\n', (2175, 2183), False, 'import numpy\n'), ((5163, 5187), 'numpy.full', 'numpy.full', (['length', 'init'], {}), '(length, init)\n', (5173, 5187), False, 'import numpy\n'), ((9115, 9155), 'numpy.full', 'numpy.full', (['min_values_by_idx.size', 'fill'], {}), '(min_values_by_idx.size, fill)\n', (9125, 9155), False, 'import numpy\n'), ((10693, 10744), 'numpy.full', 'numpy.full', (['length', 'fill'], {'dtype': 'output_values.dtype'}), '(length, fill, dtype=output_values.dtype)\n', (10703, 10744), False, 'import numpy\n'), ((12228, 12268), 'numpy.full', 'numpy.full', (['max_values_by_idx.size', 'fill'], {}), '(max_values_by_idx.size, fill)\n', (12238, 12268), False, 'import numpy\n'), ((13813, 13864), 'numpy.full', 'numpy.full', (['length', 'fill'], {'dtype': 'output_values.dtype'}), '(length, fill, dtype=output_values.dtype)\n', (13823, 13864), False, 'import numpy\n'), ((17131, 17174), 'numpy.empty', 'numpy.empty', (['group_ids.size'], {'dtype': '"""uint32"""'}), "(group_ids.size, dtype='uint32')\n", (17142, 17174), False, 'import numpy\n'), ((17200, 17241), 'numpy.cumsum', 'numpy.cumsum', (['to_split'], {'out': 'group_idx[1:]'}), '(to_split, out=group_idx[1:])\n', (17212, 17241), False, 'import numpy\n'), ((17288, 17315), 'numpy.empty_like', 'numpy.empty_like', (['group_idx'], {}), '(group_idx)\n', (17304, 17315), False, 'import numpy\n'), ((19465, 19512), 'numpy.full', 'numpy.full', (['length', 'default'], {'dtype': 'values.dtype'}), '(length, default, dtype=values.dtype)\n', (19475, 19512), False, 'import numpy\n'), ((20694, 20764), 'numpy.asarray', 'numpy.asarray', (['uv_idx2id', "[('idx', 'uint32'), ('value', values.dtype)]"], {}), "(uv_idx2id, [('idx', 'uint32'), ('value', values.dtype)])\n", (20707, 20764), False, 'import numpy\n'), ((20814, 20845), 'numpy.bincount', 
'numpy.bincount', (['sessions_uv_idx'], {}), '(sessions_uv_idx)\n', (20828, 20845), False, 'import numpy\n'), ((2463, 2481), 'numpy.argsort', 'numpy.argsort', (['ids'], {}), '(ids)\n', (2476, 2481), False, 'import numpy\n'), ((9179, 9198), 'numpy.where', 'numpy.where', (['is_min'], {}), '(is_min)\n', (9190, 9198), False, 'import numpy\n'), ((12292, 12311), 'numpy.where', 'numpy.where', (['is_max'], {}), '(is_max)\n', (12303, 12311), False, 'import numpy\n'), ((16706, 16740), 'numpy.lexsort', 'numpy.lexsort', (['(values, group_ids)'], {}), '((values, group_ids))\n', (16719, 16740), False, 'import numpy\n'), ((22554, 22585), 'numpy.warnings.catch_warnings', 'numpy.warnings.catch_warnings', ([], {}), '()\n', (22583, 22585), False, 'import numpy\n'), ((22595, 22648), 'numpy.warnings.filterwarnings', 'numpy.warnings.filterwarnings', (['"""ignore"""', '""".*divide.*"""'], {}), "('ignore', '.*divide.*')\n", (22624, 22648), False, 'import numpy\n'), ((22665, 22725), 'numpy.where', 'numpy.where', (['(norm_by_idx > 0)', '(sum_by_idx / norm_by_idx)', 'fill'], {}), '(norm_by_idx > 0, sum_by_idx / norm_by_idx, fill)\n', (22676, 22725), False, 'import numpy\n'), ((22330, 22379), 'numpy.bincount', 'numpy.bincount', (['idx', 'weights'], {'minlength': 'minlength'}), '(idx, weights, minlength=minlength)\n', (22344, 22379), False, 'import numpy\n'), ((22482, 22530), 'numpy.bincount', 'numpy.bincount', (['idx', 'values'], {'minlength': 'minlength'}), '(idx, values, minlength=minlength)\n', (22496, 22530), False, 'import numpy\n'), ((6331, 6356), 'numpy.iinfo', 'numpy.iinfo', (['values.dtype'], {}), '(values.dtype)\n', (6342, 6356), False, 'import numpy\n'), ((7691, 7716), 'numpy.iinfo', 'numpy.iinfo', (['values.dtype'], {}), '(values.dtype)\n', (7702, 7716), False, 'import numpy\n'), ((19333, 19350), 'numpy.unique', 'numpy.unique', (['idx'], {}), '(idx)\n', (19345, 19350), False, 'import numpy\n')] |
import pyautogui
import PySimpleGUI as sg
import cv2
import numpy as np
"""
Demo program that displays a webcam using OpenCV
"""
def main():
    """Webcam viewer (PySimpleGUI + OpenCV) with record/stop/screenshot controls."""
    sg.theme('Black')
    # window layout: title row, video frame row, control-button row
    button_font = 'Arial 14'
    controls = [
        sg.Button('Record', size=(10, 1), font=button_font),
        sg.Button('Stop', size=(10, 1), font=button_font),
        sg.Button('Exit', size=(10, 1), font=button_font),
        sg.Button('Screenshot', size=(10, 1), font=button_font),
    ]
    layout = [
        [sg.Text('OpenCV Demo', size=(40, 1), justification='center', font='Helvetica 20')],
        [sg.Image(filename='', key='image')],
        controls,
    ]
    window = sg.Window('Demo Application - OpenCV Integration',
                       layout, location=(800, 400))
    camera = cv2.VideoCapture(0)
    recording = False
    # event loop: poll the window, react to buttons, stream frames while recording
    while True:
        event, _ = window.read(timeout=20)
        if event in ('Exit', sg.WIN_CLOSED):
            return
        if event == 'Record':
            recording = True
        elif event == 'Screenshot':
            pyautogui.screenshot().save(r'shot.png')
        elif event == 'Stop':
            recording = False
            # show a blank (all-255) placeholder frame once recording stops
            blank = np.full((480, 640), 255)
            window['image'].update(data=cv2.imencode('.png', blank)[1].tobytes())
        if recording:
            _, frame = camera.read()
            window['image'].update(data=cv2.imencode('.png', frame)[1].tobytes())
main() | [
"cv2.imencode",
"pyautogui.screenshot",
"PySimpleGUI.Text",
"PySimpleGUI.Button",
"PySimpleGUI.theme",
"cv2.VideoCapture",
"PySimpleGUI.Image",
"numpy.full",
"PySimpleGUI.Window"
] | [((149, 166), 'PySimpleGUI.theme', 'sg.theme', (['"""Black"""'], {}), "('Black')\n", (157, 166), True, 'import PySimpleGUI as sg\n'), ((684, 763), 'PySimpleGUI.Window', 'sg.Window', (['"""Demo Application - OpenCV Integration"""', 'layout'], {'location': '(800, 400)'}), "('Demo Application - OpenCV Integration', layout, location=(800, 400))\n", (693, 763), True, 'import PySimpleGUI as sg\n'), ((872, 891), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (888, 891), False, 'import cv2\n'), ((214, 300), 'PySimpleGUI.Text', 'sg.Text', (['"""OpenCV Demo"""'], {'size': '(40, 1)', 'justification': '"""center"""', 'font': '"""Helvetica 20"""'}), "('OpenCV Demo', size=(40, 1), justification='center', font=\n 'Helvetica 20')\n", (221, 300), True, 'import PySimpleGUI as sg\n'), ((313, 347), 'PySimpleGUI.Image', 'sg.Image', ([], {'filename': '""""""', 'key': '"""image"""'}), "(filename='', key='image')\n", (321, 347), True, 'import PySimpleGUI as sg\n'), ((365, 415), 'PySimpleGUI.Button', 'sg.Button', (['"""Record"""'], {'size': '(10, 1)', 'font': '"""Arial 14"""'}), "('Record', size=(10, 1), font='Arial 14')\n", (374, 415), True, 'import PySimpleGUI as sg\n'), ((432, 480), 'PySimpleGUI.Button', 'sg.Button', (['"""Stop"""'], {'size': '(10, 1)', 'font': '"""Arial 14"""'}), "('Stop', size=(10, 1), font='Arial 14')\n", (441, 480), True, 'import PySimpleGUI as sg\n'), ((497, 545), 'PySimpleGUI.Button', 'sg.Button', (['"""Exit"""'], {'size': '(10, 1)', 'font': '"""Arial 14"""'}), "('Exit', size=(10, 1), font='Arial 14')\n", (506, 545), True, 'import PySimpleGUI as sg\n'), ((562, 616), 'PySimpleGUI.Button', 'sg.Button', (['"""Screenshot"""'], {'size': '(10, 1)', 'font': '"""Arial 14"""'}), "('Screenshot', size=(10, 1), font='Arial 14')\n", (571, 616), True, 'import PySimpleGUI as sg\n'), ((1175, 1197), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {}), '()\n', (1195, 1197), False, 'import pyautogui\n'), ((1320, 1344), 'numpy.full', 'np.full', (['(480, 
640)', '(255)'], {}), '((480, 640), 255)\n', (1327, 1344), True, 'import numpy as np\n'), ((1601, 1628), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'frame'], {}), "('.png', frame)\n", (1613, 1628), False, 'import cv2\n'), ((1430, 1455), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'img'], {}), "('.png', img)\n", (1442, 1455), False, 'import cv2\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import os
import json
import random
import logging
import numpy as np
import six
from io import open
from collections import namedtuple
from utils import tokenization
log = logging.getLogger(__name__)
if six.PY3:
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
def csv_reader(fd, delimiter='\t'):
    """Yield each line of ``fd`` stripped of its trailing newline and split on ``delimiter``."""
    def _iter_rows():
        for line in fd:
            yield line.rstrip('\n').split(delimiter)
    return _iter_rows()
class BaseReader(object):
    """Shared base for task readers: owns the word-piece tokenizer, the
    vocabulary and special-token ids, plus static feature maps (NER tag ids,
    log-scale entity-distance buckets) used when building records.
    """
    def __init__(self,
                 vocab_path,
                 label_map_config=None,
                 max_seq_len=512,
                 max_ent_cnt=42,
                 do_lower_case=True,
                 in_tokens=False,
                 is_inference=False,
                 random_seed=None,
                 tokenizer="FullTokenizer",
                 is_classify=True,
                 is_regression=False,
                 for_cn=True,
                 task_id=0):
        """Build the tokenizer from ``vocab_path`` and store reader config.

        :param str vocab_path: path of the vocabulary file
        :param str? label_map_config: optional json file mapping labels to ids
        :param int max_seq_len: maximum number of word-piece tokens per record
        :param int max_ent_cnt: maximum number of entities per document
        :param bool do_lower_case: lower-case text before tokenizing
        :param bool in_tokens: if True, batch size is counted in tokens
            instead of examples
        :param int? random_seed: seed for numpy's global RNG
        :param int task_id: id broadcast into every record's task-id slot
        """
        self.max_seq_len = max_seq_len
        self.max_ent_cnt = max_ent_cnt
        # NOTE(review): the `tokenizer` argument is currently ignored; a
        # FullTokenizer is always constructed.
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_path, do_lower_case=do_lower_case)
        self.vocab = self.tokenizer.vocab
        # ids of the special tokens used when assembling sequences
        self.pad_id = self.vocab["[PAD]"]
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]
        self.in_tokens = in_tokens
        self.is_inference = is_inference
        self.for_cn = for_cn
        self.task_id = task_id
        # seeds numpy's *global* RNG (affects e.g. example shuffling)
        np.random.seed(random_seed)
        self.is_classify = is_classify
        self.is_regression = is_regression
        # bookkeeping exposed through get_train_progress()
        self.current_example = 0
        self.current_epoch = 0
        self.num_examples = 0
        if label_map_config:
            with open(label_map_config, encoding='utf8') as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None
        # fixed tag -> id map for entity NER types (0 reserved for padding)
        self.ner_map = {'PAD': 0, 'ORG': 1, 'LOC': 2, 'NUM': 3, 'TIME': 4, 'MISC': 5, 'PER': 6}
        # bucketize token distances on a log2 scale:
        # d == 0 -> 0, otherwise floor(log2(d)) + 1, capped at 9 (d >= 256)
        distance_buckets = np.zeros((512), dtype='int64')
        distance_buckets[1] = 1
        distance_buckets[2:] = 2
        distance_buckets[4:] = 3
        distance_buckets[8:] = 4
        distance_buckets[16:] = 5
        distance_buckets[32:] = 6
        distance_buckets[64:] = 7
        distance_buckets[128:] = 8
        distance_buckets[256:] = 9
        self.distance_buckets = distance_buckets
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_example, self.current_epoch
    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
from dataclasses import dataclass
from typing import Optional


@dataclass
class DocREDExample:
    """A single DocRED document.

    :param guid: unique example id (its index in the input json)
    :param title: document title
    :param vertexSet: entities; each entity is the list of its mentions
    :param sents: tokenized sentences (list of lists of words)
    :param labels: relation labels, or ``None`` for unlabeled (test) data
    """
    guid: str
    title: str
    vertexSet: list
    sents: list
    # fix: the original annotated this field with the *value* None
    # (``labels: None``); Optional[list] is the intended type, and the
    # None default is backward compatible.
    labels: Optional[list] = None
class DocREDReader(BaseReader):
def _load_json(self, input_file):
"""Read DocRED json file into examples"""
with open(input_file, 'r') as f:
examples_raw = json.load(f)
examples = []
for (i, ins) in enumerate(examples_raw):
guid = i
examples.append(DocREDExample(guid=guid,
title=ins['title'],
vertexSet=ins['vertexSet'],
sents=ins['sents'],
labels=ins['labels'] if 'labels' in ins.keys() else None))
return examples
def get_num_train_examples(self, data_dir):
examples = self._load_json(os.path.join(data_dir, "train_annotated.json"))
return len(examples)
    def data_generator(self,
                       data_dir,
                       mode,
                       batch_size,
                       epoch,
                       dev_count=1):
        """Build a generator factory over batched DocRED records.

        :param str data_dir: directory holding the DocRED json splits
        :param str mode: 'train' (shuffled), 'eval' or 'test'
        :param int batch_size: batch budget (records, or tokens if in_tokens)
        :param int epoch: number of passes over the data
        :param int dev_count: number of devices; batches are only yielded in
            full groups of ``dev_count``, one per device
        :returns: a zero-argument callable that produces the batch generator
        :raises Exception: if ``mode`` is not one of the three splits
        """
        if mode == 'train':
            datafile = os.path.join(data_dir, "train_annotated.json")
            shuffle = True
        elif mode == 'eval':
            datafile = os.path.join(data_dir, "dev.json")
            shuffle = False
        elif mode == 'test':
            datafile = os.path.join(data_dir, "test.json")
            shuffle = False
        else:
            raise Exception("Invalid mode for data reader.")
        examples = self._load_json(datafile)
        def wrapper():
            all_dev_batches = []
            for epoch_index in range(epoch):
                if mode == "train":
                    self.current_example = 0
                    self.current_epoch = epoch_index
                if shuffle:
                    # in-place reshuffle at each epoch (train mode only)
                    np.random.shuffle(examples)
                for batch_data in self._prepare_batch_data(
                        examples, batch_size, mode=mode):
                    # buffer batches until there is one per device
                    if len(all_dev_batches) < dev_count:
                        all_dev_batches.append(batch_data)
                    if len(all_dev_batches) == dev_count:
                        for batch in all_dev_batches:
                            yield batch
                        all_dev_batches = []
        def f():
            # NOTE(review): exceptions are printed and swallowed here, so a
            # failure mid-iteration silently ends the stream -- confirm this
            # best-effort behavior is intended.
            try:
                for i in wrapper():
                    yield i
            except Exception as e:
                import traceback
                traceback.print_exc()
        return f
def _prepare_batch_data(self, examples, batch_size, mode=None):
"""generate batch records"""
batch_records, max_len = [], 0
for index, example in enumerate(examples):
if mode == "train":
self.current_example = index
record = self._convert_example_to_record(example, self.max_seq_len,
self.max_ent_cnt, self.tokenizer)
max_len = max(max_len, len(record.token_ids))
if self.in_tokens:
to_append = (len(batch_records) + 1) * max_len <= batch_size
else:
to_append = len(batch_records) < batch_size
if to_append:
batch_records.append(record)
else:
yield self._batch_records(batch_records)
batch_records, max_len = [record], len(record.token_ids)
# drop last batch!
# if batch_records:
# yield self._batch_records(batch_records)
def _batch_records(self, batch_records):
batch_token_ids = [record.token_ids for record in batch_records]
batch_input_mask = [record.input_mask for record in batch_records]
batch_text_type_ids = [record.text_type_ids for record in batch_records]
batch_position_ids = [record.position_ids for record in batch_records]
batch_ent_mask = [record.ent_mask for record in batch_records]
batch_label_ids = [record.label_ids for record in batch_records]
batch_label_mask = [record.label_mask for record in batch_records]
batch_ent_ner = [record.ent_ner for record in batch_records]
batch_ent_pos = [record.ent_pos for record in batch_records]
batch_ent_distance = [record.ent_distance for record in batch_records]
batch_structure_mask = [record.structure_mask for record in batch_records]
padded_task_ids = np.ones_like(batch_token_ids, dtype="int64") * self.task_id
return_list = [
batch_token_ids, batch_input_mask, batch_text_type_ids, batch_position_ids, padded_task_ids,
batch_ent_mask, batch_label_ids, batch_label_mask, batch_ent_ner, batch_ent_pos,
batch_ent_distance, batch_structure_mask
]
return return_list
def norm_mask(self, input_mask):
output_mask = np.zeros(input_mask.shape)
for i in range(len(input_mask)):
if not np.all(input_mask[i] == 0):
output_mask[i] = input_mask[i] / sum(input_mask[i])
return output_mask
    def _convert_example_to_record(self, example, max_seq_length, max_ent_cnt, tokenizer):
        """Convert one document-level RE example into a padded feature Record.

        Tokenizes every word while remembering which sentence/word each
        sub-token came from, then derives entity masks, NER/coreference ids,
        pairwise distance buckets, a structural attention prior, and the
        (h, t, relation) label tensor.  All outputs are padded/truncated to
        ``max_seq_length`` tokens and ``max_ent_cnt`` entities.
        """
        input_tokens = []
        # Parallel bookkeeping: for every sub-token, the sentence index and
        # word index it originated from ([CLS]/[SEP]/padding map to None).
        tok_to_sent = []
        tok_to_word = []
        for sent_idx, sent in enumerate(example.sents):
            for word_idx, word in enumerate(sent):
                word = tokenization.convert_to_unicode(word)
                tokens_tmp = tokenizer.tokenize(word)
                input_tokens += tokens_tmp
                tok_to_sent += [sent_idx] * len(tokens_tmp)
                tok_to_word += [word_idx] * len(tokens_tmp)
        # Reserve two positions for [CLS] and [SEP]; pad short sequences,
        # truncate long ones.
        if len(input_tokens) <= max_seq_length - 2:
            input_tokens = ['[CLS]'] + input_tokens + ['[SEP]']
            tok_to_sent = [None] + tok_to_sent + [None]
            tok_to_word = [None] + tok_to_word + [None]
            input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
            input_mask = [1] * len(input_ids)
            text_type_ids = [0] * len(input_ids)
            position_ids = list(range(len(input_ids)))
            # padding
            padding = [None] * (max_seq_length - len(input_ids))
            tok_to_sent += padding
            tok_to_word += padding
            padding = [0] * (max_seq_length - len(input_ids))
            input_mask += padding
            text_type_ids += padding
            padding = [0] * (max_seq_length - len(input_ids))
            input_ids += padding
            position_ids += padding
        else:
            input_tokens = input_tokens[:max_seq_length - 2]
            tok_to_sent = tok_to_sent[:max_seq_length - 2]
            tok_to_word = tok_to_word[:max_seq_length - 2]
            input_tokens = ['[CLS]'] + input_tokens + ['[SEP]']
            tok_to_sent = [None] + tok_to_sent + [None]
            tok_to_word = [None] + tok_to_word + [None]
            input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
            input_mask = [1] * len(input_ids)
            text_type_ids = [0] * len(input_ids)
            position_ids = list(range(len(input_ids)))
        # ent_mask & ner / coreference feature
        # ent_mask[e][t] == 1 when token t belongs to a mention of entity e.
        ent_mask = np.zeros((max_ent_cnt, max_seq_length), dtype='int64')
        ent_ner = [0] * max_seq_length
        ent_pos = [0] * max_seq_length
        tok_to_ent = [-1] * max_seq_length
        ents = example.vertexSet
        for ent_idx, ent in enumerate(ents):
            for mention in ent:
                for tok_idx in range(len(input_ids)):
                    if tok_to_sent[tok_idx] == mention['sent_id'] \
                            and mention['pos'][0] <= tok_to_word[tok_idx] < mention['pos'][1]:
                        ent_mask[ent_idx][tok_idx] = 1
                        ent_ner[tok_idx] = self.ner_map[ent[0]['type']]
                        ent_pos[tok_idx] = ent_idx + 1
                        tok_to_ent[tok_idx] = ent_idx
        # distance feature: bucketed token distance between the first
        # appearances of every entity pair (sign encodes order).
        ent_first_appearance = [0] * max_ent_cnt
        ent_distance = np.zeros((max_ent_cnt, max_ent_cnt), dtype='int64')  # padding id is 10
        for i in range(len(ents)):
            if np.all(ent_mask[i] == 0):
                continue
            else:
                ent_first_appearance[i] = np.where(ent_mask[i] == 1)[0][0]
        for i in range(len(ents)):
            for j in range(len(ents)):
                if ent_first_appearance[i] != 0 and ent_first_appearance[j] != 0:
                    if ent_first_appearance[i] >= ent_first_appearance[j]:
                        ent_distance[i][j] = self.distance_buckets[ent_first_appearance[i] - ent_first_appearance[j]]
                    else:
                        ent_distance[i][j] = - self.distance_buckets[- ent_first_appearance[i] + ent_first_appearance[j]]
        ent_distance += 10  # norm from [-9, 9] to [1, 19]
        # structure prior for attentive bias
        # PRIOR DEFINITION  | share ent context | diff ent context | No ent
        # share sem context |    intra-coref    |   intra-relate   | intra-NA
        # diff sem context  |    inter-coref    |   inter-relate   |
        structure_mask = np.zeros((5, max_seq_length, max_seq_length), dtype='float')
        for i in range(max_seq_length):
            if input_mask[i] == 0:
                break
            else:
                if tok_to_ent[i] != -1:
                    for j in range(max_seq_length):
                        if tok_to_sent[j] is None:
                            continue
                        # intra: token j is in the same sentence as token i
                        if tok_to_sent[j] == tok_to_sent[i]:
                            # intra-coref
                            if tok_to_ent[j] == tok_to_ent[i]:
                                structure_mask[0][i][j] = 1
                            # intra-relate
                            elif tok_to_ent[j] != -1:
                                structure_mask[1][i][j] = 1
                            # intra-NA
                            else:
                                structure_mask[2][i][j] = 1
                        else:
                            # inter-coref
                            if tok_to_ent[j] == tok_to_ent[i]:
                                structure_mask[3][i][j] = 1
                            # inter-relate
                            elif tok_to_ent[j] != -1:
                                structure_mask[4][i][j] = 1
        # label: multi-hot per (head, tail) pair; pairs with no relation get
        # the NA class (index 0).
        label_ids = np.zeros((max_ent_cnt, max_ent_cnt, len(self.label_map.keys())), dtype='int64')
        # test file does not have "labels"
        if example.labels is not None:
            labels = example.labels
            for label in labels:
                label_ids[label['h']][label['t']][self.label_map[label['r']]] = 1
        for h in range(len(ents)):
            for t in range(len(ents)):
                if np.all(label_ids[h][t] == 0):
                    label_ids[h][t][0] = 1
        # label_mask marks valid (h, t) pairs: real entities only, no
        # self-pairs, and no entities whose mentions were all truncated away.
        label_mask = np.zeros((max_ent_cnt, max_ent_cnt), dtype='int64')
        label_mask[:len(ents), :len(ents)] = 1
        for ent in range(len(ents)):
            label_mask[ent][ent] = 0
        for ent in range(len(ents)):
            if np.all(ent_mask[ent] == 0):
                label_mask[ent, :] = 0
                label_mask[:, ent] = 0
        ent_mask = self.norm_mask(ent_mask)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(text_type_ids) == max_seq_length
        assert len(position_ids) == max_seq_length
        assert ent_mask.shape == (max_ent_cnt, max_seq_length)
        assert label_ids.shape == (max_ent_cnt, max_ent_cnt, len(self.label_map.keys()))
        assert label_mask.shape == (max_ent_cnt, max_ent_cnt)
        assert len(ent_ner) == max_seq_length
        assert len(ent_pos) == max_seq_length
        assert ent_distance.shape == (max_ent_cnt, max_ent_cnt)
        assert structure_mask.shape == (5, max_seq_length, max_seq_length)
        input_ids = np.expand_dims(input_ids, axis=-1).astype('int64')
        input_mask = np.expand_dims(input_mask, axis=-1).astype('int64')
        text_type_ids = np.expand_dims(text_type_ids, axis=-1).astype('int64')
        position_ids = np.expand_dims(position_ids, axis=-1).astype('int64')
        ent_ner = np.expand_dims(ent_ner, axis=-1).astype('int64')
        ent_pos = np.expand_dims(ent_pos, axis=-1).astype('int64')
        ent_distance = np.expand_dims(ent_distance, axis=-1).astype('int64')
        Record = namedtuple(
            'Record',
            ['token_ids', 'input_mask', 'text_type_ids', 'position_ids', 'ent_mask', 'label_ids',
             'label_mask', 'ent_ner', 'ent_pos', 'ent_distance', 'structure_mask'])
        record = Record(
            token_ids=input_ids,
            input_mask=input_mask,
            text_type_ids=text_type_ids,
            position_ids=position_ids,
            ent_mask=ent_mask,
            label_ids=label_ids,
            label_mask=label_mask,
            ent_ner=ent_ner,
            ent_pos=ent_pos,
            ent_distance=ent_distance,
            structure_mask=structure_mask)
        return record
if __name__ == '__main__':
    # Module is import-only; no standalone entry point is provided.
    pass
| [
"logging.getLogger",
"utils.tokenization.FullTokenizer",
"numpy.ones_like",
"collections.namedtuple",
"utils.tokenization.convert_to_unicode",
"numpy.where",
"dataclasses.dataclass",
"os.path.join",
"io.open",
"numpy.zeros",
"io.TextIOWrapper",
"numpy.random.seed",
"numpy.expand_dims",
"js... | [((988, 1015), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1005, 1015), False, 'import logging\n'), ((4158, 4181), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(False)'}), '(frozen=False)\n', (4167, 4181), False, 'from dataclasses import dataclass\n'), ((1060, 1113), 'io.TextIOWrapper', 'io.TextIOWrapper', (['sys.stdout.buffer'], {'encoding': '"""utf-8"""'}), "(sys.stdout.buffer, encoding='utf-8')\n", (1076, 1113), False, 'import io\n'), ((1131, 1184), 'io.TextIOWrapper', 'io.TextIOWrapper', (['sys.stderr.buffer'], {'encoding': '"""utf-8"""'}), "(sys.stderr.buffer, encoding='utf-8')\n", (1147, 1184), False, 'import io\n'), ((1935, 2013), 'utils.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab_path', 'do_lower_case': 'do_lower_case'}), '(vocab_file=vocab_path, do_lower_case=do_lower_case)\n', (1961, 2013), False, 'from utils import tokenization\n'), ((2340, 2367), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2354, 2367), True, 'import numpy as np\n'), ((2855, 2883), 'numpy.zeros', 'np.zeros', (['(512)'], {'dtype': '"""int64"""'}), "(512, dtype='int64')\n", (2863, 2883), True, 'import numpy as np\n'), ((9095, 9121), 'numpy.zeros', 'np.zeros', (['input_mask.shape'], {}), '(input_mask.shape)\n', (9103, 9121), True, 'import numpy as np\n'), ((11385, 11439), 'numpy.zeros', 'np.zeros', (['(max_ent_cnt, max_seq_length)'], {'dtype': '"""int64"""'}), "((max_ent_cnt, max_seq_length), dtype='int64')\n", (11393, 11439), True, 'import numpy as np\n'), ((12224, 12275), 'numpy.zeros', 'np.zeros', (['(max_ent_cnt, max_ent_cnt)'], {'dtype': '"""int64"""'}), "((max_ent_cnt, max_ent_cnt), dtype='int64')\n", (12232, 12275), True, 'import numpy as np\n'), ((13353, 13413), 'numpy.zeros', 'np.zeros', (['(5, max_seq_length, max_seq_length)'], {'dtype': '"""float"""'}), "((5, max_seq_length, max_seq_length), dtype='float')\n", (13361, 13413), True, 'import numpy as 
np\n'), ((15148, 15199), 'numpy.zeros', 'np.zeros', (['(max_ent_cnt, max_ent_cnt)'], {'dtype': '"""int64"""'}), "((max_ent_cnt, max_ent_cnt), dtype='int64')\n", (15156, 15199), True, 'import numpy as np\n'), ((16700, 16885), 'collections.namedtuple', 'namedtuple', (['"""Record"""', "['token_ids', 'input_mask', 'text_type_ids', 'position_ids', 'ent_mask',\n 'label_ids', 'label_mask', 'ent_ner', 'ent_pos', 'ent_distance',\n 'structure_mask']"], {}), "('Record', ['token_ids', 'input_mask', 'text_type_ids',\n 'position_ids', 'ent_mask', 'label_ids', 'label_mask', 'ent_ner',\n 'ent_pos', 'ent_distance', 'structure_mask'])\n", (16710, 16885), False, 'from collections import namedtuple\n'), ((4421, 4442), 'io.open', 'open', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (4425, 4442), False, 'from io import open\n'), ((4476, 4488), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4485, 4488), False, 'import json\n'), ((5037, 5083), 'os.path.join', 'os.path.join', (['data_dir', '"""train_annotated.json"""'], {}), "(data_dir, 'train_annotated.json')\n", (5049, 5083), False, 'import os\n'), ((5360, 5406), 'os.path.join', 'os.path.join', (['data_dir', '"""train_annotated.json"""'], {}), "(data_dir, 'train_annotated.json')\n", (5372, 5406), False, 'import os\n'), ((8663, 8707), 'numpy.ones_like', 'np.ones_like', (['batch_token_ids'], {'dtype': '"""int64"""'}), "(batch_token_ids, dtype='int64')\n", (8675, 8707), True, 'import numpy as np\n'), ((12346, 12370), 'numpy.all', 'np.all', (['(ent_mask[i] == 0)'], {}), '(ent_mask[i] == 0)\n', (12352, 12370), True, 'import numpy as np\n'), ((15373, 15399), 'numpy.all', 'np.all', (['(ent_mask[ent] == 0)'], {}), '(ent_mask[ent] == 0)\n', (15379, 15399), True, 'import numpy as np\n'), ((2592, 2631), 'io.open', 'open', (['label_map_config'], {'encoding': '"""utf8"""'}), "(label_map_config, encoding='utf8')\n", (2596, 2631), False, 'from io import open\n'), ((2671, 2683), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2680, 2683), 
False, 'import json\n'), ((5486, 5520), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.json"""'], {}), "(data_dir, 'dev.json')\n", (5498, 5520), False, 'import os\n'), ((9182, 9208), 'numpy.all', 'np.all', (['(input_mask[i] == 0)'], {}), '(input_mask[i] == 0)\n', (9188, 9208), True, 'import numpy as np\n'), ((9603, 9640), 'utils.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['word'], {}), '(word)\n', (9634, 9640), False, 'from utils import tokenization\n'), ((15053, 15081), 'numpy.all', 'np.all', (['(label_ids[h][t] == 0)'], {}), '(label_ids[h][t] == 0)\n', (15059, 15081), True, 'import numpy as np\n'), ((16191, 16225), 'numpy.expand_dims', 'np.expand_dims', (['input_ids'], {'axis': '(-1)'}), '(input_ids, axis=-1)\n', (16205, 16225), True, 'import numpy as np\n'), ((16263, 16298), 'numpy.expand_dims', 'np.expand_dims', (['input_mask'], {'axis': '(-1)'}), '(input_mask, axis=-1)\n', (16277, 16298), True, 'import numpy as np\n'), ((16339, 16377), 'numpy.expand_dims', 'np.expand_dims', (['text_type_ids'], {'axis': '(-1)'}), '(text_type_ids, axis=-1)\n', (16353, 16377), True, 'import numpy as np\n'), ((16417, 16454), 'numpy.expand_dims', 'np.expand_dims', (['position_ids'], {'axis': '(-1)'}), '(position_ids, axis=-1)\n', (16431, 16454), True, 'import numpy as np\n'), ((16489, 16521), 'numpy.expand_dims', 'np.expand_dims', (['ent_ner'], {'axis': '(-1)'}), '(ent_ner, axis=-1)\n', (16503, 16521), True, 'import numpy as np\n'), ((16556, 16588), 'numpy.expand_dims', 'np.expand_dims', (['ent_pos'], {'axis': '(-1)'}), '(ent_pos, axis=-1)\n', (16570, 16588), True, 'import numpy as np\n'), ((16628, 16665), 'numpy.expand_dims', 'np.expand_dims', (['ent_distance'], {'axis': '(-1)'}), '(ent_distance, axis=-1)\n', (16642, 16665), True, 'import numpy as np\n'), ((5601, 5636), 'os.path.join', 'os.path.join', (['data_dir', '"""test.json"""'], {}), "(data_dir, 'test.json')\n", (5613, 5636), False, 'import os\n'), ((6069, 6096), 'numpy.random.shuffle', 
'np.random.shuffle', (['examples'], {}), '(examples)\n', (6086, 6096), True, 'import numpy as np\n'), ((6711, 6732), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6730, 6732), False, 'import traceback\n'), ((12457, 12483), 'numpy.where', 'np.where', (['(ent_mask[i] == 1)'], {}), '(ent_mask[i] == 1)\n', (12465, 12483), True, 'import numpy as np\n')] |
import numpy as np
import cv2
def softmax(x, axis=-1):
    """Numerically stable softmax of ``x`` along ``axis``."""
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp_vals = np.exp(shifted)
    return exp_vals / exp_vals.sum(axis=axis, keepdims=True)
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Aspect-ratio-preserving resize.

    Exactly one of ``width``/``height`` drives the scale; ``width`` takes
    precedence when both are given.  With neither, ``image`` is returned
    unchanged.
    """
    src_h, src_w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Scale by the requested height.
        ratio = height / float(src_h)
        target = (int(src_w * ratio), height)
    else:
        # Scale by the requested width.
        ratio = width / float(src_w)
        target = (width, int(src_h * ratio))
    return cv2.resize(image, target, interpolation=inter)
def gen_anchor(featuresize, scale):
    """Generate CTPN anchors for a feature map.

    Ten base anchors (fixed width 16 px, heights 11..283) are created at the
    origin and then shifted to every feature-map cell, ``scale`` pixels apart.
    (The original docstring claimed k=9 anchors; ten height/width pairs are
    actually generated.)

    Args:
        featuresize: (h, w) of the feature map.
        scale: stride in input pixels between adjacent cells.

    Returns:
        ndarray of shape (h * w * 10, 4) holding (x1, y1, x2, y2) boxes.
    """
    heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]
    widths = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16]
    # gen k=10 anchor sizes (h, w)
    heights = np.array(heights).reshape(len(heights), 1)
    widths = np.array(widths).reshape(len(widths), 1)
    base_anchor = np.array([0, 0, 15, 15])
    # center x,y of the 16x16 base cell
    xt = (base_anchor[0] + base_anchor[2]) * 0.5
    yt = (base_anchor[1] + base_anchor[3]) * 0.5
    # x1 y1 x2 y2 for every anchor size
    x1 = xt - widths * 0.5
    y1 = yt - heights * 0.5
    x2 = xt + widths * 0.5
    y2 = yt + heights * 0.5
    base_anchor = np.hstack((x1, y1, x2, y2))
    h, w = featuresize
    shift_x = np.arange(0, w) * scale
    shift_y = np.arange(0, h) * scale
    # apply shift: replicate the base anchors at every cell
    anchor = []
    for i in shift_y:
        for j in shift_x:
            anchor.append(base_anchor + [j, i, j, i])
    return np.array(anchor).reshape((-1, 4))
def bbox_transfor_inv(anchor, regr):
    """Decode vertical regression deltas into absolute boxes.

    Only the y-centre and height are regressed (regr[0, :, 0] and
    regr[0, :, 1]); the x extents are the fixed 16-px anchor width
    centred on the anchor.
    """
    anchor_cy = (anchor[:, 1] + anchor[:, 3]) * 0.5
    anchor_h = anchor[:, 3] - anchor[:, 1] + 1
    pred_cy = regr[0, :, 0] * anchor_h + anchor_cy
    pred_h = np.exp(regr[0, :, 1]) * anchor_h
    anchor_cx = (anchor[:, 0] + anchor[:, 2]) * 0.5
    half_w = 16 * 0.5
    return np.vstack((anchor_cx - half_w,
                      pred_cy - pred_h * 0.5,
                      anchor_cx + half_w,
                      pred_cy + pred_h * 0.5)).transpose()
def clip_box(bbox, im_shape):
    """Clamp boxes (in place) to the image bounds; returns the same array."""
    max_x = im_shape[1] - 1
    max_y = im_shape[0] - 1
    bbox[:, 0] = np.maximum(np.minimum(bbox[:, 0], max_x), 0)  # x1 >= 0
    bbox[:, 1] = np.maximum(np.minimum(bbox[:, 1], max_y), 0)  # y1 >= 0
    bbox[:, 2] = np.maximum(np.minimum(bbox[:, 2], max_x), 0)  # x2 < im_shape[1]
    bbox[:, 3] = np.maximum(np.minimum(bbox[:, 3], max_y), 0)  # y2 < im_shape[0]
    return bbox
def filter_bbox(bbox, minsize):
    """Indices of boxes whose width and height are both >= ``minsize``."""
    widths = bbox[:, 2] - bbox[:, 0] + 1
    heights = bbox[:, 3] - bbox[:, 1] + 1
    return np.where((widths >= minsize) & (heights >= minsize))[0]
def nms(dets, thresh):
    """Greedy non-maximum suppression.

    ``dets`` rows are (x1, y1, x2, y2, score); boxes whose IoU with an
    already-kept higher-scoring box exceeds ``thresh`` are discarded.
    Returns the kept row indices in descending score order.
    """
    xs1, ys1 = dets[:, 0], dets[:, 1]
    xs2, ys2 = dets[:, 2], dets[:, 3]
    areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    order = dets[:, 4].argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        ix1 = np.maximum(xs1[best], xs1[rest])
        iy1 = np.maximum(ys1[best], ys1[rest])
        ix2 = np.minimum(xs2[best], xs2[rest])
        iy2 = np.minimum(ys2[best], ys2[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        order = rest[np.where(iou <= thresh)[0]]
    return keep
# for predict
class Graph:
    """Thin wrapper around a boolean succession matrix of text proposals."""

    def __init__(self, graph):
        self.graph = graph

    def sub_graphs_connected(self):
        """Follow successor links from every chain head.

        A head is a node with a successor but no predecessor.  Returns a
        list of node-index chains.
        """
        chains = []
        adjacency = self.graph
        for node in range(adjacency.shape[0]):
            has_predecessor = adjacency[:, node].any()
            has_successor = adjacency[node, :].any()
            if has_predecessor or not has_successor:
                continue
            chain = [node]
            current = node
            while adjacency[current, :].any():
                current = np.where(adjacency[current, :])[0][0]
                chain.append(current)
            chains.append(chain)
        return chains
class TextLineCfg:
    """Hyper-parameters for CTPN text-line construction."""
    # NOTE(review): SCALE/MAX_SCALE/TEXT_PROPOSALS_WIDTH/MIN_NUM_PROPOSALS/
    # MIN_RATIO/LINE_MIN_SCORE/TEXT_PROPOSALS_MIN_SCORE/
    # TEXT_PROPOSALS_NMS_THRESH are not referenced in this chunk -- their
    # meanings below are presumed from their names; confirm against callers.
    SCALE = 600
    MAX_SCALE = 1200
    TEXT_PROPOSALS_WIDTH = 16
    MIN_NUM_PROPOSALS = 2
    MIN_RATIO = 0.5
    LINE_MIN_SCORE = 0.9
    # Max pixel distance searched for a succession proposal
    # (used by TextProposalGraphBuilder.get_successions/get_precursors).
    MAX_HORIZONTAL_GAP = 60
    TEXT_PROPOSALS_MIN_SCORE = 0.7
    TEXT_PROPOSALS_NMS_THRESH = 0.3
    # Minimum vertical overlap / height similarity for two proposals to be
    # linked (used by TextProposalGraphBuilder.meet_v_iou).
    MIN_V_OVERLAPS = 0.6
    MIN_SIZE_SIM = 0.6
class TextProposalGraphBuilder:
    """
    Build text proposals into a graph.

    Each proposal may link to its best "succession" proposal to the right;
    the resulting boolean adjacency matrix is wrapped in a ``Graph``.
    """

    def get_successions(self, index):
        """Proposals within MAX_HORIZONTAL_GAP to the right of ``index``
        that pass the vertical-IoU test; the nearest column wins."""
        box = self.text_proposals[index]
        results = []
        for left in range(int(box[0]) + 1, min(int(box[0]) + TextLineCfg.MAX_HORIZONTAL_GAP + 1, self.im_size[1])):
            adj_box_indices = self.boxes_table[left]
            for adj_box_index in adj_box_indices:
                if self.meet_v_iou(adj_box_index, index):
                    results.append(adj_box_index)
            if len(results) != 0:
                return results
        return results

    def get_precursors(self, index):
        """Mirror of ``get_successions`` searching to the left."""
        box = self.text_proposals[index]
        results = []
        for left in range(int(box[0]) - 1, max(int(box[0] - TextLineCfg.MAX_HORIZONTAL_GAP), 0) - 1, -1):
            adj_box_indices = self.boxes_table[left]
            for adj_box_index in adj_box_indices:
                if self.meet_v_iou(adj_box_index, index):
                    results.append(adj_box_index)
            if len(results) != 0:
                return results
        return results

    def is_succession_node(self, index, succession_index):
        """True when ``index`` scores at least as high as every precursor
        of ``succession_index``."""
        precursors = self.get_precursors(succession_index)
        if self.scores[index] >= np.max(self.scores[precursors]):
            return True
        return False

    def meet_v_iou(self, index1, index2):
        """Vertical-overlap and height-similarity test between proposals."""
        def overlaps_v(index1, index2):
            h1 = self.heights[index1]
            h2 = self.heights[index2]
            y0 = max(self.text_proposals[index2][1], self.text_proposals[index1][1])
            y1 = min(self.text_proposals[index2][3], self.text_proposals[index1][3])
            return max(0, y1 - y0 + 1) / min(h1, h2)

        def size_similarity(index1, index2):
            h1 = self.heights[index1]
            h2 = self.heights[index2]
            return min(h1, h2) / max(h1, h2)

        return overlaps_v(index1, index2) >= TextLineCfg.MIN_V_OVERLAPS and \
               size_similarity(index1, index2) >= TextLineCfg.MIN_SIZE_SIM

    def build_graph(self, text_proposals, scores, im_size):
        """Create the succession graph over ``text_proposals``.

        Returns a ``Graph`` whose (i, j) entry is True when proposal j is
        the chosen succession of proposal i.
        """
        self.text_proposals = text_proposals
        self.scores = scores
        self.im_size = im_size
        self.heights = text_proposals[:, 3] - text_proposals[:, 1] + 1

        # Bucket proposal indices by their integer left-x coordinate.
        boxes_table = [[] for _ in range(self.im_size[1])]
        for index, box in enumerate(text_proposals):
            boxes_table[int(box[0])].append(index)
        self.boxes_table = boxes_table

        # BUGFIX: np.bool (a deprecated alias of the builtin bool) was
        # removed in NumPy 1.24; use the builtin directly.
        graph = np.zeros((text_proposals.shape[0], text_proposals.shape[0]), bool)
        for index, box in enumerate(text_proposals):
            successions = self.get_successions(index)
            if len(successions) == 0:
                continue
            succession_index = successions[np.argmax(scores[successions])]
            if self.is_succession_node(index, succession_index):
                # NOTE: a box can have multiple successions(precursors) if multiple successions(precursors)
                # have equal scores.
                graph[index, succession_index] = True
        return Graph(graph)
class TextProposalConnectorOriented:
    """
    Connect text proposals into text lines
    """

    def __init__(self):
        self.graph_builder = TextProposalGraphBuilder()

    def group_text_proposals(self, text_proposals, scores, im_size):
        """Group proposal indices into per-line chains via the graph."""
        graph = self.graph_builder.build_graph(text_proposals, scores, im_size)
        return graph.sub_graphs_connected()

    def fit_y(self, X, Y, x1, x2):
        """Least-squares line through (X, Y), evaluated at ``x1`` and ``x2``.

        If every X is identical, the horizontal line y = Y[0] is used.
        """
        # len(X) != 0
        # if X only include one point, the function will get line y=Y[0]
        if np.sum(X == X[0]) == len(X):
            return Y[0], Y[0]
        p = np.poly1d(np.polyfit(X, Y, 1))
        return p(x1), p(x2)

    def get_text_lines(self, text_proposals, scores, im_size):
        """
        Merge text proposals into oriented text lines.

        Returns an (N, 9) array: the four corner points (x1, y1 .. x4, y4)
        of each line followed by its mean score.
        """
        # tp = text proposal: build the graph to find which proposals make
        # up each text line.
        tp_groups = self.group_text_proposals(text_proposals, scores, im_size)
        text_lines = np.zeros((len(tp_groups), 8), np.float32)

        for index, tp_indices in enumerate(tp_groups):
            text_line_boxes = text_proposals[list(tp_indices)]  # all proposals of this line
            X = (text_line_boxes[:, 0] + text_line_boxes[:, 2]) / 2  # centre x,y of each proposal
            Y = (text_line_boxes[:, 1] + text_line_boxes[:, 3]) / 2

            z1 = np.polyfit(X, Y, 1)  # least-squares line through the proposal centres

            x0 = np.min(text_line_boxes[:, 0])  # leftmost x of the line
            x1 = np.max(text_line_boxes[:, 2])  # rightmost x of the line

            offset = (text_line_boxes[0, 2] - text_line_boxes[0, 0]) * 0.5  # half proposal width

            # Fit a line through the top-left corners of all proposals and
            # evaluate it at the line's horizontal extremes ...
            lt_y, rt_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 1], x0 + offset, x1 - offset)
            # ... and likewise through the bottom-left corners.
            lb_y, rb_y = self.fit_y(text_line_boxes[:, 0], text_line_boxes[:, 3], x0 + offset, x1 - offset)

            score = scores[list(tp_indices)].sum() / float(len(tp_indices))  # mean proposal score

            text_lines[index, 0] = x0
            text_lines[index, 1] = min(lt_y, rt_y)  # smaller y of the top edge
            text_lines[index, 2] = x1
            text_lines[index, 3] = max(lb_y, rb_y)  # larger y of the bottom edge
            text_lines[index, 4] = score  # text line score
            text_lines[index, 5] = z1[0]  # slope k and intercept b of the centre line
            text_lines[index, 6] = z1[1]
            height = np.mean((text_line_boxes[:, 3] - text_line_boxes[:, 1]))  # mean proposal height
            text_lines[index, 7] = height + 2.5

        # BUGFIX: np.float (a deprecated alias of the builtin float) was
        # removed in NumPy 1.24; use the builtin directly.
        text_recs = np.zeros((len(text_lines), 9), float)
        index = 0
        for line in text_lines:
            # Intercepts of the top/bottom edges around the centre line.
            b1 = line[6] - line[7] / 2
            b2 = line[6] + line[7] / 2
            x1 = line[0]
            y1 = line[5] * line[0] + b1  # top-left
            x2 = line[2]
            y2 = line[5] * line[2] + b1  # top-right
            x3 = line[0]
            y3 = line[5] * line[0] + b2  # bottom-left
            x4 = line[2]
            y4 = line[5] * line[2] + b2  # bottom-right
            disX = x2 - x1
            disY = y2 - y1
            width = np.sqrt(disX * disX + disY * disY)  # text line width
            fTmp0 = y3 - y1  # text line height
            fTmp1 = fTmp0 * disY / width
            x = np.fabs(fTmp1 * disX / width)  # skew compensation
            y = np.fabs(fTmp1 * disY / width)
            if line[5] < 0:
                x1 -= x
                y1 += y
                x4 += x
                y4 -= y
            else:
                x2 += x
                y2 += y
                x3 -= x
                y3 -= y
            text_recs[index, 0] = x1
            text_recs[index, 1] = y1
            text_recs[index, 2] = x2
            text_recs[index, 3] = y2
            text_recs[index, 4] = x3
            text_recs[index, 5] = y3
            text_recs[index, 6] = x4
            text_recs[index, 7] = y4
            text_recs[index, 8] = line[4]
            index = index + 1
        return text_recs
| [
"numpy.mean",
"numpy.fabs",
"numpy.sqrt",
"numpy.minimum",
"numpy.hstack",
"numpy.where",
"numpy.polyfit",
"numpy.argmax",
"numpy.max",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"numpy.vstack",
"numpy.min",
"numpy.maximum",
"cv2.resize",
"numpy.arange"
] | [((975, 1018), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'inter'}), '(image, dim, interpolation=inter)\n', (985, 1018), False, 'import cv2\n'), ((1495, 1519), 'numpy.array', 'np.array', (['[0, 0, 15, 15]'], {}), '([0, 0, 15, 15])\n', (1503, 1519), True, 'import numpy as np\n'), ((1782, 1809), 'numpy.hstack', 'np.hstack', (['(x1, y1, x2, y2)'], {}), '((x1, y1, x2, y2))\n', (1791, 1809), True, 'import numpy as np\n'), ((144, 187), 'numpy.sum', 'np.sum', (['numerator'], {'axis': 'axis', 'keepdims': '(True)'}), '(numerator, axis=axis, keepdims=True)\n', (150, 187), True, 'import numpy as np\n'), ((1848, 1863), 'numpy.arange', 'np.arange', (['(0)', 'w'], {}), '(0, w)\n', (1857, 1863), True, 'import numpy as np\n'), ((1886, 1901), 'numpy.arange', 'np.arange', (['(0)', 'h'], {}), '(0, h)\n', (1895, 1901), True, 'import numpy as np\n'), ((2346, 2357), 'numpy.exp', 'np.exp', (['Vhx'], {}), '(Vhx)\n', (2352, 2357), True, 'import numpy as np\n'), ((2645, 2684), 'numpy.minimum', 'np.minimum', (['bbox[:, 0]', '(im_shape[1] - 1)'], {}), '(bbox[:, 0], im_shape[1] - 1)\n', (2655, 2684), True, 'import numpy as np\n'), ((2731, 2770), 'numpy.minimum', 'np.minimum', (['bbox[:, 1]', '(im_shape[0] - 1)'], {}), '(bbox[:, 1], im_shape[0] - 1)\n', (2741, 2770), True, 'import numpy as np\n'), ((2826, 2865), 'numpy.minimum', 'np.minimum', (['bbox[:, 2]', '(im_shape[1] - 1)'], {}), '(bbox[:, 2], im_shape[1] - 1)\n', (2836, 2865), True, 'import numpy as np\n'), ((2921, 2960), 'numpy.minimum', 'np.minimum', (['bbox[:, 3]', '(im_shape[0] - 1)'], {}), '(bbox[:, 3], im_shape[0] - 1)\n', (2931, 2960), True, 'import numpy as np\n'), ((3101, 3144), 'numpy.where', 'np.where', (['((ws >= minsize) & (hs >= minsize))'], {}), '((ws >= minsize) & (hs >= minsize))\n', (3109, 3144), True, 'import numpy as np\n'), ((3470, 3502), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (3480, 3502), True, 'import numpy as np\n'), ((3517, 3549), 
'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (3527, 3549), True, 'import numpy as np\n'), ((3564, 3596), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (3574, 3596), True, 'import numpy as np\n'), ((3611, 3643), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (3621, 3643), True, 'import numpy as np\n'), ((3657, 3687), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (3667, 3687), True, 'import numpy as np\n'), ((3700, 3730), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (3710, 3730), True, 'import numpy as np\n'), ((7228, 7297), 'numpy.zeros', 'np.zeros', (['(text_proposals.shape[0], text_proposals.shape[0])', 'np.bool'], {}), '((text_proposals.shape[0], text_proposals.shape[0]), np.bool)\n', (7236, 7297), True, 'import numpy as np\n'), ((84, 119), 'numpy.max', 'np.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (90, 119), True, 'import numpy as np\n'), ((1379, 1396), 'numpy.array', 'np.array', (['heights'], {}), '(heights)\n', (1387, 1396), True, 'import numpy as np\n'), ((1435, 1451), 'numpy.array', 'np.array', (['widths'], {}), '(widths)\n', (1443, 1451), True, 'import numpy as np\n'), ((2057, 2073), 'numpy.array', 'np.array', (['anchor'], {}), '(anchor)\n', (2065, 2073), True, 'import numpy as np\n'), ((2514, 2541), 'numpy.vstack', 'np.vstack', (['(x1, y1, x2, y2)'], {}), '((x1, y1, x2, y2))\n', (2523, 2541), True, 'import numpy as np\n'), ((3829, 3852), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (3837, 3852), True, 'import numpy as np\n'), ((5990, 6021), 'numpy.max', 'np.max', (['self.scores[precursors]'], {}), '(self.scores[precursors])\n', (5996, 6021), True, 'import numpy as np\n'), ((8355, 8372), 'numpy.sum', 'np.sum', (['(X == X[0])'], {}), '(X == X[0])\n', 
(8361, 8372), True, 'import numpy as np\n'), ((8436, 8455), 'numpy.polyfit', 'np.polyfit', (['X', 'Y', '(1)'], {}), '(X, Y, 1)\n', (8446, 8455), True, 'import numpy as np\n'), ((9103, 9122), 'numpy.polyfit', 'np.polyfit', (['X', 'Y', '(1)'], {}), '(X, Y, 1)\n', (9113, 9122), True, 'import numpy as np\n'), ((9172, 9201), 'numpy.min', 'np.min', (['text_line_boxes[:, 0]'], {}), '(text_line_boxes[:, 0])\n', (9178, 9201), True, 'import numpy as np\n'), ((9232, 9261), 'numpy.max', 'np.max', (['text_line_boxes[:, 2]'], {}), '(text_line_boxes[:, 2])\n', (9238, 9261), True, 'import numpy as np\n'), ((10184, 10238), 'numpy.mean', 'np.mean', (['(text_line_boxes[:, 3] - text_line_boxes[:, 1])'], {}), '(text_line_boxes[:, 3] - text_line_boxes[:, 1])\n', (10191, 10238), True, 'import numpy as np\n'), ((10876, 10910), 'numpy.sqrt', 'np.sqrt', (['(disX * disX + disY * disY)'], {}), '(disX * disX + disY * disY)\n', (10883, 10910), True, 'import numpy as np\n'), ((11015, 11044), 'numpy.fabs', 'np.fabs', (['(fTmp1 * disX / width)'], {}), '(fTmp1 * disX / width)\n', (11022, 11044), True, 'import numpy as np\n'), ((11068, 11097), 'numpy.fabs', 'np.fabs', (['(fTmp1 * disY / width)'], {}), '(fTmp1 * disY / width)\n', (11075, 11097), True, 'import numpy as np\n'), ((7512, 7542), 'numpy.argmax', 'np.argmax', (['scores[successions]'], {}), '(scores[successions])\n', (7521, 7542), True, 'import numpy as np\n'), ((4314, 4340), 'numpy.where', 'np.where', (['self.graph[v, :]'], {}), '(self.graph[v, :])\n', (4322, 4340), True, 'import numpy as np\n')] |
import os
import sys
sys.path.append(os.getcwd())
#from __future__ import print_function
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from workspace.workspace_intent import SENT_WORDID, SENT_LABELID, SENT_WORD_MASK, SENT_ORIGINAL_TXT
import numpy
import numpy as np
import random
import os
from torch.utils.data import Dataset, DataLoader
class RunExperiment:
    """Training / evaluation driver for the few-shot intent model."""

    def __init__(self, model, params):
        self.model = model
        self.params = params

    def run_training_epoch(self, params, train_dl, optimizer, epoch):
        """Run one meta-training epoch over ``train_dl``.

        Returns (mean loss, list of episode domains).  Every 10th epoch the
        mean loss is printed and a checkpoint is written.
        """
        # NOTE(review): HOME_DIR is neither defined in this module nor among
        # the explicit imports above -- this looks like it would raise
        # NameError unless HOME_DIR is injected into builtins elsewhere;
        # confirm.
        RSL_PATH= HOME_DIR+'/results'
        model = self.model
        idx2word = params['idx2word']
        total_loss = 0.
        i = 0
        domains = []
        for b in train_dl:
            # Unpack support set (x*), query set (xq*), OOD negatives
            # (x_ood*) and the originating domain of each episode.
            x, x_len, y, y_oh, xq, xq_len, yq, yq_oh, x_ood, x_ood_len, y_ood, y_ood_oh, domain = b['X_sup'], b['X_sup_len'], b['Y_sup'], b['Y_sup_oh'], b['X_q'], b['Xq_len'], b['Y_q'], b['Y_q_oh'], b['X_neg'], b['X_neg_len'], b['Y_neg'], b['Y_neg_oh'], b['target_sets_files']
            x = x.squeeze()
            x_len = x_len.squeeze()
            y = y.squeeze()
            y_oh = y_oh.squeeze()
            xq = xq.squeeze()
            xq_len = xq_len.squeeze()
            yq = yq.squeeze()
            yq_oh = yq_oh.squeeze()
            x_ood = x_ood.squeeze()
            x_ood_len = x_ood_len.squeeze()
            y_ood = y_ood.squeeze()
            y_ood_oh = y_ood_oh.squeeze()
            # NOTE(review): the six aliases below are never used afterwards.
            x_ = x
            y_ = y
            xq_ = xq
            yq_ = yq
            x_ood_ = x_ood
            y_ood_ = y_ood
            # bs =100
            # Flatten (episodes, shots) into one batch dimension; the
            # hard-coded 1000 presumably matches the loader's episode count
            # -- confirm against the DataLoader configuration.
            x = x.view(1000 * self.params['min_ss_size'], self.params['max_length'])
            x_len = x_len.view(1000 * self.params['min_ss_size'])
            y = y.view(1000 * self.params['min_ss_size'])
            y_oh = y_oh.view(1000 * self.params['min_ss_size'], 10)
            loss = model(x, x_len, y_oh, xq, xq_len, yq_oh, x_ood, x_ood_len, y_ood_oh)
            # NOTE(review): no optimizer.zero_grad() call is visible, so
            # gradients accumulate across batches -- confirm this is
            # intentional.
            loss.backward()
            optimizer.step()
            domains.append(domain)
            total_loss += loss.item()
            i+=1
        train_loss = total_loss/i
        if epoch % 10 == 0:
            print(train_loss)
            state = {'epoch': epoch, 'state_dict': model.state_dict(), 'optim_dict' : optimizer.state_dict()}
            torch.save(state, open(os.path.join(RSL_PATH, 'imax_intent_k100_%s.pth'%epoch), 'wb'))
        return train_loss, domains

    def run_testing_epoch(self, params, dev_dl):
        """Evaluate the model on ``dev_dl`` without gradient tracking.

        Returns (per-query prediction tuples, dataset names, prediction
        probabilities, ground truths, mean confidence on OOD queries).
        Class index 0 is treated as in-scope; everything else as 'oos'.
        """
        model = self.model
        idx2word = params['idx2word']
        with torch.no_grad():
            preds_info = []
            all_dataset = []
            probs = []
            gts = []
            avg_conf_ood = []
            for dat in dev_dl:
                for b in dat:
                    x, x_len, y, y_oh, xq, xq_len, yq, dataset = b['X_sup'], b['X_sup_len'], b['Y_sup'], b['Y_sup_oh'], b['X_q'], b['X_q_len'], b['Y_q'], b['target_set_file']
                    x = x.squeeze()
                    x_len = x_len.squeeze()
                    y = y.squeeze()
                    y_oh = y_oh.squeeze()
                    xq = xq.squeeze(0)
                    x_cpu = x.cpu().numpy()
                    y_ = y.cpu().numpy()
                    xq_cpu = xq.cpu().numpy()
                    xq_cpu = xq_cpu.reshape((xq_cpu.shape[-1]))
                    # Reconstruct the query text for inspection/debugging.
                    xq_str = [idx2word[i] for i in xq_cpu if i in idx2word and idx2word[i] != '</s>']
                    xq_str = ' '.join(xq_str)
                    pred = model._predict(x, x_len, y_oh, xq, xq_len)
                    pred = pred.cpu().data.numpy()
                    pred_cls = numpy.argmax(pred)
                    conf = numpy.max(pred)
                    pred_cls_ = ''
                    yq_str = ''
                    if pred_cls==0:
                        pred_cls_ = str(pred_cls)
                    else:
                        pred_cls_ = 'oos'
                    if yq.cpu().data.numpy().tolist()[0][0] ==0:
                        yq_str = str(yq.cpu().data.numpy().tolist()[0][0])
                        probs.append(pred)
                        gts.append(yq.cpu().data.numpy().tolist()[0][0])
                    else:
                        yq_str = 'oos'
                        probs.append(pred)
                        gts.append(yq_str)
                        # Confidence is tracked only on OOD queries.
                        avg_conf_ood.append(conf)
                    atuple = (pred_cls_, yq_str, conf)
                    preds_info.append(atuple)
                    all_dataset.append(dataset)
            probs = numpy.array(probs)
            gts = numpy.array(gts)
            avg_conf_ood = numpy.mean(avg_conf_ood)
        return preds_info, all_dataset, probs, gts, avg_conf_ood

    def get_support_set_one_hot(self, support_set, classe_list):
        """One-hot encode a (episodes, shots) label grid over ``classe_list``."""
        cls_id_map = dict()
        for lid in classe_list:
            cls_id_map[lid] = len(cls_id_map)
        support_set_one_hot = numpy.zeros([len(support_set),
                                          len(support_set[0]),
                                          len(cls_id_map)])
        for k in range(len(support_set)):
            for j in range(len(support_set[k])):
                support_set_one_hot[k][j][cls_id_map[support_set[k][j]]] = 1.0
        return support_set_one_hot

    def get_one_hot(self, y_target, classe_list):
        """One-hot encode a flat label list over ``classe_list``."""
        cls_id_map = dict()
        for lid in classe_list:
            cls_id_map[lid] = len(cls_id_map)
        y_target_one_hot = numpy.zeros([len(y_target), len(cls_id_map)])
        for k in range(len(y_target)):
            y_target_one_hot[k][cls_id_map[y_target[k]]] = 1.0
        return y_target_one_hot
| [
"numpy.mean",
"os.path.join",
"numpy.argmax",
"os.getcwd",
"numpy.max",
"numpy.array",
"torch.no_grad"
] | [((37, 48), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46, 48), False, 'import os\n'), ((4884, 4902), 'numpy.array', 'numpy.array', (['probs'], {}), '(probs)\n', (4895, 4902), False, 'import numpy\n'), ((4917, 4933), 'numpy.array', 'numpy.array', (['gts'], {}), '(gts)\n', (4928, 4933), False, 'import numpy\n'), ((4958, 4982), 'numpy.mean', 'numpy.mean', (['avg_conf_ood'], {}), '(avg_conf_ood)\n', (4968, 4982), False, 'import numpy\n'), ((2735, 2750), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2748, 2750), False, 'import torch\n'), ((2485, 2542), 'os.path.join', 'os.path.join', (['RSL_PATH', "('imax_intent_k100_%s.pth' % epoch)"], {}), "(RSL_PATH, 'imax_intent_k100_%s.pth' % epoch)\n", (2497, 2542), False, 'import os\n'), ((3861, 3879), 'numpy.argmax', 'numpy.argmax', (['pred'], {}), '(pred)\n', (3873, 3879), False, 'import numpy\n'), ((3907, 3922), 'numpy.max', 'numpy.max', (['pred'], {}), '(pred)\n', (3916, 3922), False, 'import numpy\n')] |
#! /usr/bin/env python
import numpy as np
from .order_parameters import potential_M_N, centroid_m
def is_discrete_pattern_formed(states, params):
K = params['K']
M = params['M']
potential = potential_M_N(K, M, states.values())
velocities = np.array([s.velocity for s in states.values()])
speeds = np.linalg.norm(velocities, axis=1)
# print(max(speeds))
return potential < 1e-15 and max(speeds) < 0.001
def is_original_pattern_formed(states, params):
phases = [s.phase for s in states.values()]
centroid = centroid_m(1, phases)
velocities = np.array([s.velocity for s in states.values()])
speeds = np.linalg.norm(velocities, axis=1)
return abs(1 - centroid) < 1e-15 and max(speeds) < 0.001
| [
"numpy.linalg.norm"
] | [((320, 354), 'numpy.linalg.norm', 'np.linalg.norm', (['velocities'], {'axis': '(1)'}), '(velocities, axis=1)\n', (334, 354), True, 'import numpy as np\n'), ((646, 680), 'numpy.linalg.norm', 'np.linalg.norm', (['velocities'], {'axis': '(1)'}), '(velocities, axis=1)\n', (660, 680), True, 'import numpy as np\n')] |
import os
import struct
import numpy as np
import torch
import torch.utils.data
from functools import lru_cache
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
# print("hello " + path)
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
@lru_cache(maxsize=8)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int32):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes) | [
"numpy.frombuffer",
"numpy.memmap",
"torch.from_numpy",
"struct.pack",
"numpy.array",
"numpy.empty",
"functools.lru_cache"
] | [((144, 171), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (152, 171), True, 'import numpy as np\n'), ((4783, 4803), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (4792, 4803), False, 'from functools import lru_cache\n'), ((240, 267), 'numpy.array', 'np.array', (['a'], {'dtype': 'np.int64'}), '(a, dtype=np.int64)\n', (248, 267), True, 'import numpy as np\n'), ((3811, 3831), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (3820, 3831), False, 'from functools import lru_cache\n'), ((4888, 4973), 'numpy.frombuffer', 'np.frombuffer', (['self._bin_buffer'], {'dtype': 'self._index.dtype', 'count': 'size', 'offset': 'ptr'}), '(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr\n )\n', (4901, 4973), True, 'import numpy as np\n'), ((5076, 5102), 'torch.from_numpy', 'torch.from_numpy', (['np_array'], {}), '(np_array)\n', (5092, 5102), False, 'import torch\n'), ((3155, 3191), 'numpy.memmap', 'np.memmap', (['path'], {'mode': '"""r"""', 'order': '"""C"""'}), "(path, mode='r', order='C')\n", (3164, 3191), True, 'import numpy as np\n'), ((3284, 3363), 'numpy.frombuffer', 'np.frombuffer', (['self._bin_buffer'], {'dtype': 'np.int32', 'count': 'self._len', 'offset': 'offset'}), '(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)\n', (3297, 3363), True, 'import numpy as np\n'), ((3393, 3498), 'numpy.frombuffer', 'np.frombuffer', (['self._bin_buffer'], {'dtype': 'np.int64', 'count': 'self._len', 'offset': '(offset + self._sizes.nbytes)'}), '(self._bin_buffer, dtype=np.int64, count=self._len, offset=\n offset + self._sizes.nbytes)\n', (3406, 3498), True, 'import numpy as np\n'), ((1890, 1921), 'numpy.array', 'np.array', (['sizes'], {'dtype': 'np.int32'}), '(sizes, dtype=np.int32)\n', (1898, 1921), True, 'import numpy as np\n'), ((2047, 2081), 'numpy.array', 'np.array', (['pointers'], {'dtype': 'np.int64'}), '(pointers, dtype=np.int64)\n', (2055, 
2081), True, 'import numpy as np\n'), ((1202, 1222), 'struct.pack', 'struct.pack', (['"""<Q"""', '(1)'], {}), "('<Q', 1)\n", (1213, 1222), False, 'import struct\n')] |
# *SymmetryFinder*: platform-independent symmetry finder, wrapping Spglib code
# *SymmetryHandler*: symmetry inferences for 0D-, 1D-, 2D- and 3D-systems
# Author: <NAME>
from numpy.linalg import det
from ase.atoms import Atoms
from ase.geometry import cell_to_cellpar
import spglib as spg
class SymmetryFinder:
accuracy = 1e-04
def __init__(self, accuracy=None):
self.error = None
self.accuracy=accuracy if accuracy else SymmetryFinder.accuracy
self.angle_tolerance = -1
def get_spacegroup(self, tilde_obj):
try:
symmetry = spg.get_spacegroup(tilde_obj['structures'][-1], symprec=self.accuracy, angle_tolerance=self.angle_tolerance)
except Exception as ex:
self.error = 'Symmetry finder error: %s' % ex
else:
try:
self.sg, self.ng = symmetry.split()
self.ng = int(self.ng.strip("()"))
except (ValueError, IndexError, AttributeError):
self.ng = 0
self.error = 'Symmetry finder error (probably, coinciding atoms)'
def refine_cell(self, tilde_obj):
'''
NB only used for perovskite_tilting app
'''
try: lattice, positions, numbers = spg.refine_cell(tilde_obj['structures'][-1], symprec=self.accuracy, angle_tolerance=self.angle_tolerance)
except Exception as ex:
self.error = 'Symmetry finder error: %s' % ex
else:
self.refinedcell = Atoms(numbers=numbers, cell=lattice, scaled_positions=positions, pbc=tilde_obj['structures'][-1].get_pbc())
self.refinedcell.periodicity = sum(self.refinedcell.get_pbc())
self.refinedcell.dims = abs(det(tilde_obj['structures'][-1].cell))
"""
# Dummy class for testing purposes
class SymmetryFinder:
accuracy = 1e-04
def __init__(self, tilde_obj, accuracy=None):
self.error = None
self.accuracy=accuracy if accuracy else SymmetryFinder.accuracy
def get_spacegroup(self, tilde_obj):
self.ng = 1
self.sg = 'P1'
"""
class SymmetryHandler(SymmetryFinder):
def __init__(self, tilde_obj, accuracy=None):
self.sg = None
self.ng = None
self.system = None
self.pg = None
self.dg = None
SymmetryFinder.__init__(self, accuracy)
SymmetryFinder.get_spacegroup(self, tilde_obj)
# Data below are taken from Table 2.3 of the book
# <NAME>, Quantum Chemistry of Solids,
# LCAO Treatment of Crystals and Nanostructures, 2nd Edition,
# Springer, 2012, http://dx.doi.org/10.1007/978-3-642-30356-2
# NB 7 crystal systems != 7 lattice systems
# space group to crystal system conversion
if 195 <= self.ng <= 230: self.system = 'cubic'
elif 168 <= self.ng <= 194: self.system = 'hexagonal'
elif 143 <= self.ng <= 167: self.system = 'trigonal'
elif 75 <= self.ng <= 142: self.system = 'tetragonal'
elif 16 <= self.ng <= 74: self.system = 'orthorhombic'
elif 3 <= self.ng <= 15: self.system = 'monoclinic'
elif 1 <= self.ng <= 2: self.system = 'triclinic'
# space group to point group conversion
if 221 <= self.ng <= 230: self.pg = 'O<sub>h</sub>'
elif 215 <= self.ng <= 220: self.pg = 'T<sub>d</sub>'
elif 207 <= self.ng <= 214: self.pg = 'O'
elif 200 <= self.ng <= 206: self.pg = 'T<sub>h</sub>'
elif 195 <= self.ng <= 199: self.pg = 'T'
elif 191 <= self.ng <= 194: self.pg = 'D<sub>6h</sub>'
elif 187 <= self.ng <= 190: self.pg = 'D<sub>3h</sub>'
elif 183 <= self.ng <= 186: self.pg = 'C<sub>6v</sub>'
elif 177 <= self.ng <= 182: self.pg = 'D<sub>6</sub>'
elif 175 <= self.ng <= 176: self.pg = 'C<sub>6h</sub>'
elif self.ng == 174: self.pg = 'C<sub>3h</sub>'
elif 168 <= self.ng <= 173: self.pg = 'C<sub>6</sub>'
elif 162 <= self.ng <= 167: self.pg = 'D<sub>3d</sub>'
elif 156 <= self.ng <= 161: self.pg = 'C<sub>3v</sub>'
elif 149 <= self.ng <= 155: self.pg = 'D<sub>3</sub>'
elif 147 <= self.ng <= 148: self.pg = 'C<sub>3i</sub>'
elif 143 <= self.ng <= 146: self.pg = 'C<sub>3</sub>'
elif 123 <= self.ng <= 142: self.pg = 'D<sub>4h</sub>'
elif 111 <= self.ng <= 122: self.pg = 'D<sub>2d</sub>'
elif 99 <= self.ng <= 110: self.pg = 'C<sub>4v</sub>'
elif 89 <= self.ng <= 98: self.pg = 'D<sub>4</sub>'
elif 83 <= self.ng <= 88: self.pg = 'C<sub>4h</sub>'
elif 81 <= self.ng <= 82: self.pg = 'S<sub>4</sub>'
elif 75 <= self.ng <= 80: self.pg = 'C<sub>4</sub>'
elif 47 <= self.ng <= 74: self.pg = 'D<sub>2h</sub>'
elif 25 <= self.ng <= 46: self.pg = 'C<sub>2v</sub>'
elif 16 <= self.ng <= 24: self.pg = 'D<sub>2</sub>'
elif 10 <= self.ng <= 15: self.pg = 'C<sub>2h</sub>'
elif 6 <= self.ng <= 9: self.pg = 'C<sub>s</sub>'
elif 3 <= self.ng <= 5: self.pg = 'C<sub>2</sub>'
elif self.ng == 2: self.pg = 'C<sub>i</sub>'
elif self.ng == 1: self.pg = 'C<sub>1</sub>'
# space group to layer group conversion
if getattr(tilde_obj['structures'][-1], 'periodicity', None) == 2:
if self.ng in [25, 26, 28, 51]:
tilde_obj.warning('Warning! Diperiodical group setting is undefined!')
DIPERIODIC_MAPPING = {3:8, 4:9, 5:10, 6:11, 7:12, 8:13, 10:14, 11:15, 12:16, 13:17, 14:18, 16:19, 17:20, 18:21, 21:22, 25:23, 25:24, 26:25, 26:26, 27:27, 28:28, 28:29, 29:30, 30:31, 31:32, 32:33, 35:34, 38:35, 39:36, 47:37, 49:38, 50:39, 51:40, 51:41, 53:42, 54:43, 55:44, 57:45, 59:46, 65:47, 67:48, 75:49, 81:50, 83:51, 85:52, 89:53, 90:54, 99:55, 100:56, 111:57, 113:58, 115:59, 117:60, 123:61, 125:62, 127:63, 129:64, 143:65, 147:66, 149:67, 150:68, 156:69, 157:70, 162:71, 164:72, 168:73, 174:74, 175:75, 177:76, 183:77, 187:78, 189:79, 191:80}
cellpar = cell_to_cellpar( tilde_obj['structures'][-1].cell ).tolist()
if cellpar[3] != 90 or cellpar[4] != 90 or cellpar[5] != 90:
DIPERIODIC_MAPPING.update({1:1, 2:2, 3:3, 6:4, 7:5, 10:6, 13:7})
try: self.dg = DIPERIODIC_MAPPING[self.ng]
except KeyError: tilde_obj.warning('No diperiodical group found because rotational axes inconsistent with 2d translations!')
else:
if 65 <= self.dg <= 80: self.system = '2d-hexagonal'
elif 49 <= self.dg <= 64: self.system = '2d-square'
elif 8 <= self.dg <= 48: self.system = '2d-rectangular'
elif 1 <= self.dg <= 7: self.system = '2d-oblique'
| [
"numpy.linalg.det",
"spglib.refine_cell",
"ase.geometry.cell_to_cellpar",
"spglib.get_spacegroup"
] | [((588, 700), 'spglib.get_spacegroup', 'spg.get_spacegroup', (["tilde_obj['structures'][-1]"], {'symprec': 'self.accuracy', 'angle_tolerance': 'self.angle_tolerance'}), "(tilde_obj['structures'][-1], symprec=self.accuracy,\n angle_tolerance=self.angle_tolerance)\n", (606, 700), True, 'import spglib as spg\n'), ((1246, 1355), 'spglib.refine_cell', 'spg.refine_cell', (["tilde_obj['structures'][-1]"], {'symprec': 'self.accuracy', 'angle_tolerance': 'self.angle_tolerance'}), "(tilde_obj['structures'][-1], symprec=self.accuracy,\n angle_tolerance=self.angle_tolerance)\n", (1261, 1355), True, 'import spglib as spg\n'), ((1710, 1747), 'numpy.linalg.det', 'det', (["tilde_obj['structures'][-1].cell"], {}), "(tilde_obj['structures'][-1].cell)\n", (1713, 1747), False, 'from numpy.linalg import det\n'), ((6029, 6078), 'ase.geometry.cell_to_cellpar', 'cell_to_cellpar', (["tilde_obj['structures'][-1].cell"], {}), "(tilde_obj['structures'][-1].cell)\n", (6044, 6078), False, 'from ase.geometry import cell_to_cellpar\n')] |
#!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import unittest
import os.path
import sys
import numpy
import anuga
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.abstract_2d_finite_volumes.util import file_function
from anuga.utilities.system_tools import get_pathname_from_package
from anuga.structures.inlet_operator import Inlet_operator
class Test_inlet_operator(unittest.TestCase):
"""
Test the boyd box operator, in particular the discharge_routine!
"""
def setUp(self):
pass
def tearDown(self):
try:
os.remove('Test_Outlet_Inlet.sww')
except:
pass
def _create_domain(self,d_length,
d_width,
dx,
dy,
elevation_0,
elevation_1,
stage_0,
stage_1):
points, vertices, boundary = rectangular_cross(int(old_div(d_length,dx)), int(old_div(d_width,dy)),
len1=d_length, len2=d_width)
domain = Domain(points, vertices, boundary)
domain.set_name('Test_Outlet_Inlet') # Output name
domain.set_store()
domain.set_default_order(2)
domain.H0 = 0.01
domain.tight_slope_limiters = 1
#print 'Size', len(domain)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
def elevation(x, y):
"""Set up a elevation
"""
z = numpy.zeros(x.shape,dtype='d')
z[:] = elevation_0
numpy.putmask(z, x > old_div(d_length,2), elevation_1)
return z
def stage(x,y):
"""Set up stage
"""
z = numpy.zeros(x.shape,dtype='d')
z[:] = stage_0
numpy.putmask(z, x > old_div(d_length,2), stage_1)
return z
#print 'Setting Quantities....'
domain.set_quantity('elevation', elevation) # Use function for elevation
domain.set_quantity('stage', stage) # Use function for elevation
Br = anuga.Reflective_boundary(domain)
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
return domain
def test_inlet_constant_Q(self):
"""test_inlet_Q
This tests that the inlet operator adds the correct amount of water
"""
stage_0 = 11.0
stage_1 = 10.0
elevation_0 = 10.0
elevation_1 = 10.0
domain_length = 200.0
domain_width = 200.0
domain = self._create_domain(d_length=domain_length,
d_width=domain_width,
dx = 10.0,
dy = 10.0,
elevation_0 = elevation_0,
elevation_1 = elevation_1,
stage_0 = stage_0,
stage_1 = stage_1)
vol0 = domain.compute_total_volume()
finaltime = 3.0
line1 = [[95.0, 10.0], [105.0, 10.0]]
Q1 = 5.00
line2 = [[10.0, 90.0], [20.0, 90.0]]
Q2 = 10.0
Inlet_operator(domain, line1, Q1, logging=False)
Inlet_operator(domain, line2, Q2)
for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime):
#domain.write_time()
#print domain.volumetric_balance_statistics()
pass
vol1 = domain.compute_total_volume()
assert numpy.allclose((Q1+Q2)*finaltime, vol1-vol0, rtol=1.0e-8)
assert numpy.allclose((Q1+Q2)*finaltime, domain.fractional_step_volume_integral, rtol=1.0e-8)
def test_inlet_constant_Q_polygon(self):
"""test_inlet_Q
This tests that the inlet operator adds the correct amount of water
"""
stage_0 = 11.0
stage_1 = 10.0
elevation_0 = 10.0
elevation_1 = 10.0
domain_length = 200.0
domain_width = 200.0
domain = self._create_domain(d_length=domain_length,
d_width=domain_width,
dx = 10.0,
dy = 10.0,
elevation_0 = elevation_0,
elevation_1 = elevation_1,
stage_0 = stage_0,
stage_1 = stage_1)
vol0 = domain.compute_total_volume()
finaltime = 3.0
poly1 = [[95.0, 10.0], [105.0, 10.0], [105, 20.0], [95.0, 20.0]]
Q1 = 5.00
Inlet_operator(domain, poly1, Q1, logging=False)
for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime):
#domain.write_time()
#print domain.volumetric_balance_statistics()
pass
vol1 = domain.compute_total_volume()
assert numpy.allclose((Q1)*finaltime, vol1-vol0, rtol=1.0e-8)
assert numpy.allclose((Q1)*finaltime, domain.fractional_step_volume_integral, rtol=1.0e-8)
def test_inlet_variable_Q(self):
"""test_inlet_Q
This tests that the inlet operator adds the correct amount of water
"""
stage_0 = 11.0
stage_1 = 10.0
elevation_0 = 10.0
elevation_1 = 10.0
domain_length = 200.0
domain_width = 200.0
domain = self._create_domain(d_length=domain_length,
d_width=domain_width,
dx = 10.0,
dy = 10.0,
elevation_0 = elevation_0,
elevation_1 = elevation_1,
stage_0 = stage_0,
stage_1 = stage_1)
vol0 = domain.compute_total_volume()
finaltime = 3.0
#Make sure we are inthe right directory to find the
#time series data for the inlets
import os
path = get_pathname_from_package('anuga.structures')
filename1 = os.path.join(path, 'tests', 'data', 'inlet_operator_test1.tms')
filename2 = os.path.join(path, 'tests', 'data', 'inlet_operator_test2.tms')
line1 = [[95.0, 10.0], [105.0, 10.0]]
Q1 = file_function(filename=filename1, quantities=['hydrograph'])
line2 = [[10.0, 90.0], [20.0, 90.0]]
Q2 = file_function(filename=filename2, quantities=['hydrograph'])
Inlet_operator(domain, line1, Q1)
Inlet_operator(domain, line2, Q2)
for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime):
#domain.write_time()
#print domain.volumetric_balance_statistics()
pass
vol1 = domain.compute_total_volume()
#print vol1-vol0
assert numpy.allclose(13.5, vol1-vol0, rtol=1.0e-8)
assert numpy.allclose(vol1-vol0, domain.fractional_step_volume_integral, rtol=1.0e-8)
def test_inlet_variable_Q_default(self):
"""test_inlet_Q
This tests that the inlet operator adds the correct amount of water
"""
stage_0 = 11.0
stage_1 = 10.0
elevation_0 = 10.0
elevation_1 = 10.0
domain_length = 200.0
domain_width = 200.0
domain = self._create_domain(d_length=domain_length,
d_width=domain_width,
dx = 10.0,
dy = 10.0,
elevation_0 = elevation_0,
elevation_1 = elevation_1,
stage_0 = stage_0,
stage_1 = stage_1)
vol0 = domain.compute_total_volume()
finaltime = 5.0
#Make sure we are inthe right directory to find the
#time series data for the inlets
import os
baseDir = os.getcwd()
path = get_pathname_from_package('anuga.structures')
filename1 = os.path.join(path, 'tests', 'data', 'inlet_operator_test1.tms')
filename2 = os.path.join(path, 'tests', 'data', 'inlet_operator_test2.tms')
line1 = [[95.0, 10.0], [105.0, 10.0]]
Q1 = file_function(filename=filename1, quantities=['hydrograph'])
line2 = [[10.0, 90.0], [20.0, 90.0]]
Q2 = file_function(filename=filename2, quantities=['hydrograph'])
os.chdir(baseDir)
import warnings
warnings.simplefilter("ignore")
Inlet_operator(domain, line1, Q1, default=6)
Inlet_operator(domain, line2, Q2, default=3)
for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime):
#domain.write_time()
#print domain.volumetric_balance_statistics()
pass
warnings.simplefilter("default")
vol1 = domain.compute_total_volume()
#print vol1-vol0
assert numpy.allclose(31.5, vol1-vol0, rtol=1.0e-8)
assert numpy.allclose(vol1-vol0, domain.fractional_step_volume_integral, rtol=1.0e-8)
# =========================================================================
if __name__ == "__main__":
suite = unittest.makeSuite(Test_inlet_operator, 'test')
runner = unittest.TextTestRunner()
runner.run(suite)
| [
"numpy.allclose",
"unittest.makeSuite",
"anuga.Reflective_boundary",
"os.path.join",
"anuga.utilities.system_tools.get_pathname_from_package",
"os.getcwd",
"os.chdir",
"os.remove",
"numpy.zeros",
"past.utils.old_div",
"anuga.shallow_water.shallow_water_domain.Domain",
"warnings.simplefilter",
... | [((9849, 9896), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_inlet_operator', '"""test"""'], {}), "(Test_inlet_operator, 'test')\n", (9867, 9896), False, 'import unittest\n'), ((9910, 9935), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (9933, 9935), False, 'import unittest\n'), ((1302, 1336), 'anuga.shallow_water.shallow_water_domain.Domain', 'Domain', (['points', 'vertices', 'boundary'], {}), '(points, vertices, boundary)\n', (1308, 1336), False, 'from anuga.shallow_water.shallow_water_domain import Domain\n'), ((2546, 2579), 'anuga.Reflective_boundary', 'anuga.Reflective_boundary', (['domain'], {}), '(domain)\n', (2571, 2579), False, 'import anuga\n'), ((3701, 3749), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', (['domain', 'line1', 'Q1'], {'logging': '(False)'}), '(domain, line1, Q1, logging=False)\n', (3715, 3749), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((3758, 3791), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', (['domain', 'line2', 'Q2'], {}), '(domain, line2, Q2)\n', (3772, 3791), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((4037, 4099), 'numpy.allclose', 'numpy.allclose', (['((Q1 + Q2) * finaltime)', '(vol1 - vol0)'], {'rtol': '(1e-08)'}), '((Q1 + Q2) * finaltime, vol1 - vol0, rtol=1e-08)\n', (4051, 4099), False, 'import numpy\n'), ((4111, 4205), 'numpy.allclose', 'numpy.allclose', (['((Q1 + Q2) * finaltime)', 'domain.fractional_step_volume_integral'], {'rtol': '(1e-08)'}), '((Q1 + Q2) * finaltime, domain.\n fractional_step_volume_integral, rtol=1e-08)\n', (4125, 4205), False, 'import numpy\n'), ((5151, 5199), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', (['domain', 'poly1', 'Q1'], {'logging': '(False)'}), '(domain, poly1, Q1, logging=False)\n', (5165, 5199), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((5445, 5500), 'numpy.allclose', 'numpy.allclose', (['(Q1 * 
finaltime)', '(vol1 - vol0)'], {'rtol': '(1e-08)'}), '(Q1 * finaltime, vol1 - vol0, rtol=1e-08)\n', (5459, 5500), False, 'import numpy\n'), ((5515, 5602), 'numpy.allclose', 'numpy.allclose', (['(Q1 * finaltime)', 'domain.fractional_step_volume_integral'], {'rtol': '(1e-08)'}), '(Q1 * finaltime, domain.fractional_step_volume_integral, rtol\n =1e-08)\n', (5529, 5602), False, 'import numpy\n'), ((6603, 6648), 'anuga.utilities.system_tools.get_pathname_from_package', 'get_pathname_from_package', (['"""anuga.structures"""'], {}), "('anuga.structures')\n", (6628, 6648), False, 'from anuga.utilities.system_tools import get_pathname_from_package\n'), ((6669, 6732), 'os.path.join', 'os.path.join', (['path', '"""tests"""', '"""data"""', '"""inlet_operator_test1.tms"""'], {}), "(path, 'tests', 'data', 'inlet_operator_test1.tms')\n", (6681, 6732), False, 'import os\n'), ((6753, 6816), 'os.path.join', 'os.path.join', (['path', '"""tests"""', '"""data"""', '"""inlet_operator_test2.tms"""'], {}), "(path, 'tests', 'data', 'inlet_operator_test2.tms')\n", (6765, 6816), False, 'import os\n'), ((6877, 6937), 'anuga.abstract_2d_finite_volumes.util.file_function', 'file_function', ([], {'filename': 'filename1', 'quantities': "['hydrograph']"}), "(filename=filename1, quantities=['hydrograph'])\n", (6890, 6937), False, 'from anuga.abstract_2d_finite_volumes.util import file_function\n'), ((7005, 7065), 'anuga.abstract_2d_finite_volumes.util.file_function', 'file_function', ([], {'filename': 'filename2', 'quantities': "['hydrograph']"}), "(filename=filename2, quantities=['hydrograph'])\n", (7018, 7065), False, 'from anuga.abstract_2d_finite_volumes.util import file_function\n'), ((7084, 7117), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', (['domain', 'line1', 'Q1'], {}), '(domain, line1, Q1)\n', (7098, 7117), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((7126, 7159), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', 
(['domain', 'line2', 'Q2'], {}), '(domain, line2, Q2)\n', (7140, 7159), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((7439, 7484), 'numpy.allclose', 'numpy.allclose', (['(13.5)', '(vol1 - vol0)'], {'rtol': '(1e-08)'}), '(13.5, vol1 - vol0, rtol=1e-08)\n', (7453, 7484), False, 'import numpy\n'), ((7500, 7579), 'numpy.allclose', 'numpy.allclose', (['(vol1 - vol0)', 'domain.fractional_step_volume_integral'], {'rtol': '(1e-08)'}), '(vol1 - vol0, domain.fractional_step_volume_integral, rtol=1e-08)\n', (7514, 7579), False, 'import numpy\n'), ((8583, 8594), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8592, 8594), False, 'import os\n'), ((8611, 8656), 'anuga.utilities.system_tools.get_pathname_from_package', 'get_pathname_from_package', (['"""anuga.structures"""'], {}), "('anuga.structures')\n", (8636, 8656), False, 'from anuga.utilities.system_tools import get_pathname_from_package\n'), ((8677, 8740), 'os.path.join', 'os.path.join', (['path', '"""tests"""', '"""data"""', '"""inlet_operator_test1.tms"""'], {}), "(path, 'tests', 'data', 'inlet_operator_test1.tms')\n", (8689, 8740), False, 'import os\n'), ((8761, 8824), 'os.path.join', 'os.path.join', (['path', '"""tests"""', '"""data"""', '"""inlet_operator_test2.tms"""'], {}), "(path, 'tests', 'data', 'inlet_operator_test2.tms')\n", (8773, 8824), False, 'import os\n'), ((8885, 8945), 'anuga.abstract_2d_finite_volumes.util.file_function', 'file_function', ([], {'filename': 'filename1', 'quantities': "['hydrograph']"}), "(filename=filename1, quantities=['hydrograph'])\n", (8898, 8945), False, 'from anuga.abstract_2d_finite_volumes.util import file_function\n'), ((9005, 9065), 'anuga.abstract_2d_finite_volumes.util.file_function', 'file_function', ([], {'filename': 'filename2', 'quantities': "['hydrograph']"}), "(filename=filename2, quantities=['hydrograph'])\n", (9018, 9065), False, 'from anuga.abstract_2d_finite_volumes.util import file_function\n'), ((9075, 9092), 'os.chdir', 'os.chdir', 
(['baseDir'], {}), '(baseDir)\n', (9083, 9092), False, 'import os\n'), ((9126, 9157), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (9147, 9157), False, 'import warnings\n'), ((9184, 9228), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', (['domain', 'line1', 'Q1'], {'default': '(6)'}), '(domain, line1, Q1, default=6)\n', (9198, 9228), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((9237, 9281), 'anuga.structures.inlet_operator.Inlet_operator', 'Inlet_operator', (['domain', 'line2', 'Q2'], {'default': '(3)'}), '(domain, line2, Q2, default=3)\n', (9251, 9281), False, 'from anuga.structures.inlet_operator import Inlet_operator\n'), ((9473, 9505), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (9494, 9505), False, 'import warnings\n'), ((9593, 9638), 'numpy.allclose', 'numpy.allclose', (['(31.5)', '(vol1 - vol0)'], {'rtol': '(1e-08)'}), '(31.5, vol1 - vol0, rtol=1e-08)\n', (9607, 9638), False, 'import numpy\n'), ((9653, 9732), 'numpy.allclose', 'numpy.allclose', (['(vol1 - vol0)', 'domain.fractional_step_volume_integral'], {'rtol': '(1e-08)'}), '(vol1 - vol0, domain.fractional_step_volume_integral, rtol=1e-08)\n', (9667, 9732), False, 'import numpy\n'), ((700, 734), 'os.remove', 'os.remove', (['"""Test_Outlet_Inlet.sww"""'], {}), "('Test_Outlet_Inlet.sww')\n", (709, 734), False, 'import os\n'), ((1900, 1931), 'numpy.zeros', 'numpy.zeros', (['x.shape'], {'dtype': '"""d"""'}), "(x.shape, dtype='d')\n", (1911, 1931), False, 'import numpy\n'), ((2165, 2196), 'numpy.zeros', 'numpy.zeros', (['x.shape'], {'dtype': '"""d"""'}), "(x.shape, dtype='d')\n", (2176, 2196), False, 'import numpy\n'), ((1151, 1172), 'past.utils.old_div', 'old_div', (['d_length', 'dx'], {}), '(d_length, dx)\n', (1158, 1172), False, 'from past.utils import old_div\n'), ((1178, 1198), 'past.utils.old_div', 'old_div', (['d_width', 'dy'], {}), '(d_width, dy)\n', (1185, 
1198), False, 'from past.utils import old_div\n'), ((2008, 2028), 'past.utils.old_div', 'old_div', (['d_length', '(2)'], {}), '(d_length, 2)\n', (2015, 2028), False, 'from past.utils import old_div\n'), ((2269, 2289), 'past.utils.old_div', 'old_div', (['d_length', '(2)'], {}), '(d_length, 2)\n', (2276, 2289), False, 'from past.utils import old_div\n')] |
import random
import numpy as np
def add_noise_new(data, labels, params):
# new refactoring of the noise injection methods
# noise_mode sets the pattern of the noise injection
# cluster: apply to the samples and features defined by noise_samples and noise_features
# conditional : apply to features conditional on a threshold
#
# noise_type sets the form of the noise
# gaussian: Gaussian feature noise with noise_scale as std_dev
# uniform: Uniformly distributed noise on the interval [-noise_scale, noise_scale]
# label: Flip labels
if params["noise_injection"]:
if params["label_noise"]:
# check if we want noise correlated with a feature
if params["noise_correlated"]:
labels, y_noise_gen = label_flip_correlated(
labels,
params["label_noise"],
data,
params["feature_col"],
params["feature_threshold"],
)
# else add uncorrelated noise
else:
labels, y_noise_gen = label_flip(labels, params["label_noise"])
# check if noise is on for RNA-seq data
elif params["noise_gaussian"]:
data = add_gaussian_noise(data, 0, params["std_dev"])
elif params["noise_cluster"]:
data = add_cluster_noise(
data,
loc=0.0,
scale=params["std_dev"],
col_ids=params["feature_col"],
noise_type=params["noise_type"],
row_ids=params["sample_ids"],
y_noise_level=params["label_noise"],
)
elif params["noise_column"]:
data = add_column_noise(
data,
0,
params["std_dev"],
col_ids=params["feature_col"],
noise_type=params["noise_type"],
)
return data, labels
def add_noise(data, labels, params):
    """Legacy noise-injection dispatcher keyed on ``params["add_noise"]``.

    Behaves exactly like :func:`add_noise_new` except that the master switch
    is the ``add_noise`` key.  At most one noise form is applied: label noise
    (optionally feature-correlated), Gaussian feature noise, cluster noise,
    or whole-column noise.  Returns the (possibly modified) data and labels.
    """
    if not params["add_noise"]:
        return data, labels
    if params["label_noise"]:
        # correlated flips only touch samples above a feature threshold
        if params["noise_correlated"]:
            labels, _ = label_flip_correlated(
                labels,
                params["label_noise"],
                data,
                params["feature_col"],
                params["feature_threshold"],
            )
        else:
            labels, _ = label_flip(labels, params["label_noise"])
    elif params["noise_gaussian"]:
        data = add_gaussian_noise(data, 0, params["std_dev"])
    elif params["noise_cluster"]:
        data = add_cluster_noise(
            data,
            loc=0.0,
            scale=params["std_dev"],
            col_ids=params["feature_col"],
            noise_type=params["noise_type"],
            row_ids=params["sample_ids"],
            y_noise_level=params["label_noise"],
        )
    elif params["noise_column"]:
        data = add_column_noise(
            data,
            0,
            params["std_dev"],
            col_ids=params["feature_col"],
            noise_type=params["noise_type"],
        )
    return data, labels
def label_flip(y_data_categorical, y_noise_level):
    """Flip every label bit of each sample with probability ``y_noise_level``.

    ``y_data_categorical`` is a 2-D (samples x classes) array of 0/1 labels;
    it is modified in place.  Returns the array together with the fraction of
    samples actually flipped.
    """
    n_samples = y_data_categorical.shape[0]
    n_classes = y_data_categorical.shape[1]
    flip_count = 0
    for row in range(n_samples):
        # one random draw per sample decides whether the whole row is flipped
        if random.random() >= y_noise_level:
            continue
        flip_count += 1
        for col in range(n_classes):
            y_data_categorical[row][col] = int(not y_data_categorical[row][col])
    y_noise_generated = float(flip_count) / float(n_samples)
    print("Uncorrelated label noise generation:\n")
    print(
        "Labels flipped on {} samples out of {}: {:06.4f} ({:06.4f} requested)\n".format(
            flip_count, y_data_categorical.shape[0], y_noise_generated, y_noise_level
        )
    )
    return y_data_categorical, y_noise_generated
def label_flip_correlated(
    y_data_categorical, y_noise_level, x_data, col_ids, threshold
):
    """Flip labels with probability ``y_noise_level``, but only for samples
    whose feature ``col_id`` exceeds ``threshold`` (repeated for each column
    in ``col_ids``).

    ``y_data_categorical`` is modified in place.  Returns the array and the
    flip fraction of the *last* processed column (0.0 when ``col_ids`` is
    empty -- previously that case raised UnboundLocalError).
    """
    # Initialize so an empty col_ids no longer crashes on the return statement.
    y_noise_generated = 0.0
    for col_id in col_ids:
        flip_count = 0
        for i in range(0, y_data_categorical.shape[0]):
            if x_data[i][col_id] > threshold:
                if random.random() < y_noise_level:
                    print(i, y_data_categorical[i][:])
                    flip_count += 1
                    for j in range(y_data_categorical.shape[1]):
                        y_data_categorical[i][j] = int(not y_data_categorical[i][j])
                    print(i, y_data_categorical[i][:])
        y_noise_generated = float(flip_count) / float(y_data_categorical.shape[0])
        print("Correlated label noise generation for feature {:d}:\n".format(col_id))
        print(
            "Labels flipped on {} samples out of {}: {:06.4f} ({:06.4f} requested)\n".format(
                flip_count,
                y_data_categorical.shape[0],
                y_noise_generated,
                y_noise_level,
            )
        )
    return y_data_categorical, y_noise_generated
# Add simple Gaussian noise to RNA seq values, assume normalized x data
def add_gaussian_noise(x_data, loc=0.0, scale=0.5):
    """Return ``x_data`` plus i.i.d. Gaussian noise N(loc, scale).

    The input (assumed normalized) is not modified; a new array is returned.
    """
    print("added gaussian noise")
    noise = np.random.normal(loc, scale, size=x_data.shape)
    return x_data + noise
# Add simple Gaussian noise to a list of RNA seq values, assume normalized x data
def add_column_noise(x_data, loc=0.0, scale=0.5, col_ids=(0,), noise_type="gaussian"):
    """Add noise to whole feature columns of ``x_data`` (assumed normalized).

    Parameters
    ----------
    x_data : 2-D numpy array, modified in place and returned.
    loc, scale : parameters of the noise distribution.
    col_ids : iterable of column indices to perturb
        (default is now an immutable tuple instead of a mutable list).
    noise_type : "gaussian" -> N(loc, scale); "uniform" -> U(-scale, scale).

    Raises
    ------
    ValueError for an unknown ``noise_type`` (previously this crashed with
    UnboundLocalError on the first column, or silently reused stale noise).
    """
    for col_id in col_ids:
        print("added", noise_type, "noise to column ", col_id)
        print(x_data[:, col_id].T)
        if noise_type == "gaussian":
            train_noise = np.random.normal(loc, scale, size=x_data.shape[0])
        elif noise_type == "uniform":
            train_noise = np.random.uniform(-1.0 * scale, scale, size=x_data.shape[0])
        else:
            raise ValueError("unknown noise_type: {!r}".format(noise_type))
        print(train_noise)
        x_data[:, col_id] = 1.0 * x_data[:, col_id] + 1.0 * train_noise.T
        print(x_data[:, col_id].T)
    return x_data
# Add noise to a list of RNA seq values, for a fraction of samples assume normalized x data
def add_cluster_noise(
    x_data,
    loc=0.0,
    scale=0.5,
    col_ids=(0,),
    noise_type="gaussian",
    row_ids=(0,),
    y_noise_level=0.0,
):
    """Add noise to the (row_ids x col_ids) cluster of ``x_data`` entries,
    perturbing each listed row only with probability ``y_noise_level``.

    ``x_data`` (assumed normalized) is modified in place and returned.
    ``noise_type`` is "gaussian" (N(loc, scale)) or "uniform"
    (U(-scale, scale)); any other value raises ValueError (previously this
    crashed with UnboundLocalError or silently reused stale noise).
    Default ``col_ids``/``row_ids`` are immutable tuples instead of the
    original mutable lists.
    """
    # loop over all samples
    num_samples = len(row_ids)
    flip_count = 0
    for row_id in row_ids:
        # only perturb a fraction of the samples
        if random.random() < y_noise_level:
            flip_count += 1
            for col_id in col_ids:
                print("added", noise_type, "noise to row, column ", row_id, col_id)
                print(x_data[row_id, col_id])
                if noise_type == "gaussian":
                    train_noise = np.random.normal(loc, scale)
                elif noise_type == "uniform":
                    train_noise = np.random.uniform(-1.0 * scale, scale)
                else:
                    raise ValueError("unknown noise_type: {!r}".format(noise_type))
                print(train_noise)
                x_data[row_id, col_id] = (
                    1.0 * x_data[row_id, col_id] + 1.0 * train_noise
                )
                print(x_data[row_id, col_id])
    y_noise_generated = float(flip_count) / float(num_samples)
    print(
        "Noise added to {} samples out of {}: {:06.4f} ({:06.4f} requested)\n".format(
            flip_count, num_samples, y_noise_generated, y_noise_level
        )
    )
    return x_data
| [
"numpy.random.normal",
"random.random",
"numpy.random.uniform"
] | [((5437, 5484), 'numpy.random.normal', 'np.random.normal', (['loc', 'scale'], {'size': 'x_data.shape'}), '(loc, scale, size=x_data.shape)\n', (5453, 5484), True, 'import numpy as np\n'), ((3589, 3604), 'random.random', 'random.random', ([], {}), '()\n', (3602, 3604), False, 'import random\n'), ((5895, 5945), 'numpy.random.normal', 'np.random.normal', (['loc', 'scale'], {'size': 'x_data.shape[0]'}), '(loc, scale, size=x_data.shape[0])\n', (5911, 5945), True, 'import numpy as np\n'), ((6634, 6649), 'random.random', 'random.random', ([], {}), '()\n', (6647, 6649), False, 'import random\n'), ((6010, 6070), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0 * scale)', 'scale'], {'size': 'x_data.shape[0]'}), '(-1.0 * scale, scale, size=x_data.shape[0])\n', (6027, 6070), True, 'import numpy as np\n'), ((4438, 4453), 'random.random', 'random.random', ([], {}), '()\n', (4451, 4453), False, 'import random\n'), ((6939, 6967), 'numpy.random.normal', 'np.random.normal', (['loc', 'scale'], {}), '(loc, scale)\n', (6955, 6967), True, 'import numpy as np\n'), ((7048, 7086), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0 * scale)', 'scale'], {}), '(-1.0 * scale, scale)\n', (7065, 7086), True, 'import numpy as np\n')] |
#!/usr/bin/env python
__author__ = "<EMAIL>"
"""
Reporter for junction summary for one or more samples.
Recommended to run before scrubbing sample GFFs prior to chaining.
Suggested process is:
1. run collapse to get GFF for each sample
2. run this report script on all sample GFFs
3. run scrubber on all sample GFFs
4. run chaining using scrubbed (cleaned) sample GFFs
"""
import sys
from collections import defaultdict
from csv import DictWriter
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import typer
from Bio import SeqIO
from sklearn.cluster import Birch
import cupcake.sequence.GFF as GFF
from cupcake import version_callback
from cupcake import cupcake_logger as logger
# Typer CLI application object; `main` below is registered as its command.
app = typer.Typer(name="cupcake.tofu.counting.summarize_sample_GFF_junctions")
def sanity_check(
    sample_dirs: Dict[str, Path],
    gff_filename: Union[str, Path],
    genome_filename: Optional[Union[str, Path]] = None,
    junction_filename: Optional[Union[str, Path]] = None,
) -> None:
    """Verify all expected input files exist; log and ``sys.exit(-1)`` otherwise.

    ``sample_dirs`` maps sample name -> sample directory (as returned by
    ``read_config``); each directory must contain ``gff_filename``.  The
    genome and junction files are only checked when given.
    """
    for d in sample_dirs.values():
        file = Path(d, gff_filename)
        if not file.exists():
            logger.error(f"Expected GFF file {file} does not exist. Abort!")
            sys.exit(-1)
    if genome_filename is not None and not Path(genome_filename).exists():
        logger.error(f"Genome file {genome_filename} given but does not exist. Abort!")
        sys.exit(-1)
    if junction_filename is not None and not Path(junction_filename).exists():
        logger.error(
            f"Junction file {junction_filename} given but does not exist. Abort!"
        )
        sys.exit(-1)
def read_config(
    filename: Union[str, Path]
) -> Tuple[Dict[str, Path], Path, Optional[Path], Optional[Path]]:
    """
    Parse a chain-style sample config file.

    Recognized lines:
        SAMPLE=<name>;<path>   (required, at least one; names may not start with tmp_)
    must also have
        GFF_FILENAME=
    optional:
        GENOME_FILENAME=
        JUNCTION_FILENAME=
        GROUP_FILENAME=
    Everything else will be ignored (so you can re-use sample.config for chain_samples.py)

    Returns (sample_dirs, gff_filename, genome_filename, junction_filename).
    The original return annotation advertised a 5-tuple including sample
    names, but only these four values were ever returned.
    """
    filename = Path(filename)  # tolerate plain-string paths from the CLI
    sample_dirs: Dict[str, Path] = {}
    sample_names: List[str] = []
    gff_filename: Optional[Path] = None
    genome_filename: Optional[Path] = None
    junction_filename: Optional[Path] = None
    if not filename.exists():
        # include the offending path (the old f-string had no placeholder)
        raise FileNotFoundError(f"The config file {filename} could not be found!")
    with open(filename) as f:
        for line in f:
            if line.startswith("tmpSAMPLE="):
                logger.error(
                    "Please only use SAMPLE=, not tmpSAMPLE= for junction reports!"
                )
                sys.exit(-1)
            elif line.startswith("SAMPLE="):
                name, path = line.strip()[len("SAMPLE=") :].split(";")
                if name.startswith("tmp_"):
                    logger.error(
                        f"Sample names are not allowed to start with tmp_! Please change {name} to something else."
                    )
                    sys.exit(-1)
                sample_dirs[name] = Path(path).resolve()
                sample_names.append(name)
            elif line.startswith("GFF_FILENAME="):
                gff_filename = Path(line.strip()[len("GFF_FILENAME=") :])
            elif line.startswith("GENOME_FILENAME="):
                genome_filename = Path(line.strip()[len("GENOME_FILENAME=") :])
            elif line.startswith("JUNCTION_FILENAME="):
                junction_filename = Path(line.strip()[len("JUNCTION_FILENAME=") :])
    if gff_filename is None:
        raise Exception(
            f"Expected GFF_FILENAME= but not in config file {filename}! Abort."
        )
    if len(sample_names) == 0:
        logger.error("No samples given. Exit.")
        sys.exit(-1)
    return sample_dirs, gff_filename, genome_filename, junction_filename
def read_annotation_junction_bed(junction_filename: Union[str, Path]) -> defaultdict:
    """
    junction.bed is in format:
    seqname, left (0-based), right (0-based), +/-

    following junction.bed format from TopHat
    http://ccb.jhu.edu/software/tophat/manual.shtml

    Returns a dict keyed by (seqname, strand) mapping (left, right) -> 1.
    """
    junction = defaultdict(dict)  # (seqname, strand) --> (start, end)
    # context manager closes the handle; the old `for line in open(...)`
    # leaked the file descriptor
    with open(junction_filename) as f:
        for line in f:
            chrom, left, right, strand = line.strip().split("\t")
            junction[chrom, strand][(int(left), int(right))] = 1
    return junction
def summarize_junctions(
    sample_dirs: Dict[str, Path],
    # sample_names: List[str],
    gff_filename: Union[str, Path],
    output_prefix: Union[str, Path],
    genome_d: Optional[Union[str, Path]] = None,
    junction_known: Optional[Union[str, Path]] = None,
) -> defaultdict:
    """
    1. for each sample, read all the GFF, store the junction information (both 0-based)

    Writes two reports:
      <output_prefix>.junction.bed        -- TopHat-style BED track of junctions
      <output_prefix>.junction_detail.txt -- per-junction TSV (sample counts,
                                             splice-site dinucleotides, annotation flag, cluster label)

    Returns the (seqname, strand) -> (donor, acceptor) -> [sample names] mapping.

    NOTE(review): despite the annotations, ``genome_d`` is used as a dict of
    sequence records (indexed by seqname, sliced, ``.seq`` accessed) and
    ``junction_known`` as a dict keyed by (seqname, strand) -- see main().
    """
    junc_by_chr_strand = defaultdict(
        lambda: defaultdict(list)
    )  # (seqname,strand) --> (donor,acceptor) --> samples it show up in (more than once possible)
    for sample_name, d in sample_dirs.items():
        for r in GFF.collapseGFFReader(Path(d, gff_filename)):
            n = len(r.ref_exons)
            if n == 1:
                continue  # ignore single exon transcripts
            for i in range(n - 1):
                donor = r.ref_exons[i].end - 1  # make it 0-based
                accep = r.ref_exons[i + 1].start  # start is already 0-based
                junc_by_chr_strand[r.seqname, r.strand][donor, accep].append(
                    sample_name
                )
    # write junction report
    with open(f"{output_prefix}.junction.bed", "w") as f1, open(
        f"{output_prefix}.junction_detail.txt", "w"
    ) as f:
        f1.write(f'track name=junctions description="{output_prefix}" useScore=1\n')
        JUNC_DETAIL_FIELDS = [
            "seqname",
            "left",
            "right",
            "strand",
            "num_transcript",
            "num_sample",
            "genome",
            "annotation",
            "label",
        ]
        writer = DictWriter(f, JUNC_DETAIL_FIELDS, delimiter="\t")
        writer.writeheader()
        keys = list(junc_by_chr_strand)
        keys.sort()
        for _seqname, _strand in keys:
            v = junc_by_chr_strand[_seqname, _strand]
            v_keys = list(v)
            v_keys.sort()
            # cluster nearby junctions of this chromosome/strand (BIRCH)
            labels = cluster_junctions(v_keys)
            for i, (_donor, _accep) in enumerate(v_keys):
                rec = {
                    "seqname": _seqname,
                    "left": _donor,
                    "right": _accep,
                    "strand": _strand,
                    "num_transcript": len(v[_donor, _accep]),
                    "num_sample": len(set(v[_donor, _accep])),
                }
                # _accep + 1 converts the inclusive coordinate to a BED end
                f1.write(
                    f"{_seqname}\t{_donor}\t{_accep + 1}\t{output_prefix}\t{len(v[_donor, _accep])}\t{_strand}\n"
                )
                # if genome is given, write acceptor-donor site
                if genome_d is None or _seqname not in genome_d:
                    rec["genome"] = "NA"
                else:
                    # two bases just inside the intron on each side
                    up, down = (
                        genome_d[_seqname][(_donor + 1) : (_donor + 3)],
                        genome_d[_seqname][(_accep - 2) : _accep],
                    )
                    if _strand == "+":
                        rec["genome"] = f"{str(up.seq).upper()}-{str(down.seq).upper()}"
                    else:
                        rec[
                            "genome"
                        ] = f"{str(down.reverse_complement().seq).upper()}-{str(up.reverse_complement().seq).upper()}"
                # if annotation is given, check if matches with annotation
                if junction_known is None:
                    rec["annotation"] = "NA"
                else:
                    if (_seqname, _strand) in junction_known and (
                        _donor,
                        _accep,
                    ) in junction_known[_seqname, _strand]:
                        rec["annotation"] = "Y"
                    else:
                        rec["annotation"] = "N"
                rec["label"] = f"{_seqname}_{_strand}_{labels[i]}"
                writer.writerow(rec)
    return junc_by_chr_strand
def cluster_junctions(juncs: List[Tuple[int, int]]) -> np.ndarray:
    """Cluster junction (donor, acceptor) coordinate pairs with BIRCH.

    ``n_clusters=None`` leaves the number of clusters unconstrained, so
    junctions whose coordinates fall within the BIRCH subcluster threshold
    (3) are grouped together.  Returns one integer cluster label per input
    junction, in input order.
    """
    birch_model = Birch(threshold=3, n_clusters=None)
    X = np.array(juncs)
    birch_model.fit(X)
    return birch_model.labels_
@app.command(name="")
def main(
    config: Union[str, Path] = typer.Argument(..., help="Config filename"),
    output_prefix: str = typer.Argument(..., help="Output prefix"),
    version: bool = typer.Option(
        None,
        "--version",
        callback=version_callback,
        is_eager=True,
        help="Prints the version of the SQANTI3 package.",
    ),
):
    # CLI entry point: parse the sample config, check inputs, then write the
    # junction summary reports for all samples.
    try:
        (
            sample_dirs,
            gff_filename,
            genome_filename,
            junction_filename,
        ) = read_config(config)
    except FileNotFoundError as error:
        # NOTE(review): execution continues after logging, leaving the
        # variables above unbound, so the sanity_check call below raises
        # NameError -- this probably should exit/re-raise. Confirm intent.
        logger.error(error)
    sanity_check(sample_dirs, gff_filename, genome_filename, junction_filename)
    if genome_filename is not None:
        logger.info(f"Reading genome file {genome_filename}...")
        genome_d = SeqIO.to_dict(SeqIO.parse(open(genome_filename), "fasta"))
    else:
        logger.info("No genome file given. Ignore.")
        genome_d = None
    if junction_filename is not None:
        logger.info(f"Reading junction file {junction_filename}....")
        junction_bed = read_annotation_junction_bed(junction_filename)
    else:
        logger.info("No junction file given. Ignore.")
        junction_bed = None
    summarize_junctions(
        sample_dirs,
        gff_filename,
        output_prefix,
        genome_d,
        junction_bed,
    )
if __name__ == "__main__":
    # Let typer handle argument parsing and invoke main().
    typer.run(main)
| [
"csv.DictWriter",
"cupcake.cupcake_logger.info",
"pathlib.Path",
"typer.Option",
"typer.Typer",
"numpy.array",
"collections.defaultdict",
"sys.exit",
"cupcake.cupcake_logger.error",
"typer.run",
"sklearn.cluster.Birch",
"typer.Argument"
] | [((742, 814), 'typer.Typer', 'typer.Typer', ([], {'name': '"""cupcake.tofu.counting.summarize_sample_GFF_junctions"""'}), "(name='cupcake.tofu.counting.summarize_sample_GFF_junctions')\n", (753, 814), False, 'import typer\n'), ((4085, 4102), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (4096, 4102), False, 'from collections import defaultdict\n'), ((8869, 8904), 'sklearn.cluster.Birch', 'Birch', ([], {'threshold': '(3)', 'n_clusters': 'None'}), '(threshold=3, n_clusters=None)\n', (8874, 8904), False, 'from sklearn.cluster import Birch\n'), ((8913, 8928), 'numpy.array', 'np.array', (['juncs'], {}), '(juncs)\n', (8921, 8928), True, 'import numpy as np\n'), ((9049, 9092), 'typer.Argument', 'typer.Argument', (['...'], {'help': '"""Config filename"""'}), "(..., help='Config filename')\n", (9063, 9092), False, 'import typer\n'), ((9119, 9160), 'typer.Argument', 'typer.Argument', (['...'], {'help': '"""Output prefix"""'}), "(..., help='Output prefix')\n", (9133, 9160), False, 'import typer\n'), ((9182, 9310), 'typer.Option', 'typer.Option', (['None', '"""--version"""'], {'callback': 'version_callback', 'is_eager': '(True)', 'help': '"""Prints the version of the SQANTI3 package."""'}), "(None, '--version', callback=version_callback, is_eager=True,\n help='Prints the version of the SQANTI3 package.')\n", (9194, 9310), False, 'import typer\n'), ((10380, 10395), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (10389, 10395), False, 'import typer\n'), ((1098, 1119), 'pathlib.Path', 'Path', (['d', 'gff_filename'], {}), '(d, gff_filename)\n', (1102, 1119), False, 'from pathlib import Path\n'), ((1336, 1415), 'cupcake.cupcake_logger.error', 'logger.error', (['f"""Genome file {genome_filename} given but does not exist. Abort!"""'], {}), "(f'Genome file {genome_filename} given but does not exist. 
Abort!')\n", (1348, 1415), True, 'from cupcake import cupcake_logger as logger\n'), ((1424, 1436), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1432, 1436), False, 'import sys\n'), ((1525, 1613), 'cupcake.cupcake_logger.error', 'logger.error', (['f"""Junction file {junction_filename} given but does not exist. Abort!"""'], {}), "(\n f'Junction file {junction_filename} given but does not exist. Abort!')\n", (1537, 1613), True, 'from cupcake import cupcake_logger as logger\n'), ((1639, 1651), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1647, 1651), False, 'import sys\n'), ((3650, 3689), 'cupcake.cupcake_logger.error', 'logger.error', (['"""No samples given. Exit."""'], {}), "('No samples given. Exit.')\n", (3662, 3689), True, 'from cupcake import cupcake_logger as logger\n'), ((3698, 3710), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3706, 3710), False, 'import sys\n'), ((5934, 5983), 'csv.DictWriter', 'DictWriter', (['f', 'JUNC_DETAIL_FIELDS'], {'delimiter': '"""\t"""'}), "(f, JUNC_DETAIL_FIELDS, delimiter='\\t')\n", (5944, 5983), False, 'from csv import DictWriter\n'), ((9714, 9770), 'cupcake.cupcake_logger.info', 'logger.info', (['f"""Reading genome file {genome_filename}..."""'], {}), "(f'Reading genome file {genome_filename}...')\n", (9725, 9770), True, 'from cupcake import cupcake_logger as logger\n'), ((9867, 9911), 'cupcake.cupcake_logger.info', 'logger.info', (['"""No genome file given. Ignore."""'], {}), "('No genome file given. Ignore.')\n", (9878, 9911), True, 'from cupcake import cupcake_logger as logger\n'), ((9983, 10044), 'cupcake.cupcake_logger.info', 'logger.info', (['f"""Reading junction file {junction_filename}...."""'], {}), "(f'Reading junction file {junction_filename}....')\n", (9994, 10044), True, 'from cupcake import cupcake_logger as logger\n'), ((10134, 10180), 'cupcake.cupcake_logger.info', 'logger.info', (['"""No junction file given. Ignore."""'], {}), "('No junction file given. 
Ignore.')\n", (10145, 10180), True, 'from cupcake import cupcake_logger as logger\n'), ((1162, 1226), 'cupcake.cupcake_logger.error', 'logger.error', (['f"""Expected GFF file {file} does not exist. Abort!"""'], {}), "(f'Expected GFF file {file} does not exist. Abort!')\n", (1174, 1226), True, 'from cupcake import cupcake_logger as logger\n'), ((1239, 1251), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1247, 1251), False, 'import sys\n'), ((4771, 4788), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4782, 4788), False, 'from collections import defaultdict\n'), ((4975, 4996), 'pathlib.Path', 'Path', (['d', 'gff_filename'], {}), '(d, gff_filename)\n', (4979, 4996), False, 'from pathlib import Path\n'), ((9568, 9587), 'cupcake.cupcake_logger.error', 'logger.error', (['error'], {}), '(error)\n', (9580, 9587), True, 'from cupcake import cupcake_logger as logger\n'), ((2457, 2534), 'cupcake.cupcake_logger.error', 'logger.error', (['"""Please only use SAMPLE=, not tmpSAMPLE= for junction reports!"""'], {}), "('Please only use SAMPLE=, not tmpSAMPLE= for junction reports!')\n", (2469, 2534), True, 'from cupcake import cupcake_logger as logger\n'), ((2589, 2601), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2597, 2601), False, 'import sys\n'), ((1296, 1317), 'pathlib.Path', 'Path', (['genome_filename'], {}), '(genome_filename)\n', (1300, 1317), False, 'from pathlib import Path\n'), ((1483, 1506), 'pathlib.Path', 'Path', (['junction_filename'], {}), '(junction_filename)\n', (1487, 1506), False, 'from pathlib import Path\n'), ((2782, 2897), 'cupcake.cupcake_logger.error', 'logger.error', (['f"""Sample names are not allowed to start with tmp_! Please change {name} to something else."""'], {}), "(\n f'Sample names are not allowed to start with tmp_! 
Please change {name} to something else.'\n )\n", (2794, 2897), True, 'from cupcake import cupcake_logger as logger\n'), ((2954, 2966), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (2962, 2966), False, 'import sys\n'), ((3003, 3013), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (3007, 3013), False, 'from pathlib import Path\n')] |
from collections import OrderedDict, namedtuple
from typing import Tuple
import numpy as np
import torch
import torch.optim as optim
from rlkit.core.loss import LossFunction, LossStatistics
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.core.logging import add_prefix
import gtimer as gt
import os
import random
# Bundle of the per-batch training losses produced by compute_loss();
# state_estimator_loss may be None when no measured transitions are in the batch.
SACLosses = namedtuple(
    'SACLosses',
    'policy_loss qf1_loss qf2_loss alpha_loss state_estimator_loss',
)
# Active Soft Actor Critic
class ASACTrainer(TorchTrainer, LossFunction):
def __init__(
self,
env,
policy,
qf1,
qf2,
target_qf1,
target_qf2,
state_estimator,
discount=0.99,
reward_scale=1.0,
cost=1e-4, # Measurement Cost
policy_lr=1e-3,
qf_lr=1e-3,
optimizer_class=optim.Adam,
replay="nope",
soft_target_tau=1e-2,
target_update_period=1,
plotter=None,
render_eval_paths=False,
use_automatic_entropy_tuning=True,
target_entropy=None,
device = 'cpu'
):
super().__init__()
self.device = device
self.env = env
self.policy = policy
self.qf1 = qf1
self.qf2 = qf2
self.target_qf1 = target_qf1
self.target_qf2 = target_qf2
self.soft_target_tau = soft_target_tau
self.target_update_period = target_update_period
self.cost = cost
self.replay = replay
self.state_estimator = state_estimator
self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
if self.use_automatic_entropy_tuning:
if target_entropy is None:
# Use heuristic value from SAC paper
self.target_entropy = -np.prod(
self.env.action_space.shape).item()
else:
self.target_entropy = target_entropy
self.log_alpha = ptu.zeros(1, requires_grad=True)
self.alpha_optimizer = optimizer_class(
[self.log_alpha],
lr=policy_lr,
)
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.qf_criterion = nn.MSELoss()
self.policy_optimizer = optimizer_class(
self.policy.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.discount = discount
self.reward_scale = reward_scale
self._n_train_steps_total = 0
self._need_to_update_eval_statistics = True
self.eval_statistics = OrderedDict()
num_batch = 8000
num_sample_steps = 6000
print("beginning relay")
if replay == "txt":
# Read in buffer for training ASAC with "expert" data
observations = torch.Tensor(np.loadtxt("observations.txt"))
print("loaded obs")
actions = torch.Tensor(np.loadtxt("actions.txt"))
print("loaded acts")
print("actions[0]: ", actions[0])
next_observations = torch.Tensor(np.loadtxt("next_observations.txt"))
print("loaded nxt_obs")
all_indices = list(range(len(observations)))
for i in range(num_batch):
print(i)
random_sample_indices = random.sample(all_indices, num_sample_steps)
state_estimator_pred = self.state_estimator.get_predictions(
[observations[index] for index in random_sample_indices].to(self.device),
[actions[index] for index in random_sample_indices].to(self.device)
)
state_estimator_losses = self.state_estimator.get_losses(
state_estimator_pred,
[next_observations[index] for index in random_sample_indices].to(self.device)
)
self.state_estimator.update_networks(state_estimator_losses)
elif replay == "npy" or replay == "concat":
prefix = "data/replay buffer"
if replay == "npy":
count = 0
buffer_size = int(1e9)
observations = np.zeros((buffer_size,17))
actions = np.zeros((buffer_size,6))
next_observations = np.zeros((buffer_size,17))
index = 0
with open('observations.npy', 'rb') as obs, open('actions.npy', 'rb'
) as act, open('next_observations.npy', 'rb') as next_obs:
try:
while True:
temp = np.load(obs)
size = temp.shape[0]
observations[index:size + index] = temp
actions[index:size + index] = np.load(act)
next_observations[index:size + index] = np.load(next_obs)
count += 1
index += size
if index >= buffer_size: # Do not read all steps into buffer - too large
print(f"\nbuffer reached, {count} lines\n")
break
except ValueError:
print(f"\nend of file, {count} lines\n")
observations = observations[:index]
actions = actions[:index]
next_observations = next_observations[:index]
else:
with open(f'{prefix}/concat_obs.npy', 'rb') as f:
observations = np.load(f)
with open(f'{prefix}/concat_acts.npy', 'rb') as f:
actions = np.load(f)
with open(f'{prefix}/concat_nextobs.npy', 'rb') as f:
next_observations = np.load(f)
print("Finished reading buffer files, beginning state-estimator training")
obs_size = len(observations)
observations = torch.tensor(observations).float().to(self.device)
actions = torch.tensor(actions).float().to(self.device)
next_observations = torch.tensor(next_observations).float().to(self.device)
probs = torch.ones(obs_size).to(self.device)
for i in range(num_batch):
if i % 100 == 0:
print(f"Beginning training round {i}")
index = probs.multinomial(num_samples=num_sample_steps, replacement=False)
obs_sample = observations[index]
acts_sample = actions[index]
next_obs_sample = next_observations[index]
state_estimator_pred = self.state_estimator.get_predictions(
obs_sample,
acts_sample
)
state_estimator_losses = self.state_estimator.get_losses(
state_estimator_pred,
next_obs_sample
)
self.state_estimator.update_networks(state_estimator_losses)
print("State estimator training complete")
    def train_from_torch(self, batch):
        """Entry point for one ASAC gradient step on a torch batch.

        Computes all losses via compute_loss, then steps the alpha, policy
        (with gradient clipping), and both Q optimizers, updates the state
        estimator when it produced a loss, and finally polyak-updates the
        target networks on schedule.
        """
        gt.blank_stamp()
        # only gather eval statistics for the first batch of each epoch
        losses, stats = self.compute_loss(
            batch,
            skip_statistics=not self._need_to_update_eval_statistics,
        )
        """
        Update networks
        """
        if self.use_automatic_entropy_tuning:
            self.alpha_optimizer.zero_grad()
            losses.alpha_loss.backward()
            self.alpha_optimizer.step()
        self.policy_optimizer.zero_grad()
        losses.policy_loss.backward()
        # clip policy gradients to unit norm before stepping
        clipping_value = 1
        torch.nn.utils.clip_grad_norm_(self.policy.parameters(), clipping_value)
        self.policy_optimizer.step()
        self.qf1_optimizer.zero_grad()
        losses.qf1_loss.backward()
        self.qf1_optimizer.step()
        self.qf2_optimizer.zero_grad()
        losses.qf2_loss.backward()
        self.qf2_optimizer.step()
        # state_estimator_loss is None when no transitions were measured
        if losses.state_estimator_loss is not None:
            self.state_estimator.update_networks(losses.state_estimator_loss)
        self._n_train_steps_total += 1
        self.try_update_target_networks()
        if self._need_to_update_eval_statistics:
            self.eval_statistics = stats
            # Compute statistics using only one batch per epoch
            self._need_to_update_eval_statistics = False
        gt.stamp('sac training', unique=False)
def try_update_target_networks(self):
if self._n_train_steps_total % self.target_update_period == 0:
self.update_target_networks()
def update_target_networks(self):
ptu.soft_update_from_to(
self.qf1, self.target_qf1, self.soft_target_tau
)
ptu.soft_update_from_to(
self.qf2, self.target_qf2, self.soft_target_tau
)
    def compute_loss(
        self,
        batch,
        skip_statistics=False,
    ) -> Tuple[SACLosses, LossStatistics]:
        """Compute all ASAC losses for one batch.

        The last action dimension is the measurement decision: values >= 0
        mean "measure", which incurs self.cost (subtracted from the reward in
        the Q target) and contributes the transition to the state-estimator
        training subset.  Returns (SACLosses, eval statistics dict); the
        statistics dict is empty when skip_statistics is True.
        """
        rewards = batch['rewards'] # torch.Size([256, 1])
        terminals = batch['terminals']
        obs = batch['observations'] # torch.Size([256, 17])
        actions = batch['actions'] # torch.Size([256, 7])
        actions_without_measure = actions[:,:-1] # [256, 6]
        next_obs = batch['next_observations']
        next_obs_only_measure = torch.zeros(next_obs.shape).to(self.device) # Fill only with measured next_observations
        obs_only_measure = torch.zeros(obs.shape).to(self.device) # Observations corresponding to above next_observations
        actions_without_measure_only_measure = torch.zeros(actions_without_measure.shape).to(self.device) # Acts corresponding to above
        num_times_measured = 0
        # Calculate costs based on measure/non-measure
        # Fill _only_measure tensors only with steps that model measured
        costs = torch.zeros(rewards.size()).to(self.device)
        for i in range(len(rewards)):
            if actions[i][-1] >= 0.0: # Range is (-1, 1); [0, 1) is measure
                costs[i] = self.cost
                next_obs_only_measure[num_times_measured] = next_obs[i]
                obs_only_measure[num_times_measured] = obs[i]
                actions_without_measure_only_measure[num_times_measured] = actions_without_measure[i]
                num_times_measured += 1
        # slice off empty space
        next_obs_only_measure = next_obs_only_measure[:num_times_measured]
        obs_only_measure = obs_only_measure[:num_times_measured]
        actions_without_measure_only_measure = actions_without_measure_only_measure[:num_times_measured]
        """
        Policy and Alpha Loss
        """
        dist = self.policy(obs) # Gets distribution for stochastic action
        new_obs_actions, log_pi = dist.rsample_and_logprob() # Chooses action from distribution
        log_pi = log_pi.unsqueeze(-1)
        if self.use_automatic_entropy_tuning:
            # standard SAC temperature loss driving entropy toward target_entropy
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
            alpha = self.log_alpha.exp()
        else:
            alpha_loss = 0
            alpha = 1
        q_new_actions = torch.min(
            self.qf1(obs, new_obs_actions),
            self.qf2(obs, new_obs_actions),
        ) # Finds min-Q value from both Q tables for this action
        policy_loss = (alpha*log_pi - q_new_actions).mean()
        """
        State Estimator Loss
        """
        # train the state estimator only on the transitions that were measured
        if num_times_measured > 0:
            state_estimator_pred = self.state_estimator.get_predictions(
                obs_only_measure,
                actions_without_measure_only_measure
            )
            state_estimator_losses = self.state_estimator.get_losses(
                state_estimator_pred,
                next_obs_only_measure
            )
        else:
            state_estimator_losses = None
        """
        QF Loss
        """
        q1_pred = self.qf1(obs, actions)
        q2_pred = self.qf2(obs, actions)
        next_dist = self.policy(next_obs)
        new_next_actions, new_log_pi = next_dist.rsample_and_logprob()
        new_log_pi = new_log_pi.unsqueeze(-1)
        # clipped double-Q target with entropy bonus
        target_q_values = torch.min(
            self.target_qf1(next_obs, new_next_actions),
            self.target_qf2(next_obs, new_next_actions),
        ) - alpha * new_log_pi
        q_target = self.reward_scale * (rewards - costs) + (1. - terminals) * self.discount * target_q_values # Update with Cost
        qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
        qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
        """
        Save some statistics for eval
        """
        eval_statistics = OrderedDict()
        if not skip_statistics:
            total_loss = 0.
            if state_estimator_losses is not None:
                for i in range(self.state_estimator.get_ensemble_count()):
                    individual_loss = np.mean(ptu.get_numpy(state_estimator_losses[i]))
                    total_loss += individual_loss
                    eval_statistics[f'State Estimator {i} Loss'] = individual_loss
                eval_statistics['State Estimator Mean Loss'] = total_loss / self.state_estimator.get_ensemble_count()
            eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
            eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
            eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
                policy_loss
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Q1 Predictions',
                ptu.get_numpy(q1_pred),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Q2 Predictions',
                ptu.get_numpy(q2_pred),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Q Targets',
                ptu.get_numpy(q_target),
            ))
            eval_statistics.update(create_stats_ordered_dict(
                'Log Pis',
                ptu.get_numpy(log_pi),
            ))
            policy_statistics = add_prefix(dist.get_diagnostics(), "policy/")
            eval_statistics.update(policy_statistics)
            if self.use_automatic_entropy_tuning:
                eval_statistics['Alpha'] = alpha.item()
                eval_statistics['Alpha Loss'] = alpha_loss.item()
        loss = SACLosses(
            policy_loss=policy_loss,
            qf1_loss=qf1_loss,
            qf2_loss=qf2_loss,
            alpha_loss=alpha_loss,
            state_estimator_loss=state_estimator_losses,
        )
        return loss, eval_statistics
def get_diagnostics(self):
stats = super().get_diagnostics()
stats.update(self.eval_statistics)
return stats
    def end_epoch(self, epoch):
        # Re-arm statistics collection so the next epoch records a fresh batch.
        self._need_to_update_eval_statistics = True
    @property
    def networks(self):
        # Every network module owned by the trainer, targets and state
        # estimator included.
        return [
            self.policy,
            self.qf1,
            self.qf2,
            self.target_qf1,
            self.target_qf2,
            self.state_estimator,
        ]
    @property
    def optimizers(self):
        # NOTE(review): self.state_estimator_optimizer is never assigned in
        # __init__ (the estimator updates itself via update_networks), and
        # self.alpha_optimizer only exists when use_automatic_entropy_tuning
        # is True -- accessing this property can raise AttributeError.
        return [
            self.alpha_optimizer,
            self.qf1_optimizer,
            self.qf2_optimizer,
            self.policy_optimizer,
            self.state_estimator_optimizer,
        ]
def get_snapshot(self):
return dict(
policy=self.policy,
qf1=self.qf1,
qf2=self.qf2,
target_qf1=self.target_qf1,
target_qf2=self.target_qf2,
) | [
"numpy.prod",
"collections.OrderedDict",
"collections.namedtuple",
"random.sample",
"torch.ones",
"torch.nn.MSELoss",
"gtimer.blank_stamp",
"numpy.loadtxt",
"rlkit.torch.pytorch_util.get_numpy",
"numpy.zeros",
"torch.tensor",
"torch.zeros",
"rlkit.torch.pytorch_util.soft_update_from_to",
"... | [((472, 564), 'collections.namedtuple', 'namedtuple', (['"""SACLosses"""', '"""policy_loss qf1_loss qf2_loss alpha_loss state_estimator_loss"""'], {}), "('SACLosses',\n 'policy_loss qf1_loss qf2_loss alpha_loss state_estimator_loss')\n", (482, 564), False, 'from collections import OrderedDict, namedtuple\n'), ((2425, 2437), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2435, 2437), True, 'from torch import nn as nn\n'), ((2984, 2997), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2995, 2997), False, 'from collections import OrderedDict, namedtuple\n'), ((7582, 7598), 'gtimer.blank_stamp', 'gt.blank_stamp', ([], {}), '()\n', (7596, 7598), True, 'import gtimer as gt\n'), ((8838, 8876), 'gtimer.stamp', 'gt.stamp', (['"""sac training"""'], {'unique': '(False)'}), "('sac training', unique=False)\n", (8846, 8876), True, 'import gtimer as gt\n'), ((9080, 9152), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf1', 'self.target_qf1', 'self.soft_target_tau'], {}), '(self.qf1, self.target_qf1, self.soft_target_tau)\n', (9103, 9152), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9183, 9255), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf2', 'self.target_qf2', 'self.soft_target_tau'], {}), '(self.qf2, self.target_qf2, self.soft_target_tau)\n', (9206, 9255), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((13418, 13431), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13429, 13431), False, 'from collections import OrderedDict, namedtuple\n'), ((2150, 2182), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (2159, 2182), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3223, 3253), 'numpy.loadtxt', 'np.loadtxt', (['"""observations.txt"""'], {}), "('observations.txt')\n", (3233, 3253), True, 'import numpy as np\n'), ((3322, 3347), 'numpy.loadtxt', 'np.loadtxt', (['"""actions.txt"""'], 
{}), "('actions.txt')\n", (3332, 3347), True, 'import numpy as np\n'), ((3473, 3508), 'numpy.loadtxt', 'np.loadtxt', (['"""next_observations.txt"""'], {}), "('next_observations.txt')\n", (3483, 3508), True, 'import numpy as np\n'), ((3707, 3751), 'random.sample', 'random.sample', (['all_indices', 'num_sample_steps'], {}), '(all_indices, num_sample_steps)\n', (3720, 3751), False, 'import random\n'), ((9760, 9787), 'torch.zeros', 'torch.zeros', (['next_obs.shape'], {}), '(next_obs.shape)\n', (9771, 9787), False, 'import torch\n'), ((9875, 9897), 'torch.zeros', 'torch.zeros', (['obs.shape'], {}), '(obs.shape)\n', (9886, 9897), False, 'import torch\n'), ((10017, 10059), 'torch.zeros', 'torch.zeros', (['actions_without_measure.shape'], {}), '(actions_without_measure.shape)\n', (10028, 10059), False, 'import torch\n'), ((14007, 14030), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf1_loss'], {}), '(qf1_loss)\n', (14020, 14030), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14082, 14105), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf2_loss'], {}), '(qf2_loss)\n', (14095, 14105), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14160, 14186), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (14173, 14186), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((4562, 4589), 'numpy.zeros', 'np.zeros', (['(buffer_size, 17)'], {}), '((buffer_size, 17))\n', (4570, 4589), True, 'import numpy as np\n'), ((4615, 4641), 'numpy.zeros', 'np.zeros', (['(buffer_size, 6)'], {}), '((buffer_size, 6))\n', (4623, 4641), True, 'import numpy as np\n'), ((4677, 4704), 'numpy.zeros', 'np.zeros', (['(buffer_size, 17)'], {}), '((buffer_size, 17))\n', (4685, 4704), True, 'import numpy as np\n'), ((14330, 14352), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q1_pred'], {}), '(q1_pred)\n', (14343, 14352), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14481, 14503), 
'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q2_pred'], {}), '(q2_pred)\n', (14494, 14503), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14627, 14650), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_target'], {}), '(q_target)\n', (14640, 14650), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((14772, 14793), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['log_pi'], {}), '(log_pi)\n', (14785, 14793), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5980, 5990), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (5987, 5990), True, 'import numpy as np\n'), ((6088, 6098), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (6095, 6098), True, 'import numpy as np\n'), ((6209, 6219), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (6216, 6219), True, 'import numpy as np\n'), ((6603, 6623), 'torch.ones', 'torch.ones', (['obs_size'], {}), '(obs_size)\n', (6613, 6623), False, 'import torch\n'), ((13664, 13704), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['state_estimator_losses[i]'], {}), '(state_estimator_losses[i])\n', (13677, 13704), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1985, 2021), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (1992, 2021), True, 'import numpy as np\n'), ((4994, 5006), 'numpy.load', 'np.load', (['obs'], {}), '(obs)\n', (5001, 5006), True, 'import numpy as np\n'), ((5182, 5194), 'numpy.load', 'np.load', (['act'], {}), '(act)\n', (5189, 5194), True, 'import numpy as np\n'), ((5263, 5280), 'numpy.load', 'np.load', (['next_obs'], {}), '(next_obs)\n', (5270, 5280), True, 'import numpy as np\n'), ((6376, 6402), 'torch.tensor', 'torch.tensor', (['observations'], {}), '(observations)\n', (6388, 6402), False, 'import torch\n'), ((6449, 6470), 'torch.tensor', 'torch.tensor', (['actions'], {}), '(actions)\n', (6461, 6470), False, 'import torch\n'), ((6527, 6558), 'torch.tensor', 'torch.tensor', (['next_observations'], {}), 
'(next_observations)\n', (6539, 6558), False, 'import torch\n')] |
import numpy as np
import torch
import ptan
def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
"""
Convert batch into training tensors
:param batch:
:param net:
:return: states variable, actions tensor, reference values variable
"""
states = []
actions = []
rewards = []
not_done_idx = []
last_states = []
for idx, exp in enumerate(batch):
states.append(exp.state)
actions.append(exp.action)
rewards.append(exp.reward)
if exp.last_state is not None:
not_done_idx.append(idx)
last_states.append(exp.last_state)
states_v = ptan.agent.float32_preprocessor(states).to(device)
actions_v = torch.FloatTensor(actions).to(device)
# handle rewards
rewards_np = np.array(rewards, dtype=np.float32)
if not_done_idx:
last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
last_vals_v = net(last_states_v)
last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
rewards_np[not_done_idx] += last_val_gamma * last_vals_np
ref_vals_v = torch.FloatTensor(rewards_np).to(device)
return states_v, actions_v, ref_vals_v
| [
"numpy.array",
"ptan.agent.float32_preprocessor",
"torch.FloatTensor"
] | [((789, 824), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (797, 824), True, 'import numpy as np\n'), ((645, 684), 'ptan.agent.float32_preprocessor', 'ptan.agent.float32_preprocessor', (['states'], {}), '(states)\n', (676, 684), False, 'import ptan\n'), ((712, 738), 'torch.FloatTensor', 'torch.FloatTensor', (['actions'], {}), '(actions)\n', (729, 738), False, 'import torch\n'), ((1111, 1140), 'torch.FloatTensor', 'torch.FloatTensor', (['rewards_np'], {}), '(rewards_np)\n', (1128, 1140), False, 'import torch\n'), ((870, 914), 'ptan.agent.float32_preprocessor', 'ptan.agent.float32_preprocessor', (['last_states'], {}), '(last_states)\n', (901, 914), False, 'import ptan\n')] |
# -*- coding: utf-8 -*-
"""
Create Synthetic data.
"""
import numpy.random as rd
import numpy as np
import time
class CreateData(object):
def __init__(self, users, arms, dims, seed=int(time.time())):
self.users = users
self.arms = arms
self.dims = dims
self.data_rand = rd.RandomState(seed)
self.contexts = np.ones((self.users, self.arms, self.dims), dtype=np.float)
# ---- create a matrix of shape users*arms*dims (currently users should always set to 1) ----
def data(self, mean, var):
assert len(mean) == self.dims
assert len(var) == self.dims
res = []
mean = np.array(mean)
var = np.array(var)
for i in range(self.users):
res.append(self.data_rand.normal(mean, var, (self.arms, self.dims)))
return np.array(res)
def get_synthetic_context(self, args):
"""
Generate Synthetic Contexts via given mean and variance
"""
#create_data = CreateData(args.num_of_users, args.num_of_arms, args.dim)
mean = [0.2, 0.9, 0.5, 3, 1.1, 0.9, 2, 2.5, 1.6, 1.8] * int(self.dims / 10)
var = [3, 2, 4, 3, 3.5, 5.5, 5, 3.5, 5, 3.5] * int(self.dims / 10)
context_gen = self.data(mean, var)
# normalize
ctx_norm = np.max(np.sqrt(np.sum(context_gen * context_gen, 2)), 1)
for idx in range(self.users):
context_gen[idx] = context_gen[idx] / ctx_norm[idx]
self.contexts = context_gen
def sample_spherical(self):
"""
Generate Synthetic Contexts via randomly sampling from unit ball. Number of users = 1
"""
vec = np.random.randn(self.dims, self.arms)
vec /= np.linalg.norm(vec, axis=0)
self.contexts = vec.T
| [
"numpy.ones",
"time.time",
"numpy.array",
"numpy.sum",
"numpy.linalg.norm",
"numpy.random.randn",
"numpy.random.RandomState"
] | [((308, 328), 'numpy.random.RandomState', 'rd.RandomState', (['seed'], {}), '(seed)\n', (322, 328), True, 'import numpy.random as rd\n'), ((353, 412), 'numpy.ones', 'np.ones', (['(self.users, self.arms, self.dims)'], {'dtype': 'np.float'}), '((self.users, self.arms, self.dims), dtype=np.float)\n', (360, 412), True, 'import numpy as np\n'), ((651, 665), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (659, 665), True, 'import numpy as np\n'), ((680, 693), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (688, 693), True, 'import numpy as np\n'), ((826, 839), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (834, 839), True, 'import numpy as np\n'), ((1655, 1692), 'numpy.random.randn', 'np.random.randn', (['self.dims', 'self.arms'], {}), '(self.dims, self.arms)\n', (1670, 1692), True, 'import numpy as np\n'), ((1708, 1735), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {'axis': '(0)'}), '(vec, axis=0)\n', (1722, 1735), True, 'import numpy as np\n'), ((191, 202), 'time.time', 'time.time', ([], {}), '()\n', (200, 202), False, 'import time\n'), ((1310, 1346), 'numpy.sum', 'np.sum', (['(context_gen * context_gen)', '(2)'], {}), '(context_gen * context_gen, 2)\n', (1316, 1346), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image, CompressedImage,LaserScan
from cv_bridge import CvBridge, CvBridgeError
from message_filters import ApproximateTimeSynchronizer, Subscriber
from ackermann_msgs.msg import AckermannDriveStamped
import imutils
from race.msg import drive_param
import os
import rospkg
import numpy as np
# import sys so we can use packages outside of this folder in
# either python 2 or python 3, I know it's janky, chill
import sys
import os
from pathlib import Path
#insert parent directory into the path
sys.path.insert(0,str(Path(os.path.abspath(__file__)).parent.parent))
from preprocessing.utils import ImageUtils
class MessageSynchronizer:
''' Gathers messages with vehicle information that have similar time stamps
/camera/zed/rgb/image_rect_color/compressed: 18 hz
/camera/zed/rgb/image_rect_color: 18 hz
/vesc/ackermann_cmd_mux/input/teleop: 40 hz
'''
def __init__(self,racecar_name,vesc_name,data_path):
self.image_topic = racecar_name+'/camera/zed/rgb/image_rect_color'
self.drive_topic = vesc_name+'/ackermann_cmd_mux/input/teleop'
self.lidar_topic = racecar_name+'/scan'
print(self.image_topic,self.drive_topic,self.lidar_topic)
self.image_rect_color=Subscriber(self.image_topic,Image)
self.ackermann_stamped=Subscriber(self.drive_topic,AckermannDriveStamped)
self.lidar_sub=Subscriber(self.lidar_topic,LaserScan)
r = rospkg.RosPack()
self.util=ImageUtils()
self.save_path_root=os.path.sep.join([r.get_path('computer_vision'),data_path])
self.cv_bridge=CvBridge()
self.count=0
self.save_count=0
#create the time synchronizer
self.sub = ApproximateTimeSynchronizer([self.image_rect_color,self.ackermann_stamped,self.lidar_sub], queue_size = 20, slop = 0.08)
#register the callback to the synchronizer
self.sub.registerCallback(self.master_callback)
#callback for the synchronized messages
#Note: a negative value means turning to the right, a postive value means turning to the left
def master_callback(self,image,ackermann_msg,lidar_msg): #drive_param):
#convert rosmsg to cv image
try:
cv_image=self.cv_bridge.imgmsg_to_cv2(image,"bgr8")
self.count+=1
except CvBridgeError as e:
print(e)
#convert the steering command to a string to I can store it with the image name
#for efficient data storage
command='%.10f' % ackermann_msg.drive.steering_angle
#replace the period with ~ so it's a valid filename
command=command.replace('.','~')
#save path
save_path=os.path.join(self.save_path_root,self.label_image(ackermann_msg.drive.steering_angle),str(rospy.Time.now())+'~'+command+'.png')
limited_ranges=np.asarray(lidar_msg.ranges)
indices=np.where(limited_ranges>=10.0)[0]
limited_ranges[indices]=10.0
limited_ranges= limited_ranges[29:1053]
limited_ranges = limited_ranges.reshape((32,32,1))
limited_ranges = limited_ranges
if(self.count % 1==0):
dirPath = os.path.split(save_path)[0]
if not 'straight' in dirPath and 'weak_right' not in dirPath and 'weak_left' not in dirPath:
self.save_image(cv_image,save_path)
np.save(save_path.replace(".png",".npy"),limited_ranges)
self.save_count+=1
self.count+=1
#function that categorizes images into left, weak_left, straight, weak_right, right
def label_image(self,steering_angle):
if(steering_angle<-0.261799):
return "right"
elif(steering_angle>0.261799):
return "left"
elif(steering_angle<-0.0523599 and steering_angle>-0.261799):
return "weak_right"
elif(steering_angle>0.0523599 and steering_angle<0.261799):
return "weak_left"
else:
return "straight"
def save_image(self,image,path):
dirPath = os.path.split(path)[0]
# if the output directory does not exist, create it
if not os.path.exists(dirPath):
os.makedirs(dirPath)
print('does not exist')
print(path)
cv2.imwrite(path,image)
if __name__=='__main__':
rospy.init_node('image_command_sync')
args = rospy.myargv()[1:]
# get the racecar name so we know what to subscribe to
racecar_name=args[0]
# get the name of the vesc for the car
vesc_name=args[1]
# path where to store the dataset
data_path = args[2]
# initialize the message filter
mf=MessageSynchronizer(racecar_name,vesc_name,data_path)
# spin so that we can receive messages
rospy.spin() | [
"cv2.imwrite",
"os.path.exists",
"os.makedirs",
"numpy.where",
"rospy.init_node",
"numpy.asarray",
"preprocessing.utils.ImageUtils",
"cv_bridge.CvBridge",
"os.path.split",
"rospy.Time.now",
"rospy.myargv",
"rospkg.RosPack",
"rospy.spin",
"message_filters.Subscriber",
"message_filters.App... | [((4426, 4463), 'rospy.init_node', 'rospy.init_node', (['"""image_command_sync"""'], {}), "('image_command_sync')\n", (4441, 4463), False, 'import rospy\n'), ((4871, 4883), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (4881, 4883), False, 'import rospy\n'), ((1342, 1377), 'message_filters.Subscriber', 'Subscriber', (['self.image_topic', 'Image'], {}), '(self.image_topic, Image)\n', (1352, 1377), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((1408, 1459), 'message_filters.Subscriber', 'Subscriber', (['self.drive_topic', 'AckermannDriveStamped'], {}), '(self.drive_topic, AckermannDriveStamped)\n', (1418, 1459), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((1482, 1521), 'message_filters.Subscriber', 'Subscriber', (['self.lidar_topic', 'LaserScan'], {}), '(self.lidar_topic, LaserScan)\n', (1492, 1521), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((1533, 1549), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (1547, 1549), False, 'import rospkg\n'), ((1568, 1580), 'preprocessing.utils.ImageUtils', 'ImageUtils', ([], {}), '()\n', (1578, 1580), False, 'from preprocessing.utils import ImageUtils\n'), ((1692, 1702), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1700, 1702), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1808, 1930), 'message_filters.ApproximateTimeSynchronizer', 'ApproximateTimeSynchronizer', (['[self.image_rect_color, self.ackermann_stamped, self.lidar_sub]'], {'queue_size': '(20)', 'slop': '(0.08)'}), '([self.image_rect_color, self.ackermann_stamped,\n self.lidar_sub], queue_size=20, slop=0.08)\n', (1835, 1930), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((2936, 2964), 'numpy.asarray', 'np.asarray', (['lidar_msg.ranges'], {}), '(lidar_msg.ranges)\n', (2946, 2964), True, 'import numpy as np\n'), ((4372, 4396), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], 
{}), '(path, image)\n', (4383, 4396), False, 'import cv2\n'), ((4476, 4490), 'rospy.myargv', 'rospy.myargv', ([], {}), '()\n', (4488, 4490), False, 'import rospy\n'), ((2981, 3013), 'numpy.where', 'np.where', (['(limited_ranges >= 10.0)'], {}), '(limited_ranges >= 10.0)\n', (2989, 3013), True, 'import numpy as np\n'), ((4152, 4171), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (4165, 4171), False, 'import os\n'), ((4250, 4273), 'os.path.exists', 'os.path.exists', (['dirPath'], {}), '(dirPath)\n', (4264, 4273), False, 'import os\n'), ((4287, 4307), 'os.makedirs', 'os.makedirs', (['dirPath'], {}), '(dirPath)\n', (4298, 4307), False, 'import os\n'), ((3261, 3285), 'os.path.split', 'os.path.split', (['save_path'], {}), '(save_path)\n', (3274, 3285), False, 'import os\n'), ((629, 654), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (644, 654), False, 'import os\n'), ((2874, 2890), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2888, 2890), False, 'import rospy\n')] |
# GPU performance tests extracted from py-videocorevi Python library.
# Testing for Raspberry Pi 4 Benchmarking and device identification.
# TREASURE PROJECT 2021
import time
from time import clock_gettime,CLOCK_MONOTONIC
from time import monotonic
import fcntl
import socket
import struct
import numpy as np
from videocore6.v3d import *
from videocore6 import pack_unpack
from videocore6.driver import Driver
from videocore6.assembler import qpu
from bench_helper import BenchHelper
import sys
import os
import random
import hashlib
def getsec():
return clock_gettime(CLOCK_MONOTONIC)
@qpu
def load_params(asm, thread, regs):
if thread == 1:
bxor(r0, r0, r0, sig = ldunifrf(rf0))
elif thread == 8:
# 8 threads (1 threads / qpu)
tidx(r0, sig = ldunifrf(rf0))
shr(r0, r0, 2)
mov(r1, 0b1111)
elif thread == 16:
# 16 threads (2 threads / qpu)
tidx(r0, sig = ldunifrf(rf0))
shr(r0, r0, 1).mov(r1, 1)
shl(r1, r1, 5)
sub(r1, r1, 1)
else:
assert thread in [1,8,16]
band(r3, r0, r1, sig = ldunifrf(rf1))
shl(r0, rf1, 2)
umul24(r0, r0, r3)
eidx(r1).add(r0, r0, rf0)
shl(r1, r1, 2)
shl(r3, 4, 4).add(r0, r0, r1)
n = len(regs)
mov(tmua, r0, sig = thrsw).add(r0, r0, r3)
nop()
nop()
nop(sig = ldtmu(r1))
for i in range(n):
if i % 16 == 0:
mov(r5rep, r1)
mov(regs[i], r5)
elif i % 16 == 15 and i != n - 1:
mov(tmua, r0, sig = thrsw).add(r0, r0, r3)
rotate(r5rep, r1, - (i % 16))
mov(regs[i], r5)
nop(sig = ldtmu(r1))
else:
rotate(r5rep, r1, - (i % 16))
mov(regs[i], r5)
@qpu
def qpu_sgemm_rnn_naive(asm, thread):
params = [
'P',
'Q',
'R',
'A_base',
'A_stride',
'B_base',
'B_stride',
'C_base',
'C_stride',
'alpha',
'beta',
]
values = [
'A_cur',
'B_cur',
'C_cur',
'i', 'j', 'k',
]
g = globals()
for i, reg in enumerate(params + values):
g['reg_' + reg] = g['rf' + str(i+32)]
load_params(asm, thread, [g['reg_' + reg] for reg in params])
add(r0, reg_P, 15)
shr(r0, r0, 4)
shl(r0, r0, 4)
add(r1, reg_R, 15)
shr(r1, r1, 4)
shl(r1, r1, 6)
umul24(r3, r0, reg_A_stride)
add(reg_A_base, reg_A_base, r3)
add(reg_B_base, reg_B_base, r1)
umul24(r3, r0, reg_C_stride)
add(reg_C_base, reg_C_base, r3)
add(reg_C_base, reg_C_base, r1)
for i in range(16):
mov(rf[i], 0.0).mov(rf[i+16], 0.0)
# i=(p+15)/16.
add(r0, reg_P, 15)
shr(reg_i, r0, 4)
with loop as li:
# j=(r+15)/16
add(r0, reg_R, 15)
shr(reg_j, r0, 4)
with loop as lj:
shl(r0, reg_i, 4)
umul24(r3, r0, reg_C_stride)
shl(r1, reg_j, 6)
sub(reg_C_cur, reg_C_base, r3)
sub(reg_C_cur, reg_C_cur, r1)
umul24(r3, r0, reg_A_stride)
sub(reg_A_cur, reg_A_base, r3)
sub(reg_B_cur, reg_B_base, r1)
mov(reg_k, reg_Q)
with loop as lk:
eidx(r0)
umul24(r1, r0, reg_A_stride)
add(r1, r1, reg_A_cur).add(reg_A_cur, reg_A_cur, 4)
mov(tmua, r1, sig = thrsw)
shl(r1, r0, 2)
add(r1, r1, reg_B_cur).add(reg_B_cur, reg_B_cur, reg_B_stride)
mov(tmua, r1, sig = thrsw)
nop(sig = ldtmu(r0))
mov(r5rep, r0)
nop(sig = ldtmu(r4))
nop().fmul(r3, r5, r4)
for i in range(1,16):
rotate(r5rep, r0, -i)
fadd(rf[i-1], rf[i-1], r3).fmul(r3, r5, r4)
fadd(rf15, rf15, r3)
sub(reg_k, reg_k, 1, cond = 'pushz')
lk.b(cond = 'anyna')
nop() # delay slot
nop() # delay slot
nop() # delay slot
eidx(r0)
shl(r0, r0, 2)
add(r1, reg_C_cur, r0)
mov(tmua, r1, sig = thrsw).add(r1, r1, reg_C_stride)
fmul(rf[0], rf[0], reg_alpha)
for i in range(1, 16):
mov(tmua, r1, sig = thrsw).add(r1, r1, reg_C_stride)
fmul(rf[i], rf[i], reg_alpha, sig = ldtmu(rf[i+15]))
mov(r0, reg_beta).fmul(r3, rf[16], reg_beta, sig = ldtmu(rf[31]))
for i in range(16):
fadd(rf[i], rf[i], r3).fmul(r3, rf[i+17], r0)
eidx(r0)
shl(r0, r0, 2)
add(r1, reg_C_cur, r0)
for i in range(16):
mov(tmud, rf[i])
mov(tmua, r1).add(r1, r1, reg_C_stride)
mov(rf[i], 0.0).mov(rf[i+16], 0.0)
tmuwt()
sub(reg_j, reg_j, 1, cond = 'pushz')
lj.b(cond = 'anyna')
nop() # delay slot
nop() # delay slot
nop() # delay slot
sub(reg_i, reg_i, 1, cond = 'pushz')
li.b(cond = 'anyna')
nop()
nop()
nop()
nop(sig = thrsw)
nop(sig = thrsw)
nop()
nop()
nop(sig = thrsw)
nop()
nop()
nop()
def sgemm_rnn_naive():
thread = 8
P = 1024
Q = 1024
R = 1024
assert P % (16 * 2) == 0
assert R % (16 * 4) == 0
with Driver() as drv:
code = drv.program(lambda asm: qpu_sgemm_rnn_naive(asm, thread))
A = drv.alloc((P, Q), dtype = 'float32')
B = drv.alloc((Q, R), dtype = 'float32')
C = drv.alloc((P, R), dtype = 'float32')
np.random.seed(0)
alpha = np.random.randn()
beta = np.random.randn()
A_ref = np.random.randn(*A.shape).astype(A.dtype)
B_ref = np.random.randn(*B.shape).astype(B.dtype)
C_ref = np.random.randn(*C.shape).astype(C.dtype)
A[:] = A_ref
B[:] = B_ref
C[:] = C_ref
start = time.perf_counter_ns()
C_ref[:] = alpha * A_ref.dot(B_ref) + beta * C_ref
time_ref = time.perf_counter_ns() - start
def block_2x4_params(i, j):
tile_P = P // 2
tile_R = R // 4
return [
tile_P, Q, tile_R,
A.addresses()[tile_P*i, 0 ],
A.strides[0],
B.addresses()[0 , tile_R*j],
B.strides[0],
C.addresses()[tile_P*i, tile_R*j],
C.strides[0],
*pack_unpack('f', 'I', [alpha, beta]),
]
unif_params = drv.alloc((thread, len(block_2x4_params(0,0))), dtype = 'uint32')
for th in range(thread):
unif_params[th] = block_2x4_params(th // 4, th % 4)
unif = drv.alloc(2, dtype = 'uint32')
unif[0] = unif_params.addresses()[0,0]
unif[1] = unif_params.shape[1]
start = time.perf_counter_ns()
drv.execute(code, unif.addresses()[0], thread = thread)
time_gpu = time.perf_counter_ns() - start
np.set_printoptions(threshold=np.inf)
def Gflops(sec):
return (2 * P * Q * R + 3 * P * R) / sec * 1e-9
return [time_ref,time_gpu] #Gflops(time_ref),time_gpu,Gflops(time_gpu)]
def sleep(duration):
duration=duration*1000000000
now = time.perf_counter_ns()
end = now + duration
while now < end:
now = time.perf_counter_ns()
def get_QPU_freq(seg):
with RegisterMapping() as regmap:
with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
time.sleep(seg)
result = pctr.result()
return (result[0] * 1e-6)
def cpu_random():
with RegisterMapping() as regmap:
with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
a=random.random()
result = pctr.result()
return (result[0])
def cpu_true_random(n):
with RegisterMapping() as regmap:
with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
a=os.urandom(n)
result = pctr.result()
return (result[0])
def cpu_hash():
with RegisterMapping() as regmap:
with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
h=int(hashlib.sha256("test string".encode('utf-8')).hexdigest(), 16) % 10**8
result = pctr.result()
return (result[0])
@qpu
def qpu_summation(asm, *, num_qpus, unroll_shift, code_offset,
align_cond=lambda pos: pos % 512 == 170):
g = globals()
for i, v in enumerate(['length', 'src', 'dst', 'qpu_num', 'stride', 'sum']):
g[f'reg_{v}'] = rf[i]
nop(sig=ldunifrf(reg_length))
nop(sig=ldunifrf(reg_src))
nop(sig=ldunifrf(reg_dst))
if num_qpus == 1:
num_qpus_shift = 0
mov(reg_qpu_num, 0)
elif num_qpus == 8:
num_qpus_shift = 3
tidx(r0)
shr(r0, r0, 2)
band(reg_qpu_num, r0, 0b1111)
else:
raise Exception('num_qpus must be 1 or 8')
# addr += 4 * (thread_num + 16 * qpu_num)
shl(r0, reg_qpu_num, 4)
eidx(r1)
add(r0, r0, r1)
shl(r0, r0, 2)
add(reg_src, reg_src, r0).add(reg_dst, reg_dst, r0)
# stride = 4 * 16 * num_qpus
mov(reg_stride, 1)
shl(reg_stride, reg_stride, 6 + num_qpus_shift)
# The QPU performs shifts and rotates modulo 32, so it actually supports
# shift amounts [0, 31] only with small immediates.
num_shifts = [*range(16), *range(-16, 0)]
# length /= 16 * 8 * num_qpus * unroll
shr(reg_length, reg_length, num_shifts[7 + num_qpus_shift + unroll_shift])
# This single thread switch and two instructions just before the loop are
# really important for TMU read to achieve a better performance.
# This also enables TMU read requests without the thread switch signal, and
# the eight-depth TMU read request queue.
nop(sig=thrsw)
nop()
bxor(reg_sum, 1, 1).mov(r1, 1)
while not align_cond(code_offset + len(asm)):
nop()
with loop as l:
unroll = 1 << unroll_shift
for i in range(7):
mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)
mov(tmua, reg_src).sub(reg_length, reg_length, r1, cond='pushz')
add(reg_src, reg_src, reg_stride, sig=ldtmu(r0))
for j in range(unroll - 1):
for i in range(8):
mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)
add(reg_sum, reg_sum, r0, sig=ldtmu(r0))
for i in range(5):
add(reg_sum, reg_sum, r0, sig=ldtmu(r0))
l.b(cond='na0')
add(reg_sum, reg_sum, r0, sig=ldtmu(r0)) # delay slot
add(reg_sum, reg_sum, r0, sig=ldtmu(r0)) # delay slot
add(reg_sum, reg_sum, r0) # delay slot
mov(tmud, reg_sum)
mov(tmua, reg_dst)
# This synchronization is needed between the last TMU operation and the
# program end with the thread switch just before the loop above.
barrierid(syncb, sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop(sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop()
nop()
nop()
def summation(*, length, num_qpus=8, unroll_shift=5):
assert length > 0
assert length % (16 * 8 * num_qpus * (1 << unroll_shift)) == 0
with Driver(data_area_size=(length + 1024) * 4) as drv:
code = drv.program(qpu_summation, num_qpus=num_qpus,
unroll_shift=unroll_shift,
code_offset=drv.code_pos // 8)
X = drv.alloc(length, dtype='uint32')
Y = drv.alloc(16 * num_qpus, dtype='uint32')
X[:] = np.arange(length, dtype=X.dtype)
Y.fill(0)
assert sum(Y) == 0
unif = drv.alloc(3, dtype='uint32')
unif[0] = length
unif[1] = X.addresses()[0]
unif[2] = Y.addresses()[0]
start = time.perf_counter_ns()
drv.execute(code, unif.addresses()[0], thread=num_qpus)
end = time.perf_counter_ns()
assert sum(Y) % 2**32 == (length - 1) * length // 2 % 2**32
return [end - start] #,length * 4 / (end - start) * 1e-6]
@qpu
def qpu_scopy(asm, *, num_qpus, unroll_shift, code_offset,
align_cond=lambda pos: pos % 512 == 259):
g = globals()
for i, v in enumerate(['length', 'src', 'dst', 'qpu_num', 'stride']):
g[f'reg_{v}'] = rf[i]
nop(sig=ldunifrf(reg_length))
nop(sig=ldunifrf(reg_src))
nop(sig=ldunifrf(reg_dst))
if num_qpus == 1:
num_qpus_shift = 0
mov(reg_qpu_num, 0)
elif num_qpus == 8:
num_qpus_shift = 3
tidx(r0)
shr(r0, r0, 2)
band(reg_qpu_num, r0, 0b1111)
else:
raise Exception('num_qpus must be 1 or 8')
# addr += 4 * (thread_num + 16 * qpu_num)
shl(r0, reg_qpu_num, 4)
eidx(r1)
add(r0, r0, r1)
shl(r0, r0, 2)
add(reg_src, reg_src, r0).add(reg_dst, reg_dst, r0)
# stride = 4 * 16 * num_qpus
mov(reg_stride, 1)
shl(reg_stride, reg_stride, 6 + num_qpus_shift)
# length /= 16 * 8 * num_qpus * unroll
shr(reg_length, reg_length, 7 + num_qpus_shift + unroll_shift)
# This single thread switch and two nops just before the loop are really
# important for TMU read to achieve a better performance.
# This also enables TMU read requests without the thread switch signal, and
# the eight-depth TMU read request queue.
nop(sig=thrsw)
nop()
nop()
while not align_cond(code_offset + len(asm)):
nop()
with loop as l:
unroll = 1 << unroll_shift
for i in range(8):
mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)
for j in range(unroll - 1):
for i in range(8):
nop(sig=ldtmu(r0))
mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)
mov(tmud, r0)
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)
for i in range(6):
nop(sig=ldtmu(r0))
mov(tmud, r0)
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)
nop(sig=ldtmu(r0))
mov(tmud, r0).sub(reg_length, reg_length, 1, cond='pushz')
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)
l.b(cond='na0')
nop(sig=ldtmu(r0)) # delay slot
mov(tmud, r0) # delay slot
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride) # delay slot
# This synchronization is needed between the last TMU operation and the
# program end with the thread switch just before the loop above.
barrierid(syncb, sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop(sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop()
nop()
nop()
def scopy(*, length, num_qpus=8, unroll_shift=0):
assert length > 0
assert length % (16 * 8 * num_qpus * (1 << unroll_shift)) == 0
with Driver(data_area_size=(length * 2 + 1024) * 4) as drv:
code = drv.program(qpu_scopy, num_qpus=num_qpus,
unroll_shift=unroll_shift,
code_offset=drv.code_pos // 8)
X = drv.alloc(length, dtype='float32')
Y = drv.alloc(length, dtype='float32')
X[:] = np.arange(*X.shape, dtype=X.dtype)
Y[:] = -X
assert not np.array_equal(X, Y)
unif = drv.alloc(3, dtype='uint32')
unif[0] = length
unif[1] = X.addresses()[0]
unif[2] = Y.addresses()[0]
start = time.perf_counter_ns()
drv.execute(code, unif.addresses()[0], thread=num_qpus)
end = time.perf_counter_ns()
assert np.array_equal(X, Y)
return[end - start] #, length * 4 / (end - start) * 1e-6]
@qpu
def qpu_memset(asm, *, num_qpus, unroll_shift, code_offset,
align_cond=lambda pos: pos % 512 == 0):
g = globals()
for i, v in enumerate(['dst', 'fill', 'length', 'qpu_num', 'stride']):
g[f'reg_{v}'] = rf[i]
nop(sig=ldunifrf(reg_dst))
nop(sig=ldunifrf(reg_fill))
nop(sig=ldunifrf(reg_length))
if num_qpus == 1:
num_qpus_shift = 0
mov(reg_qpu_num, 0)
elif num_qpus == 8:
num_qpus_shift = 3
tidx(r0)
shr(r0, r0, 2)
band(reg_qpu_num, r0, 0b1111)
else:
raise Exception('num_qpus must be 1 or 8')
# addr += 4 * (thread_num + 16 * qpu_num)
shl(r0, reg_qpu_num, 4)
eidx(r1)
add(r0, r0, r1)
shl(r0, r0, 2)
add(reg_dst, reg_dst, r0)
# stride = 4 * 16 * num_qpus
# r0 = 1
mov(r0, 1)
shl(reg_stride, r0, 6 + num_qpus_shift)
# length /= 16 * num_qpus * unroll
shr(reg_length, reg_length, 4 + num_qpus_shift + unroll_shift)
unroll = 1 << unroll_shift
if unroll == 1:
sub(reg_length, reg_length, r0, cond='pushz')
while not align_cond(code_offset + len(asm)):
nop()
with loop as l:
l.b(cond='na0')
mov(tmud, reg_fill) # delay slot
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride) # delay slot
sub(reg_length, reg_length, r0, cond='pushz') # delay slot
else:
while not align_cond(code_offset + len(asm)):
nop()
with loop as l:
for i in range(unroll - 2):
mov(tmud, reg_fill)
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)
mov(tmud, reg_fill).sub(reg_length, reg_length, r0, cond='pushz')
l.b(cond='na0')
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride) # delay slot
mov(tmud, reg_fill) # delay slot
mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride) # delay slot
nop(sig=thrsw)
nop(sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop()
nop()
nop()
def memset(*, fill, length, num_qpus=8, unroll_shift=1):
    """Fill *length* uint32 words with *fill* on the QPUs and time the run.

    *length* must be positive and a multiple of 16 * num_qpus *
    2**unroll_shift.  Returns a one-element list holding the elapsed
    wall-clock time in seconds.
    """
    words_per_pass = 16 * num_qpus * (1 << unroll_shift)
    assert length > 0
    assert length % words_per_pass == 0
    with Driver(data_area_size=(length + 1024) * 4) as drv:
        code = drv.program(qpu_memset, num_qpus=num_qpus,
                           unroll_shift=unroll_shift,
                           code_offset=drv.code_pos // 8)
        buf = drv.alloc(length, dtype='uint32')
        # Pre-fill with the bitwise complement so we can tell the kernel ran.
        buf.fill(~fill)
        assert not np.array_equiv(buf, fill)
        unif = drv.alloc(3, dtype='uint32')
        unif[0] = buf.addresses()[0]
        unif[1] = fill
        unif[2] = length
        start = monotonic()
        drv.execute(code, unif.addresses()[0], thread=num_qpus)
        end = monotonic()
        # Every word must now hold the fill pattern.
        assert np.array_equiv(buf, fill)
        return [end - start]
@qpu
def qpu_clock(asm):
    # QPU kernel: spin for r5 iterations (first uniform), then write 1 to
    # the address given by the second uniform as a completion flag.  The
    # loop body is 5 instructions (sub, branch, 3 nops); the host uses that
    # count to estimate the QPU clock from the elapsed time.
    nop(sig = ldunif)
    nop(sig = ldunifrf(rf0))
    with loop as l:
        sub(r5, r5, 1, cond = 'pushn')
        l.b(cond = 'anyna')
        nop()
        nop()
        nop()
    # Completion flag for the host-side wait.
    mov(tmud, 1)
    mov(tmua, rf0)
    tmuwt()
    nop(sig = thrsw)
    nop(sig = thrsw)
    nop()
    nop()
    nop(sig = thrsw)
    nop()
    nop()
    nop()
def test_clock():
    """Estimate the QPU clock by timing a fixed-count countdown kernel.

    Dispatches qpu_clock with 2**25 iterations, waits on the completion
    flag, and returns a one-element list with the derived clock figure
    (same formula as the original implementation).
    """
    bench = BenchHelper('./libbench_helper.so')
    with Driver() as drv:
        iterations = pow(2, 25)
        code = drv.program(qpu_clock)
        unif = drv.alloc(2, dtype='uint32')
        done = drv.alloc(1, dtype='uint32')
        done[:] = 0
        unif[0] = iterations
        unif[1] = done.addresses()[0]
        with drv.compute_shader_dispatcher() as csd:
            start = time.perf_counter_ns()
            csd.dispatch(code, unif.addresses()[0])
            bench.wait_address(done)
            end = time.perf_counter_ns()
        # 5 instructions per loop iteration; remaining factors scale ns to
        # the reported unit (kept identical to the original formula).
        return [iterations * 5 / (end - start) / 1000 / 1000 * 4]
@qpu
def qpu_write_N(asm, N):
    # QPU kernel: each of the 16 SIMD elements writes the constant N to its
    # own word at (first uniform address + 4*eidx), then writes 1 to the
    # second uniform's address as a completion flag.
    eidx(r0, sig = ldunif)
    nop(sig = ldunifrf(rf0))
    shl(r0, r0, 2)
    mov(tmud, N)
    add(tmua, r5, r0)
    tmuwt()
    # Completion flag for the host-side wait.
    mov(tmud, 1)
    mov(tmua, rf0)
    tmuwt()
    nop(sig = thrsw)
    nop(sig = thrsw)
    nop()
    nop()
    nop(sig = thrsw)
    nop()
    nop()
    nop()
def test_multiple_dispatch_delay():
    # Compare dispatch overhead three ways: (1) back-to-back dispatches
    # timed as one batch, (2) per-dispatch timing with a busy wait on the
    # completion flag, (3) per-dispatch timing with a 1 s sleep inserted
    # before each dispatch.  Returns [batch_ns, sum(naive_ns), sum(sleep_ns)].
    bench = BenchHelper('./libbench_helper.so')
    with Driver() as drv:
        data = drv.alloc((5, 16), dtype = 'uint32')
        # One program per row; program i writes the constant i.
        # NOTE(review): the lambda closes over i, which is safe only if
        # drv.program invokes it immediately inside the comprehension —
        # the asserts below confirm that in practice.
        code = [drv.program(lambda asm: qpu_write_N(asm, i)) for i in range(data.shape[0])]
        unif = drv.alloc((data.shape[0], 2), dtype = 'uint32')
        done = drv.alloc(1, dtype = 'uint32')
        data[:] = 0
        unif[:,0] = data.addresses()[:,0]
        unif[:,1] = done.addresses()[0]
        # Phase 1: dispatch all programs back to back, time the whole batch.
        ref_start = time.perf_counter_ns()
        with drv.compute_shader_dispatcher() as csd:
            for i in range(data.shape[0]):
                csd.dispatch(code[i], unif.addresses()[i,0])
        ref_end = time.perf_counter_ns()
        assert (data == np.arange(data.shape[0]).reshape(data.shape[0],1)).all()
        data[:] = 0
        # Phase 2: time each dispatch individually, waiting on the flag.
        naive_results = np.zeros(data.shape[0], dtype='float32')
        with drv.compute_shader_dispatcher() as csd:
            for i in range(data.shape[0]):
                done[:] = 0
                start = time.perf_counter_ns()
                csd.dispatch(code[i], unif.addresses()[i,0])
                bench.wait_address(done)
                end = time.perf_counter_ns()
                naive_results[i] = end - start
        assert (data == np.arange(data.shape[0]).reshape(data.shape[0],1)).all()
        # Phase 3: as phase 2, but sleep 1 s before each dispatch.
        sleep_results = np.zeros(data.shape[0], dtype='float32')
        with drv.compute_shader_dispatcher() as csd:
            for i in range(data.shape[0]):
                done[:] = 0
                time.sleep(1)
                start = time.perf_counter_ns()
                csd.dispatch(code[i], unif.addresses()[i,0])
                bench.wait_address(done)
                end = time.perf_counter_ns()
                sleep_results[i] = end - start
        assert (data == np.arange(data.shape[0]).reshape(data.shape[0],1)).all()
        return [ref_end - ref_start,np.sum(naive_results),np.sum(sleep_results)]
@qpu
def qpu_tmu_load_1_slot_1_qpu(asm, nops):
    # QPU kernel: only QPU 0 runs the benchmark loop, which issues one TMU
    # load per iteration, inserts `nops` nops between the request and the
    # load signal, and accumulates the loaded floats.  The per-element sums
    # are written to Y, then a completion flag to `done`.  All other QPUs
    # branch straight to the final barrier.
    nop(sig = ldunifrf(rf0)) # X.shape[1]
    nop(sig = ldunifrf(rf1)) # X
    nop(sig = ldunifrf(rf2)) # X.stride[1]
    nop(sig = ldunifrf(rf3)) # X.stride[0]
    nop(sig = ldunifrf(rf4)) # Y
    nop(sig = ldunifrf(rf5)) # done
    barrierid(syncb, sig = thrsw)
    nop()
    nop()
    # Skip to the end unless this is QPU 0.
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111, cond = 'pushz')
    b(R.done, cond = 'allna')
    nop() # delay slot
    nop() # delay slot
    nop() # delay slot
    # Per-element output address: Y + 4*eidx.
    eidx(r0)
    shl(r0, r0, 2)
    add(rf4, rf4, r0)
    # Per-element input address: X + eidx*stride[0].
    eidx(r0)
    umul24(r0, r0, rf3)
    add(rf1, rf1, r0)
    mov(r2, 0.0)
    with loop as l:
        mov(tmua, rf1).add(rf1, rf1, rf2)
        for i in range(nops):
            nop()
        nop(sig = ldtmu(r3))
        sub(rf0, rf0, 1, cond = 'pushz')
        l.b(cond = 'anyna')
        fadd(r2, r2, r3) # delay slot
        nop() # delay slot
        nop() # delay slot
    # Write the accumulated sums and the completion flag.
    mov(tmud, r2)
    mov(tmua, rf4)
    tmuwt()
    mov(tmud, 1)
    mov(tmua, rf5)
    tmuwt()
    L.done
    barrierid(syncb, sig = thrsw)
    nop()
    nop()
    nop(sig = thrsw)
    nop(sig = thrsw)
    nop()
    nop()
    nop(sig = thrsw)
    nop()
    nop()
    nop()
def test_tmu_load_1_slot_1_qpu():
    # Benchmark TMU load latency with a single QPU (one TMU slot), for a
    # row-major and a transposed access pattern.  Each measured run checks
    # that the kernel's per-element sums match np.sum along the walked axis.
    # Returns a list of mean elapsed times (ns), one per nop count/layout.
    bench = BenchHelper('./libbench_helper.so')
    res = []
    for trans in [False, True]:
        with Driver() as drv:
            loop = 2**15
            X = drv.alloc((16, loop) if trans else (loop, 16), dtype = 'float32')
            Y = drv.alloc(16, dtype = 'float32')
            unif = drv.alloc(6, dtype = 'uint32')
            done = drv.alloc(1, dtype = 'uint32')
            unif[0] = loop
            unif[1] = X.addresses()[0,0]
            unif[2] = X.strides[int(trans)]
            unif[3] = X.strides[1-int(trans)]
            unif[4] = Y.addresses()[0]
            unif[5] = done.addresses()[0]
            # results[nops, i]: elapsed ns of measurement i with `nops` nops.
            results = np.zeros((1, 10), dtype = 'float32')
            #fig = plt.figure()
            #ax = fig.add_subplot(1,1,1)
            #ax.set_title(f'TMU load latency (1 slot, 1 qpu, stride=({unif[2]},{unif[3]}))')
            #ax.set_xlabel('# of nop (between request and load signal)')
            #ax.set_ylabel('sec')
            for nops in range(results.shape[0]):
                code = drv.program(lambda asm: qpu_tmu_load_1_slot_1_qpu(asm, nops))
                for i in range(results.shape[1]):
                    with drv.compute_shader_dispatcher() as csd:
                        # Scale inputs so sums stay small enough for atol.
                        X[:] = np.random.randn(*X.shape) / X.shape[int(trans)]
                        Y[:] = 0.0
                        done[:] = 0
                        start = time.perf_counter_ns()
                        csd.dispatch(code, unif.addresses()[0], thread = 8)
                        bench.wait_address(done)
                        end = time.perf_counter_ns()
                        results[nops,i] = end - start
                        assert np.allclose(Y, np.sum(X, axis=int(trans)), atol = 1e-4)
                #ax.scatter(np.zeros(results.shape[1])+nops, results[nops], s=1, c='blue')
                #print('{:4}/{}\t{:.9f}'.format(nops, results.shape[0], np.sum(results[nops]) / results.shape[1]))
                res.append(np.sum(results[nops]) / results.shape[1])
    return res
    #ax.set_ylim(auto=True)
    #ax.set_xlim(0, results.shape[0])
    #fig.savefig(f'benchmarks/tmu_load_1_slot_1_qpu_{unif[2]}_{unif[3]}.png')
@qpu
def qpu_tmu_load_2_slot_1_qpu(asm, nops):
    # QPU kernel: like the 1-slot variant, but the benchmark loop runs on
    # every QPU whose (index & 0b0011) == 0, i.e. one QPU per TMU slot.
    # Each participating QPU walks its own chunk of X and writes its sums
    # to its own row of Y; afterwards only QPU 0 writes the done flag.
    nop(sig = ldunifrf(rf0)) # X.shape[1]
    nop(sig = ldunifrf(rf1)) # X
    nop(sig = ldunifrf(rf2)) # X.stride[1]
    nop(sig = ldunifrf(rf3)) # X.stride[0]
    nop(sig = ldunifrf(rf4)) # Y
    nop(sig = ldunifrf(rf5)) # done
    barrierid(syncb, sig = thrsw)
    nop()
    nop()
    # Only QPUs with (index & 0b0011) == 0 run the benchmark.
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b0011, cond = 'pushz')
    b(R.skip_bench, cond = 'allna')
    nop()
    nop()
    nop()
    # Output address: Y + 4*eidx + qpu_num * 64 bytes (one row per QPU).
    eidx(r0)
    shl(r0, r0, 2)
    add(rf4, rf4, r0)
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111)
    shl(r1, 4, 4)
    umul24(r0, r0, r1)
    add(rf4, rf4, r0)
    # Input address: X + eidx*stride[0] + qpu_num * (shape[1] << 6) bytes.
    eidx(r0)
    umul24(r0, r0, rf3)
    add(rf1, rf1, r0)
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111)
    shl(r1, rf0, 6)
    umul24(r0, r0, r1)
    add(rf1, rf1, r0)
    mov(r2, 0.0)
    with loop as l:
        mov(tmua, rf1).add(rf1, rf1, rf2)
        for i in range(nops):
            nop()
        nop(sig = ldtmu(r3))
        sub(rf0, rf0, 1, cond = 'pushz')
        l.b(cond = 'anyna')
        fadd(r2, r2, r3) # delay slot
        nop() # delay slot
        nop() # delay slot
    mov(tmud, r2)
    mov(tmua, rf4)
    tmuwt()
    L.skip_bench
    barrierid(syncb, sig = thrsw)
    nop()
    nop()
    # Only QPU 0 writes the completion flag.
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111, cond = 'pushz')
    b(R.skip_done, cond = 'allna')
    nop()
    nop()
    nop()
    mov(tmud, 1)
    mov(tmua, rf5)
    tmuwt()
    L.skip_done
    nop(sig = thrsw)
    nop(sig = thrsw)
    nop()
    nop()
    nop(sig = thrsw)
    nop()
    nop()
    nop()
def test_tmu_load_2_slot_1_qpu():
    # Benchmark TMU load latency with one QPU per TMU slot (every 4th QPU
    # active).  Only rows 0 and 4 of Y receive results — rows 1..3 and 5..7
    # must stay zero.  Returns mean elapsed times (ns) per nop count/layout.
    bench = BenchHelper('./libbench_helper.so')
    res=[]
    for trans, min_nops, max_nops in [(False, 0, 1), (True, 0, 1)]:
        with Driver() as drv:
            loop = 2**13
            X = drv.alloc((8, 16, loop) if trans else (8, loop, 16), dtype = 'float32')
            Y = drv.alloc((8, 16), dtype = 'float32')
            unif = drv.alloc(6, dtype = 'uint32')
            done = drv.alloc(1, dtype = 'uint32')
            unif[0] = loop
            unif[1] = X.addresses()[0,0,0]
            unif[2] = X.strides[1+int(trans)]
            unif[3] = X.strides[2-int(trans)]
            unif[4] = Y.addresses()[0,0]
            unif[5] = done.addresses()[0]
            # results[nops, i]: elapsed ns of measurement i with `nops` nops.
            results = np.zeros((max_nops, 10), dtype = 'float32')
            #fig = plt.figure()
            #ax = fig.add_subplot(1,1,1)
            #ax.set_title(f'TMU load latency (2 slot, 1 qpu, stride=({unif[2]},{unif[3]}))')
            #ax.set_xlabel('# of nop (between request and load signal)')
            #ax.set_ylabel('sec')
            #print()
            for nops in range(min_nops, results.shape[0]):
                code = drv.program(lambda asm: qpu_tmu_load_2_slot_1_qpu(asm, nops))
                for i in range(results.shape[1]):
                    with drv.compute_shader_dispatcher() as csd:
                        # Scale inputs so sums stay small enough for atol.
                        X[:] = np.random.randn(*X.shape) / X.shape[1+int(trans)]
                        Y[:] = 0.0
                        done[:] = 0
                        start = time.perf_counter_ns()
                        csd.dispatch(code, unif.addresses()[0], thread = 8)
                        bench.wait_address(done)
                        end = time.perf_counter_ns()
                        results[nops,i] = end - start
                        # Only every 4th QPU's row is written.
                        assert np.allclose(Y[0::4], np.sum(X[0::4], axis=1+int(trans)), atol = 1e-4)
                        assert (Y[1:4] == 0).all()
                        assert (Y[5:8] == 0).all()
                #ax.scatter(np.zeros(results.shape[1])+nops, results[nops], s=1, c='blue')
                #print('{:4}/{}\t{:.9f}'.format(nops, results.shape[0], np.sum(results[nops]) / results.shape[1]))
                res.append(np.sum(results[nops]) / results.shape[1])
    #ax.set_ylim(auto=True)
    #ax.set_xlim(min_nops, max_nops)
    #fig.savefig(f'benchmarks/tmu_load_2_slot_1_qpu_{unif[2]}_{unif[3]}.png')
    return res
def getHwAddr(ifname):
    """Return the MAC address of network interface *ifname* (e.g. 'eth0')
    as a colon-separated lowercase hex string."""
    # 0x8927 is SIOCGIFHWADDR; the kernel returns a struct ifreq whose
    # hardware-address bytes sit at offsets 18..23.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    request = struct.pack('256s', bytes(ifname, 'utf-8')[:15])
    raw = fcntl.ioctl(sock.fileno(), 0x8927, request)
    return ':'.join(format(octet, '02x') for octet in raw[18:24])
#for x in range(0,10):
def main():
    # Collect one CSV row of telemetry and benchmark figures and print it.
    # argv[1] is forwarded to get_QPU_freq and argv[2] to cpu_true_random
    # (their semantics are defined where those helpers live).
    #for n in range(0,100):
    #
    s=int(sys.argv[1])
    r=int(sys.argv[2])
    #f=sys.argv[2]
    mac=getHwAddr('eth0')
    results=[]
    #results.append(c)
    #results.append(f)
    # SoC temperature, via vcgencmd (trailing newline stripped with [:-1]).
    results.append(os.popen("vcgencmd measure_temp | cut -d = -f 2 | cut -d \"'\" -f 1").read()[:-1])
    results.append(get_QPU_freq(s))
    #for i in test_clock():
    # results.append(i)
    #for i in test_clock():
    # results.append(i)
    results.append(cpu_hash())
    #results.append(os.popen("vcgencmd measure_clock core").read[:-1])
    results.append(cpu_random())
    results.append(cpu_true_random(r))
    for i in sgemm_rnn_naive():
        results.append(i)
    # The two string literals below are disabled measurement variants kept
    # for reference; they are dead expression statements, not executed code.
    """results.append(get_QPU_freq(1))
    results.append(get_QPU_freq(2))
    results.append(get_QPU_freq(5))
    results.append(get_QPU_freq(7))
    results.append(get_QPU_freq(8))
    results.append(get_QPU_freq(10))
    results.append(get_QPU_freq(60))"""
    """results.append(cpu_true_random(1000))
    results.append(cpu_true_random(1000000))
    results.append(cpu_true_random(100000000))
    for i in test_clock():
        results.append(i)
    for i in test_clock():
        results.append(i)
    for i in sgemm_rnn_naive():
        results.append(i)
    for i in summation(length=32 * 1024 * 1024):
        results.append(i)
    for i in scopy(length=16*1024*1024):
        results.append(i)
    for i in test_multiple_dispatch_delay():
        results.append(i)"""
    #for i in test_tmu_load_1_slot_1_qpu():
    # results.append(i)
    #for i in test_tmu_load_2_slot_1_qpu():
    # results.append(i)
    results.append(mac)
    print(*results, sep=',')
#print(memset(fill=0x5a5a5a5a, length=16 * 1024 * 1024))
# Script entry point: collect and print one benchmark CSV row.
if __name__ == "__main__":
    main()
| [
"time.sleep",
"bench_helper.BenchHelper",
"time.perf_counter_ns",
"numpy.arange",
"os.popen",
"numpy.random.seed",
"os.urandom",
"time.monotonic",
"numpy.random.randn",
"numpy.set_printoptions",
"numpy.array_equiv",
"videocore6.driver.Driver",
"socket.socket",
"videocore6.pack_unpack",
"... | [((563, 593), 'time.clock_gettime', 'clock_gettime', (['CLOCK_MONOTONIC'], {}), '(CLOCK_MONOTONIC)\n', (576, 593), False, 'from time import clock_gettime, CLOCK_MONOTONIC\n'), ((7391, 7413), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (7411, 7413), False, 'import time\n'), ((19278, 19313), 'bench_helper.BenchHelper', 'BenchHelper', (['"""./libbench_helper.so"""'], {}), "('./libbench_helper.so')\n", (19289, 19313), False, 'from bench_helper import BenchHelper\n'), ((20294, 20329), 'bench_helper.BenchHelper', 'BenchHelper', (['"""./libbench_helper.so"""'], {}), "('./libbench_helper.so')\n", (20305, 20329), False, 'from bench_helper import BenchHelper\n'), ((23482, 23517), 'bench_helper.BenchHelper', 'BenchHelper', (['"""./libbench_helper.so"""'], {}), "('./libbench_helper.so')\n", (23493, 23517), False, 'from bench_helper import BenchHelper\n'), ((27326, 27361), 'bench_helper.BenchHelper', 'BenchHelper', (['"""./libbench_helper.so"""'], {}), "('./libbench_helper.so')\n", (27337, 27361), False, 'from bench_helper import BenchHelper\n'), ((29749, 29797), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (29762, 29797), False, 'import socket\n'), ((5458, 5466), 'videocore6.driver.Driver', 'Driver', ([], {}), '()\n', (5464, 5466), False, 'from videocore6.driver import Driver\n'), ((5706, 5723), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5720, 5723), True, 'import numpy as np\n'), ((5740, 5757), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5755, 5757), True, 'import numpy as np\n'), ((5773, 5790), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5788, 5790), True, 'import numpy as np\n'), ((6046, 6068), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (6066, 6068), False, 'import time\n'), ((6975, 6997), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (6995, 6997), False, 'import time\n'), 
((7121, 7158), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (7140, 7158), True, 'import numpy as np\n'), ((7474, 7496), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (7494, 7496), False, 'import time\n'), ((11399, 11441), 'videocore6.driver.Driver', 'Driver', ([], {'data_area_size': '((length + 1024) * 4)'}), '(data_area_size=(length + 1024) * 4)\n', (11405, 11441), False, 'from videocore6.driver import Driver\n'), ((11740, 11772), 'numpy.arange', 'np.arange', (['length'], {'dtype': 'X.dtype'}), '(length, dtype=X.dtype)\n', (11749, 11772), True, 'import numpy as np\n'), ((11977, 11999), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (11997, 11999), False, 'import time\n'), ((12078, 12100), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (12098, 12100), False, 'import time\n'), ((15052, 15098), 'videocore6.driver.Driver', 'Driver', ([], {'data_area_size': '((length * 2 + 1024) * 4)'}), '(data_area_size=(length * 2 + 1024) * 4)\n', (15058, 15098), False, 'from videocore6.driver import Driver\n'), ((15388, 15422), 'numpy.arange', 'np.arange', (['*X.shape'], {'dtype': 'X.dtype'}), '(*X.shape, dtype=X.dtype)\n', (15397, 15422), True, 'import numpy as np\n'), ((15639, 15661), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (15659, 15661), False, 'import time\n'), ((15740, 15762), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (15760, 15762), False, 'import time\n'), ((15779, 15799), 'numpy.array_equal', 'np.array_equal', (['X', 'Y'], {}), '(X, Y)\n', (15793, 15799), True, 'import numpy as np\n'), ((18182, 18224), 'videocore6.driver.Driver', 'Driver', ([], {'data_area_size': '((length + 1024) * 4)'}), '(data_area_size=(length + 1024) * 4)\n', (18188, 18224), False, 'from videocore6.driver import Driver\n'), ((18663, 18674), 'time.monotonic', 'monotonic', ([], {}), '()\n', (18672, 18674), False, 'from time import 
monotonic\n'), ((18753, 18764), 'time.monotonic', 'monotonic', ([], {}), '()\n', (18762, 18764), False, 'from time import monotonic\n'), ((18781, 18804), 'numpy.array_equiv', 'np.array_equiv', (['X', 'fill'], {}), '(X, fill)\n', (18795, 18804), True, 'import numpy as np\n'), ((19324, 19332), 'videocore6.driver.Driver', 'Driver', ([], {}), '()\n', (19330, 19332), False, 'from videocore6.driver import Driver\n'), ((20340, 20348), 'videocore6.driver.Driver', 'Driver', ([], {}), '()\n', (20346, 20348), False, 'from videocore6.driver import Driver\n'), ((20735, 20757), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (20755, 20757), False, 'import time\n'), ((20933, 20955), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (20953, 20955), False, 'import time\n'), ((21083, 21123), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {'dtype': '"""float32"""'}), "(data.shape[0], dtype='float32')\n", (21091, 21123), True, 'import numpy as np\n'), ((21595, 21635), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {'dtype': '"""float32"""'}), "(data.shape[0], dtype='float32')\n", (21603, 21635), True, 'import numpy as np\n'), ((6147, 6169), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (6167, 6169), False, 'import time\n'), ((7081, 7103), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (7101, 7103), False, 'import time\n'), ((7627, 7642), 'time.sleep', 'time.sleep', (['seg'], {}), '(seg)\n', (7637, 7642), False, 'import time\n'), ((7867, 7882), 'random.random', 'random.random', ([], {}), '()\n', (7880, 7882), False, 'import random\n'), ((8148, 8161), 'os.urandom', 'os.urandom', (['n'], {}), '(n)\n', (8158, 8161), False, 'import os\n'), ((15461, 15481), 'numpy.array_equal', 'np.array_equal', (['X', 'Y'], {}), '(X, Y)\n', (15475, 15481), True, 'import numpy as np\n'), ((18494, 18517), 'numpy.array_equiv', 'np.array_equiv', (['X', 'fill'], {}), '(X, fill)\n', (18508, 18517), True, 'import numpy as np\n'), ((19650, 
19672), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (19670, 19672), False, 'import time\n'), ((19780, 19802), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (19800, 19802), False, 'import time\n'), ((22148, 22169), 'numpy.sum', 'np.sum', (['naive_results'], {}), '(naive_results)\n', (22154, 22169), True, 'import numpy as np\n'), ((22170, 22191), 'numpy.sum', 'np.sum', (['sleep_results'], {}), '(sleep_results)\n', (22176, 22191), True, 'import numpy as np\n'), ((23577, 23585), 'videocore6.driver.Driver', 'Driver', ([], {}), '()\n', (23583, 23585), False, 'from videocore6.driver import Driver\n'), ((24115, 24149), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {'dtype': '"""float32"""'}), "((1, 10), dtype='float32')\n", (24123, 24149), True, 'import numpy as np\n'), ((27455, 27463), 'videocore6.driver.Driver', 'Driver', ([], {}), '()\n', (27461, 27463), False, 'from videocore6.driver import Driver\n'), ((28010, 28051), 'numpy.zeros', 'np.zeros', (['(max_nops, 10)'], {'dtype': '"""float32"""'}), "((max_nops, 10), dtype='float32')\n", (28018, 28051), True, 'import numpy as np\n'), ((5807, 5832), 'numpy.random.randn', 'np.random.randn', (['*A.shape'], {}), '(*A.shape)\n', (5822, 5832), True, 'import numpy as np\n'), ((5865, 5890), 'numpy.random.randn', 'np.random.randn', (['*B.shape'], {}), '(*B.shape)\n', (5880, 5890), True, 'import numpy as np\n'), ((5923, 5948), 'numpy.random.randn', 'np.random.randn', (['*C.shape'], {}), '(*C.shape)\n', (5938, 5948), True, 'import numpy as np\n'), ((21272, 21294), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (21292, 21294), False, 'import time\n'), ((21419, 21441), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (21439, 21441), False, 'import time\n'), ((21776, 21789), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (21786, 21789), False, 'import time\n'), ((21814, 21836), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (21834, 21836), 
False, 'import time\n'), ((21961, 21983), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (21981, 21983), False, 'import time\n'), ((6587, 6623), 'videocore6.pack_unpack', 'pack_unpack', (['"""f"""', '"""I"""', '[alpha, beta]'], {}), "('f', 'I', [alpha, beta])\n", (6598, 6623), False, 'from videocore6 import pack_unpack\n'), ((30169, 30237), 'os.popen', 'os.popen', (['"""vcgencmd measure_temp | cut -d = -f 2 | cut -d "\'" -f 1"""'], {}), '(\'vcgencmd measure_temp | cut -d = -f 2 | cut -d "\\\'" -f 1\')\n', (30177, 30237), False, 'import os\n'), ((24863, 24885), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (24883, 24885), False, 'import time\n'), ((25041, 25063), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (25061, 25063), False, 'import time\n'), ((25442, 25463), 'numpy.sum', 'np.sum', (['results[nops]'], {}), '(results[nops])\n', (25448, 25463), True, 'import numpy as np\n'), ((28798, 28820), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (28818, 28820), False, 'import time\n'), ((28976, 28998), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (28996, 28998), False, 'import time\n'), ((29493, 29514), 'numpy.sum', 'np.sum', (['results[nops]'], {}), '(results[nops])\n', (29499, 29514), True, 'import numpy as np\n'), ((20980, 21004), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (20989, 21004), True, 'import numpy as np\n'), ((21513, 21537), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (21522, 21537), True, 'import numpy as np\n'), ((22055, 22079), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (22064, 22079), True, 'import numpy as np\n'), ((24711, 24736), 'numpy.random.randn', 'np.random.randn', (['*X.shape'], {}), '(*X.shape)\n', (24726, 24736), True, 'import numpy as np\n'), ((28644, 28669), 'numpy.random.randn', 'np.random.randn', (['*X.shape'], {}), '(*X.shape)\n', (28659, 28669), 
True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import math
import numpy as np
import os
import glob
import sys
import gaussianFit as gF
import RL
import Accelerated as ac
import projections as proj
import GMM_sample as GMMs
import scipy
from scipy.stats import entropy
class OMDPI:
    """Online mirror-descent policy improvement agent.

    Maintains a two-component Gaussian-mixture policy over movement
    parameters.  The mixture weight is adapted by a (possibly accelerated)
    mirror-descent method from the ``Accelerated`` module, and the
    component means are re-fit by reward-weighted averaging of rollout
    noise.
    """
    def __init__(self, RL):
        # RL: configuration/task holder (n, times, n_dims, n_rfs, reps,
        # task, path, MethodMode, ...).
        self.RL = RL
        timeStep = self.RL.n*self.RL.times
        dimension = self.RL.n_dims
        rfs = self.RL.n_rfs
        reps = self.RL.reps
        timeIndex = range(timeStep)  # NOTE(review): unused
        # Buffers handed to the task during rollouts.
        self.G = np.zeros((reps,dimension,timeStep,rfs))
        self.psi = np.zeros((reps,timeStep,rfs))
    def init(self):
        """Reset policy parameters, the MD stepper and the entropy log file."""
        self.f = open(self.RL.path+'/'+'Entropy-'+self.RL.MethodMode+'.csv', 'a')
        timeStep = self.RL.n*self.RL.times
        # mu does not depend on the rollout index, so the leading axis is 1.
        self.mu0=np.zeros([1, self.RL.n_dims, timeStep, self.RL.n_rfs])
        self.mu0[0,:,0,:]=self.RL.meanIni+np.zeros([self.RL.n_dims, self.RL.n_rfs])
        #self.mu0[0,:,0,:]=np.array([[.55, .25, .2]]).T
        # TODO: copy to every timestep, just in case it is read there.
        for t in range(timeStep):
            self.mu0[0,:,t,:]=self.mu0[0,:,0,:]
        # Initial mixture: [weight, mean] per component, sharing mu0.
        self.distroIni=[\
            [self.RL.lambdakIni, self.mu0],\
            [1.0-self.RL.lambdakIni, self.mu0]]
        # Initialize the (accelerated) mirror-descent method.
        self.MDmethod=self.__amd_init(self.RL.MethodMode)
        # Initialize the discrete distributions from random normalized weights.
        self._tmp = [np.random.rand() for _ in range(self.RL.reps)]
        self.x0 = self.ztilde0 = np.array(self._tmp)/sum(self._tmp)
        self.x=self.x0
        self.ztilde=self.ztilde0
        self.distro=self.distroIni
        self.lambdak=self.RL.lambdakIni
    def __amd_init(self, methodMode):
        """Build the mirror-descent stepper: 'acc' = accelerated, 'norm' = plain."""
        # dimension: num of Rollouts
        d = self.RL.reps
        precision = 1e-10  # NOTE(review): unused
        epsilon = .3
        # Simplex constrained projections
        psExpS = proj.SimplexProjectionExpSort(dimension = d, epsilon = epsilon)
        psExpS0 = proj.SimplexProjectionExpSort(dimension = d, epsilon = 0)
        h = self.RL.h
        r = 3
        p1 = psExpS
        p2 = psExpS0
        # Step sizes for the two projections.
        s1 = h*p1.epsilon/(1.0+d*p1.epsilon)
        s2 = h
        #x0 = np.random.rand(d).T
        x0 = np.ones(d).T
        x0 = x0/np.sum(x0)
        if(methodMode == 'acc'):
            method = ac.AcceleratedMethod(p1, p2, s1, s2, r, x0, 'accelerated descent')
            #method = ac.AcceleratedMethodWithRestartGradScheme(p1, p2, s1, s2, r, x0, 'gradient restart')
        elif(methodMode == 'norm'):
            method = ac.MDMethod(p2, s2, x0, 'mirror descent')
        else:
            # NOTE(review): error() is not defined anywhere visible, so an
            # unknown mode raises NameError instead of a clean exception.
            error('unknown method mode in OMDPI')
        return method
    def __approximate(self, data, xtilde, ztilde, lambdak, params1, params2):
        """Re-fit the two components' means from weighted rollout noise.

        Returns the new parameter list
        ``[[lambdak, theta11], [1.0-lambdak, theta22]]``.
        """
        # Previous Gaussian-fit implementation, kept for reference:
        # mux, sigmax = gF.solver2(np.squeeze(data), xtilde)
        # muz, sigmaz = gF.solver2(np.squeeze(data), ztilde)
        # mux1 = np.array([[[[mux]]] for i in range(10)])
        # muz1 = np.array([[[[muz]]] for i in range(10)])
        # params=[[lambdak, mux1, sigmax], [1.0-lambdak, muz1, sigmaz]]
        # return params
        reps = self.RL.reps
        timestep = self.RL.n*self.RL.times
        # Flatten each component's t=0 mean to a vector.
        mean1 = np.squeeze(np.reshape(params1[1][0,:,0,:],[1,-1]))
        mean2 = np.squeeze(np.reshape(params2[1][0,:,0,:],[1,-1]))
        Delta1=0
        Delta2=0
        #theta1=0
        #theta2=0
        for i in range(reps):
            _data = np.squeeze(np.reshape(data[i,:,0,:],[1,-1]))
            epsilon1 = _data-mean1
            epsilon2 = _data-mean2
            # Accumulate the probability-weighted expected noise.
            Delta1 += epsilon1*xtilde[i]
            Delta2 += epsilon2*ztilde[i]
            #theta1 += _data*xtilde[i]
            #theta2 += _data*ztilde[i]
        # Update theta (component means).
        theta1=np.add(mean1,Delta1)
        theta2=np.add(mean2,Delta2)
        theta11=np.reshape(theta1,(1, self.RL.n_dims, 1, self.RL.n_rfs))
        theta22=np.reshape(theta2,(1, self.RL.n_dims, 1, self.RL.n_rfs))
        # TODO: brute-force copy across all timesteps.
        theta11=np.tile(theta11[:], (timestep,1))
        theta22=np.tile(theta22[:], (timestep,1))
        # Updated weights and means.
        params=[[lambdak, theta11], [1.0-lambdak, theta22]]
        return params
    def __update(self, data, distro, stdEps, mean, r):
        """One mirror-descent step on the discrete weights, then re-fit the GMM."""
        rfs = self.RL.n_rfs
        dims = self.RL.n_dims
        # Isotropic exploration covariance.
        valMat = stdEps**2*np.matrix(np.identity(dims*rfs))
        # Cumulative reward-to-go along each rollout.
        S=np.rot90(np.rot90((np.cumsum(np.rot90(np.rot90(r.T)), 0))))
        # weight, mean, variance
        distroX = [distro[0][0],distro[0][1], valMat]
        distroZ = [distro[1][0],distro[1][1], valMat]
        # Discrete distribution of the samples under the full mixture.
        x = GMMs.data_GMMs_likelihood(data, distroX, distroZ)
        #xtilde = GMMs.data_normalized_likelihood(data, distroX)
        # Discrete distribution of the samples under the second component.
        ztilde = GMMs.data_normalized_likelihood(data, distroZ)
        # Normalize the t=0 returns to [0, 1].
        g = S[0,:]
        g = (g-min(g))/(max(g)-min(g))
        if(not self.RL.skipMode):
            self.x = x
            self.ztilde = ztilde
        # Update the discrete distributions by mirror descent.
        if(self.RL.ztildeXequal):
            # Force both arguments to the uniform distribution.
            x, xtilde, ztilde, lambdak = self.MDmethod.step(g, np.ones_like(self.x)/sum(np.ones_like(self.x)), np.ones_like(self.ztilde)/sum(np.ones_like(self.ztilde)))
        else:
            x, xtilde, ztilde, lambdak = self.MDmethod.step(g, self.x, self.ztilde)
        if(self.RL.skipMode):
            self.x = x
            self.ztilde = ztilde
        # Log KL(x || ztilde) in bits (scipy entropy with base 2).
        self.f.write(str(entropy(self.x, self.ztilde, 2))+'\n')
        #self.f.write(str(entropy(x, ztilde, 2))+'\n')
        # print('x: '+str(self.x))
        # print('ztilde: '+str(self.ztilde))
        # print('entropy: '+str(entropy(self.x, self.ztilde, 2)))
        # Update the continuous (mixture) distribution.
        return self.__approximate(data, xtilde, ztilde, lambdak, distroX, distroZ)
    def act_and_train(self, obs, reward):
        # NOTE(review): stub — 'action' is never defined here, so calling
        # this raises NameError.  Kept as-is pending a real implementation.
        return action
    def __rollouts(self, reps, distro, stdEps):
        """Sample ``reps`` parameter sets and evaluate them on the task.

        Returns (R, mean, data): per-rollout rewards over time, the
        mixture means used for sampling, and the sampled parameters.
        """
        timeStep = self.RL.n*self.RL.times
        rfs = self.RL.n_rfs
        dims = self.RL.n_dims
        # Isotropic exploration covariance (zero for noise-free rollouts).
        valMat = stdEps**2*np.matrix(np.identity(dims*rfs))
        # weight, mean, variance
        distro1 = [distro[0][0],distro[0][1],valMat]
        distro2 = [distro[1][0],distro[1][1],valMat]
        mean=np.zeros([reps, dims, timeStep, rfs])
        data=np.zeros([reps, dims, timeStep, rfs])
        for k in range(reps):
            # Sample each rollout's parameters from the two-component mixture.
            data[k,:,0,:], mean[k,:,0,:]=GMMs.GMM_sample(distro1, distro2)
        # Parameters are constant over time: replicate t=0 to all timesteps.
        for t in range(timeStep):
            data[:,:,t,:]=data[:,:,0,:]
            mean[:,:,t,:]=mean[:,:,0,:]
        R=np.zeros((reps,timeStep))
        for k in range(reps):
            R[k] = self.RL.task.step(reps, mean, data - mean, self.G, self.psi, k)
        return np.array(R), mean, data
    def act(self, obs, gDof, _epsilon, theta, t):
        """Return per-DOF actions: theta plus the basis-projected noise."""
        dof = self.RL.n_dims
        xi, dxi, ddxi = obs[t]  # NOTE(review): unpacked but unused here
        # Todo: 10 = rfs
        action = np.zeros((dof,10))
        # Project the exploration noise onto each DOF's basis vector
        # (M eps = g g^T eps / g^T g); t=100 is the time by which the
        # via-point should be passed.
        for d in range(dof):
            gTg=np.sum(np.array(gDof[d])*np.array(gDof[d]))
            gTeps=np.array(gDof[d])*np.array(_epsilon[d][t])
            Meps=gDof[d]*gTeps/(gTg+1.e-10)
            action[d] = theta[d]+Meps
        return action
    def simulate(self, reps, step):
        """Run one update iteration: rollouts, distribution update, cost log."""
        distro=self.distro
        # Exploration noise is annealed linearly over updates, floored at 10%.
        noiseMult = float(self.RL.updates-step)/float(self.RL.updates)
        noiseMult = np.max((0.1, noiseMult))
        stdEps = self.RL.std*noiseMult
        # Noisy rollouts.
        r,mean,data = self.__rollouts(reps, distro, stdEps)
        # Update the distributions (skipped for single-rollout runs).
        if(reps>1): distro = self.__update(data, distro, stdEps, mean, r)
        # Noise-free rollout to evaluate the current policy.
        r,_,_ = self.__rollouts(1, distro, 0)
        # Record the cost of this update step.
        self.RL.cost[step] = np.sum(r)
        self.distro = distro
| [
"numpy.random.rand",
"GMM_sample.GMM_sample",
"numpy.array",
"numpy.rot90",
"numpy.reshape",
"GMM_sample.data_normalized_likelihood",
"numpy.max",
"Accelerated.AcceleratedMethod",
"projections.SimplexProjectionExpSort",
"numpy.identity",
"numpy.tile",
"scipy.stats.entropy",
"numpy.add",
"n... | [((573, 615), 'numpy.zeros', 'np.zeros', (['(reps, dimension, timeStep, rfs)'], {}), '((reps, dimension, timeStep, rfs))\n', (581, 615), True, 'import numpy as np\n'), ((640, 671), 'numpy.zeros', 'np.zeros', (['(reps, timeStep, rfs)'], {}), '((reps, timeStep, rfs))\n', (648, 671), True, 'import numpy as np\n'), ((872, 926), 'numpy.zeros', 'np.zeros', (['[1, self.RL.n_dims, timeStep, self.RL.n_rfs]'], {}), '([1, self.RL.n_dims, timeStep, self.RL.n_rfs])\n', (880, 926), True, 'import numpy as np\n'), ((1894, 1953), 'projections.SimplexProjectionExpSort', 'proj.SimplexProjectionExpSort', ([], {'dimension': 'd', 'epsilon': 'epsilon'}), '(dimension=d, epsilon=epsilon)\n', (1923, 1953), True, 'import projections as proj\n'), ((1976, 2029), 'projections.SimplexProjectionExpSort', 'proj.SimplexProjectionExpSort', ([], {'dimension': 'd', 'epsilon': '(0)'}), '(dimension=d, epsilon=0)\n', (2005, 2029), True, 'import projections as proj\n'), ((3793, 3814), 'numpy.add', 'np.add', (['mean1', 'Delta1'], {}), '(mean1, Delta1)\n', (3799, 3814), True, 'import numpy as np\n'), ((3829, 3850), 'numpy.add', 'np.add', (['mean2', 'Delta2'], {}), '(mean2, Delta2)\n', (3835, 3850), True, 'import numpy as np\n'), ((3867, 3924), 'numpy.reshape', 'np.reshape', (['theta1', '(1, self.RL.n_dims, 1, self.RL.n_rfs)'], {}), '(theta1, (1, self.RL.n_dims, 1, self.RL.n_rfs))\n', (3877, 3924), True, 'import numpy as np\n'), ((3940, 3997), 'numpy.reshape', 'np.reshape', (['theta2', '(1, self.RL.n_dims, 1, self.RL.n_rfs)'], {}), '(theta2, (1, self.RL.n_dims, 1, self.RL.n_rfs))\n', (3950, 3997), True, 'import numpy as np\n'), ((4045, 4079), 'numpy.tile', 'np.tile', (['theta11[:]', '(timestep, 1)'], {}), '(theta11[:], (timestep, 1))\n', (4052, 4079), True, 'import numpy as np\n'), ((4095, 4129), 'numpy.tile', 'np.tile', (['theta22[:]', '(timestep, 1)'], {}), '(theta22[:], (timestep, 1))\n', (4102, 4129), True, 'import numpy as np\n'), ((4679, 4728), 'GMM_sample.data_GMMs_likelihood', 
'GMMs.data_GMMs_likelihood', (['data', 'distroX', 'distroZ'], {}), '(data, distroX, distroZ)\n', (4704, 4728), True, 'import GMM_sample as GMMs\n'), ((4846, 4892), 'GMM_sample.data_normalized_likelihood', 'GMMs.data_normalized_likelihood', (['data', 'distroZ'], {}), '(data, distroZ)\n', (4877, 4892), True, 'import GMM_sample as GMMs\n'), ((6268, 6305), 'numpy.zeros', 'np.zeros', (['[reps, dims, timeStep, rfs]'], {}), '([reps, dims, timeStep, rfs])\n', (6276, 6305), True, 'import numpy as np\n'), ((6319, 6356), 'numpy.zeros', 'np.zeros', (['[reps, dims, timeStep, rfs]'], {}), '([reps, dims, timeStep, rfs])\n', (6327, 6356), True, 'import numpy as np\n'), ((6614, 6640), 'numpy.zeros', 'np.zeros', (['(reps, timeStep)'], {}), '((reps, timeStep))\n', (6622, 6640), True, 'import numpy as np\n'), ((6945, 6964), 'numpy.zeros', 'np.zeros', (['(dof, 10)'], {}), '((dof, 10))\n', (6953, 6964), True, 'import numpy as np\n'), ((7443, 7467), 'numpy.max', 'np.max', (['(0.1, noiseMult)'], {}), '((0.1, noiseMult))\n', (7449, 7467), True, 'import numpy as np\n'), ((7790, 7799), 'numpy.sum', 'np.sum', (['r'], {}), '(r)\n', (7796, 7799), True, 'import numpy as np\n'), ((969, 1010), 'numpy.zeros', 'np.zeros', (['[self.RL.n_dims, self.RL.n_rfs]'], {}), '([self.RL.n_dims, self.RL.n_rfs])\n', (977, 1010), True, 'import numpy as np\n'), ((1432, 1448), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1446, 1448), True, 'import numpy as np\n'), ((1512, 1531), 'numpy.array', 'np.array', (['self._tmp'], {}), '(self._tmp)\n', (1520, 1531), True, 'import numpy as np\n'), ((2245, 2255), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (2252, 2255), True, 'import numpy as np\n'), ((2274, 2284), 'numpy.sum', 'np.sum', (['x0'], {}), '(x0)\n', (2280, 2284), True, 'import numpy as np\n'), ((2340, 2406), 'Accelerated.AcceleratedMethod', 'ac.AcceleratedMethod', (['p1', 'p2', 's1', 's2', 'r', 'x0', '"""accelerated descent"""'], {}), "(p1, p2, s1, s2, r, x0, 'accelerated descent')\n", (2360, 2406), 
True, 'import Accelerated as ac\n'), ((3232, 3275), 'numpy.reshape', 'np.reshape', (['params1[1][0, :, 0, :]', '[1, -1]'], {}), '(params1[1][0, :, 0, :], [1, -1])\n', (3242, 3275), True, 'import numpy as np\n'), ((3299, 3342), 'numpy.reshape', 'np.reshape', (['params2[1][0, :, 0, :]', '[1, -1]'], {}), '(params2[1][0, :, 0, :], [1, -1])\n', (3309, 3342), True, 'import numpy as np\n'), ((6456, 6489), 'GMM_sample.GMM_sample', 'GMMs.GMM_sample', (['distro1', 'distro2'], {}), '(distro1, distro2)\n', (6471, 6489), True, 'import GMM_sample as GMMs\n'), ((6768, 6779), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (6776, 6779), True, 'import numpy as np\n'), ((2571, 2612), 'Accelerated.MDMethod', 'ac.MDMethod', (['p2', 's2', 'x0', '"""mirror descent"""'], {}), "(p2, s2, x0, 'mirror descent')\n", (2582, 2612), True, 'import Accelerated as ac\n'), ((3472, 3509), 'numpy.reshape', 'np.reshape', (['data[i, :, 0, :]', '[1, -1]'], {}), '(data[i, :, 0, :], [1, -1])\n', (3482, 3509), True, 'import numpy as np\n'), ((4389, 4412), 'numpy.identity', 'np.identity', (['(dims * rfs)'], {}), '(dims * rfs)\n', (4400, 4412), True, 'import numpy as np\n'), ((6092, 6115), 'numpy.identity', 'np.identity', (['(dims * rfs)'], {}), '(dims * rfs)\n', (6103, 6115), True, 'import numpy as np\n'), ((7112, 7129), 'numpy.array', 'np.array', (['gDof[d]'], {}), '(gDof[d])\n', (7120, 7129), True, 'import numpy as np\n'), ((7130, 7154), 'numpy.array', 'np.array', (['_epsilon[d][t]'], {}), '(_epsilon[d][t])\n', (7138, 7154), True, 'import numpy as np\n'), ((5177, 5197), 'numpy.ones_like', 'np.ones_like', (['self.x'], {}), '(self.x)\n', (5189, 5197), True, 'import numpy as np\n'), ((5225, 5250), 'numpy.ones_like', 'np.ones_like', (['self.ztilde'], {}), '(self.ztilde)\n', (5237, 5250), True, 'import numpy as np\n'), ((5492, 5523), 'scipy.stats.entropy', 'entropy', (['self.x', 'self.ztilde', '(2)'], {}), '(self.x, self.ztilde, 2)\n', (5499, 5523), False, 'from scipy.stats import entropy\n'), ((7057, 7074), 
'numpy.array', 'np.array', (['gDof[d]'], {}), '(gDof[d])\n', (7065, 7074), True, 'import numpy as np\n'), ((7075, 7092), 'numpy.array', 'np.array', (['gDof[d]'], {}), '(gDof[d])\n', (7083, 7092), True, 'import numpy as np\n'), ((4480, 4493), 'numpy.rot90', 'np.rot90', (['r.T'], {}), '(r.T)\n', (4488, 4493), True, 'import numpy as np\n'), ((5202, 5222), 'numpy.ones_like', 'np.ones_like', (['self.x'], {}), '(self.x)\n', (5214, 5222), True, 'import numpy as np\n'), ((5255, 5280), 'numpy.ones_like', 'np.ones_like', (['self.ztilde'], {}), '(self.ztilde)\n', (5267, 5280), True, 'import numpy as np\n')] |
import numpy as np
from collections import Counter
from termcolor import colored
from pyfiglet import *

# Fancy terminal banner.
print(colored("Advent of Code - Day 19", "yellow").center(80, "-"))
print(colored(figlet_format("Beacon Scanner",font="small",justify="center"), 'green'))
print(colored("Output","yellow").center(80, "-"))

# The 24 axis-aligned orientations of a scanner, expressed as lambdas.
# Each group of eight uses one cyclic permutation of the axes (via the
# lambda parameter order: x,y,z / y,z,x / z,x,y); within a group the sign
# patterns enumerate the eight orientation-preserving reflections/turns.
r = []
r.append(lambda x,y,z: np.array([x,y,z]))
r.append(lambda x,y,z: np.array([x,-z,y]))
r.append(lambda x,y,z: np.array([x,-y,-z]))
r.append(lambda x,y,z: np.array([x,z,-y]))
r.append(lambda x,y,z: np.array([-x,y,-z]))
r.append(lambda x,y,z: np.array([-x,-z,-y]))
r.append(lambda x,y,z: np.array([-x,-y,z]))
r.append(lambda x,y,z: np.array([-x,z,y]))
r.append(lambda y,z,x: np.array([x,y,z]))
r.append(lambda y,z,x: np.array([x,-z,y]))
r.append(lambda y,z,x: np.array([x,-y,-z]))
r.append(lambda y,z,x: np.array([x,z,-y]))
r.append(lambda y,z,x: np.array([-x,y,-z]))
r.append(lambda y,z,x: np.array([-x,-z,-y]))
r.append(lambda y,z,x: np.array([-x,-y,z]))
r.append(lambda y,z,x: np.array([-x,z,y]))
r.append(lambda z,x,y: np.array([x,y,z]))
r.append(lambda z,x,y: np.array([x,-z,y]))
r.append(lambda z,x,y: np.array([x,-y,-z]))
r.append(lambda z,x,y: np.array([x,z,-y]))
r.append(lambda z,x,y: np.array([-x,y,-z]))
r.append(lambda z,x,y: np.array([-x,-z,-y]))
r.append(lambda z,x,y: np.array([-x,-y,z]))
r.append(lambda z,x,y: np.array([-x,z,y]))

# Parse the input: a "scanner" header starts a new beacon list, comma
# lines are beacon coordinates, a blank line terminates the current list.
data = [x for x in open("../Input/day19.txt", "r").read().splitlines()]
scanners = []
for d in data:
    if 'scanner' in d:
        beacons = []
    if ',' in d:
        beacons.append([int(x) for x in d.split(',')])
    if len(d) == 0:
        scanners.append(beacons)
# The last scanner block is not followed by a blank line, so flush it here.
scanners.append(beacons)

# Scanner 0 defines the global frame; its beacons seed the known set.
signal_id = 0
signal_pos = [0,0,0]
beacons = set()
signals = {}          # scanner index -> position in the global frame
threshold = 12        # puzzle rule: 12 shared beacons identify an overlap
signals[signal_id] = signal_pos
beacons.update([tuple(x) for x in scanners[0]])

# Repeatedly try to align unplaced scanners against already-placed ones
# until every scanner has a known position.
while len(signals) < len(scanners):
    for signal_id, reference in enumerate(scanners):
        if signal_id in signals:
            # Pairwise Manhattan distances within the reference scanner;
            # these fingerprints are invariant under rotation/translation.
            dist1 = np.array([[np.abs(np.subtract(x,y)).sum() for x in reference] for y in reference])
            for i, s in enumerate(scanners):
                if i not in signals and i != signal_id:
                    # print(f'Checking sensor {i}')
                    dist2 = np.array([[np.abs(np.subtract(x,y)).sum() for x in s] for y in s])
                    overlaps = []
                    # Pair up beacons whose distance fingerprints share
                    # at least `threshold` entries.
                    for n1, row1 in enumerate(dist1):
                        for n2, row2 in enumerate(dist2):
                            o = list((Counter(row1) & Counter(row2)).elements())
                            if len(o) >= threshold:
                                overlaps.append((n1, n2, len(o)))
                                break
                    if len(overlaps) >= threshold:
                        originals = [reference[x[0]] for x in overlaps]
                        # Try all 24 orientations until every matched pair
                        # differs by one common translation vector.
                        for rot in r:
                            trans = [rot(*s[x[1]]) for x in overlaps]
                            diff = [tuple(x-y) for x,y in zip(originals, trans)]
                            if len(set(diff)) == 1:
                                signal_pos = list(diff[0])
                                signals[i] = signal_pos
                                # Re-express scanner i's beacons in the global frame.
                                scanners[i] = [list(signal_pos + rot(*x)) for x in s]
                                beacons.update([tuple(x) for x in scanners[i]])
                                break

print('\nPuzzle 1: ', len(beacons))
# Puzzle 2: the scanner positions were already recovered into `signals`
# while assembling the beacon map for puzzle 1, so there is no need to
# re-read the input and re-run the whole alignment search (the original
# duplicated the entire puzzle-1 pipeline here).  The answer is simply
# the largest pairwise Manhattan distance between any two scanners.
max_dist = 0
for signal1 in signals.values():
    for signal2 in signals.values():
        # Manhattan (taxicab) distance between the two scanner positions.
        max_dist = max(max_dist, sum(abs(a - b) for a, b in zip(signal1, signal2)))

print('Puzzle 2: ', max_dist,end='\n\n')
print('Puzzle 2: ', max_dist,end='\n\n')
print(colored("=".center(71, "="), "yellow")) | [
"collections.Counter",
"numpy.array",
"termcolor.colored",
"numpy.subtract"
] | [((340, 359), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (348, 359), True, 'import numpy as np\n'), ((383, 403), 'numpy.array', 'np.array', (['[x, -z, y]'], {}), '([x, -z, y])\n', (391, 403), True, 'import numpy as np\n'), ((427, 448), 'numpy.array', 'np.array', (['[x, -y, -z]'], {}), '([x, -y, -z])\n', (435, 448), True, 'import numpy as np\n'), ((472, 492), 'numpy.array', 'np.array', (['[x, z, -y]'], {}), '([x, z, -y])\n', (480, 492), True, 'import numpy as np\n'), ((516, 537), 'numpy.array', 'np.array', (['[-x, y, -z]'], {}), '([-x, y, -z])\n', (524, 537), True, 'import numpy as np\n'), ((561, 583), 'numpy.array', 'np.array', (['[-x, -z, -y]'], {}), '([-x, -z, -y])\n', (569, 583), True, 'import numpy as np\n'), ((607, 628), 'numpy.array', 'np.array', (['[-x, -y, z]'], {}), '([-x, -y, z])\n', (615, 628), True, 'import numpy as np\n'), ((652, 672), 'numpy.array', 'np.array', (['[-x, z, y]'], {}), '([-x, z, y])\n', (660, 672), True, 'import numpy as np\n'), ((697, 716), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (705, 716), True, 'import numpy as np\n'), ((740, 760), 'numpy.array', 'np.array', (['[x, -z, y]'], {}), '([x, -z, y])\n', (748, 760), True, 'import numpy as np\n'), ((784, 805), 'numpy.array', 'np.array', (['[x, -y, -z]'], {}), '([x, -y, -z])\n', (792, 805), True, 'import numpy as np\n'), ((829, 849), 'numpy.array', 'np.array', (['[x, z, -y]'], {}), '([x, z, -y])\n', (837, 849), True, 'import numpy as np\n'), ((873, 894), 'numpy.array', 'np.array', (['[-x, y, -z]'], {}), '([-x, y, -z])\n', (881, 894), True, 'import numpy as np\n'), ((918, 940), 'numpy.array', 'np.array', (['[-x, -z, -y]'], {}), '([-x, -z, -y])\n', (926, 940), True, 'import numpy as np\n'), ((964, 985), 'numpy.array', 'np.array', (['[-x, -y, z]'], {}), '([-x, -y, z])\n', (972, 985), True, 'import numpy as np\n'), ((1009, 1029), 'numpy.array', 'np.array', (['[-x, z, y]'], {}), '([-x, z, y])\n', (1017, 1029), True, 'import numpy as np\n'), 
((1054, 1073), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1062, 1073), True, 'import numpy as np\n'), ((1097, 1117), 'numpy.array', 'np.array', (['[x, -z, y]'], {}), '([x, -z, y])\n', (1105, 1117), True, 'import numpy as np\n'), ((1141, 1162), 'numpy.array', 'np.array', (['[x, -y, -z]'], {}), '([x, -y, -z])\n', (1149, 1162), True, 'import numpy as np\n'), ((1186, 1206), 'numpy.array', 'np.array', (['[x, z, -y]'], {}), '([x, z, -y])\n', (1194, 1206), True, 'import numpy as np\n'), ((1230, 1251), 'numpy.array', 'np.array', (['[-x, y, -z]'], {}), '([-x, y, -z])\n', (1238, 1251), True, 'import numpy as np\n'), ((1275, 1297), 'numpy.array', 'np.array', (['[-x, -z, -y]'], {}), '([-x, -z, -y])\n', (1283, 1297), True, 'import numpy as np\n'), ((1321, 1342), 'numpy.array', 'np.array', (['[-x, -y, z]'], {}), '([-x, -y, z])\n', (1329, 1342), True, 'import numpy as np\n'), ((1366, 1386), 'numpy.array', 'np.array', (['[-x, z, y]'], {}), '([-x, z, y])\n', (1374, 1386), True, 'import numpy as np\n'), ((3454, 3473), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3462, 3473), True, 'import numpy as np\n'), ((3497, 3517), 'numpy.array', 'np.array', (['[x, -z, y]'], {}), '([x, -z, y])\n', (3505, 3517), True, 'import numpy as np\n'), ((3541, 3562), 'numpy.array', 'np.array', (['[x, -y, -z]'], {}), '([x, -y, -z])\n', (3549, 3562), True, 'import numpy as np\n'), ((3586, 3606), 'numpy.array', 'np.array', (['[x, z, -y]'], {}), '([x, z, -y])\n', (3594, 3606), True, 'import numpy as np\n'), ((3630, 3651), 'numpy.array', 'np.array', (['[-x, y, -z]'], {}), '([-x, y, -z])\n', (3638, 3651), True, 'import numpy as np\n'), ((3675, 3697), 'numpy.array', 'np.array', (['[-x, -z, -y]'], {}), '([-x, -z, -y])\n', (3683, 3697), True, 'import numpy as np\n'), ((3721, 3742), 'numpy.array', 'np.array', (['[-x, -y, z]'], {}), '([-x, -y, z])\n', (3729, 3742), True, 'import numpy as np\n'), ((3766, 3786), 'numpy.array', 'np.array', (['[-x, z, y]'], {}), '([-x, z, 
y])\n', (3774, 3786), True, 'import numpy as np\n'), ((3811, 3830), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3819, 3830), True, 'import numpy as np\n'), ((3854, 3874), 'numpy.array', 'np.array', (['[x, -z, y]'], {}), '([x, -z, y])\n', (3862, 3874), True, 'import numpy as np\n'), ((3898, 3919), 'numpy.array', 'np.array', (['[x, -y, -z]'], {}), '([x, -y, -z])\n', (3906, 3919), True, 'import numpy as np\n'), ((3943, 3963), 'numpy.array', 'np.array', (['[x, z, -y]'], {}), '([x, z, -y])\n', (3951, 3963), True, 'import numpy as np\n'), ((3987, 4008), 'numpy.array', 'np.array', (['[-x, y, -z]'], {}), '([-x, y, -z])\n', (3995, 4008), True, 'import numpy as np\n'), ((4032, 4054), 'numpy.array', 'np.array', (['[-x, -z, -y]'], {}), '([-x, -z, -y])\n', (4040, 4054), True, 'import numpy as np\n'), ((4078, 4099), 'numpy.array', 'np.array', (['[-x, -y, z]'], {}), '([-x, -y, z])\n', (4086, 4099), True, 'import numpy as np\n'), ((4123, 4143), 'numpy.array', 'np.array', (['[-x, z, y]'], {}), '([-x, z, y])\n', (4131, 4143), True, 'import numpy as np\n'), ((4168, 4187), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (4176, 4187), True, 'import numpy as np\n'), ((4211, 4231), 'numpy.array', 'np.array', (['[x, -z, y]'], {}), '([x, -z, y])\n', (4219, 4231), True, 'import numpy as np\n'), ((4255, 4276), 'numpy.array', 'np.array', (['[x, -y, -z]'], {}), '([x, -y, -z])\n', (4263, 4276), True, 'import numpy as np\n'), ((4300, 4320), 'numpy.array', 'np.array', (['[x, z, -y]'], {}), '([x, z, -y])\n', (4308, 4320), True, 'import numpy as np\n'), ((4344, 4365), 'numpy.array', 'np.array', (['[-x, y, -z]'], {}), '([-x, y, -z])\n', (4352, 4365), True, 'import numpy as np\n'), ((4389, 4411), 'numpy.array', 'np.array', (['[-x, -z, -y]'], {}), '([-x, -z, -y])\n', (4397, 4411), True, 'import numpy as np\n'), ((4435, 4456), 'numpy.array', 'np.array', (['[-x, -y, z]'], {}), '([-x, -y, z])\n', (4443, 4456), True, 'import numpy as np\n'), ((4480, 4500), 
'numpy.array', 'np.array', (['[-x, z, y]'], {}), '([-x, z, y])\n', (4488, 4500), True, 'import numpy as np\n'), ((110, 154), 'termcolor.colored', 'colored', (['"""Advent of Code - Day 19"""', '"""yellow"""'], {}), "('Advent of Code - Day 19', 'yellow')\n", (117, 154), False, 'from termcolor import colored\n'), ((265, 292), 'termcolor.colored', 'colored', (['"""Output"""', '"""yellow"""'], {}), "('Output', 'yellow')\n", (272, 292), False, 'from termcolor import colored\n'), ((2007, 2024), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (2018, 2024), True, 'import numpy as np\n'), ((5121, 5138), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (5132, 5138), True, 'import numpy as np\n'), ((2271, 2288), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (2282, 2288), True, 'import numpy as np\n'), ((2504, 2517), 'collections.Counter', 'Counter', (['row1'], {}), '(row1)\n', (2511, 2517), False, 'from collections import Counter\n'), ((2520, 2533), 'collections.Counter', 'Counter', (['row2'], {}), '(row2)\n', (2527, 2533), False, 'from collections import Counter\n'), ((5385, 5402), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (5396, 5402), True, 'import numpy as np\n'), ((5618, 5631), 'collections.Counter', 'Counter', (['row1'], {}), '(row1)\n', (5625, 5631), False, 'from collections import Counter\n'), ((5634, 5647), 'collections.Counter', 'Counter', (['row2'], {}), '(row2)\n', (5641, 5647), False, 'from collections import Counter\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from keras import backend as K
from keras import initializers
from keras import regularizers, constraints
from keras.engine import Layer, InputSpec
# Backend dispatch: Keras can run on TensorFlow or Theano, and the two
# expose different primitives, so `logsumexp` and `batch_gather` are
# defined once per backend under the same names.
if K._BACKEND == 'tensorflow':
    import tensorflow as tf

    def logsumexp(x, axis=None):
        '''Returns `log(sum(exp(x), axis=axis))` with improved numerical stability.
        '''
        # TensorFlow ships a fused, numerically stable implementation.
        return tf.reduce_logsumexp(x, axis=[axis])

    def batch_gather(reference, indices):
        '''Batchwise gathering of row indices.

        The numpy equivalent is reference[np.arange(batch_size), indices].

        # Arguments
            reference: tensor with ndim >= 2 of shape
              (batch_size, dim1, dim2, ..., dimN)
            indices: 1d integer tensor of shape (batch_size) satisfying
              0 <= i < dim2 for each element i.

        # Returns
            A tensor with shape (batch_size, dim2, ..., dimN)
            equal to reference[1:batch_size, indices]
        '''
        batch_size = K.shape(reference)[0]
        # Pair every batch position with its row index, then gather all
        # (batch, row) coordinates in one gather_nd call.
        indices = tf.stack([tf.range(batch_size), indices], axis=1)
        return tf.gather_nd(reference, indices)
else:
    import theano.tensor as T

    def logsumexp(x, axis=None):
        '''Returns `log(sum(exp(x), axis=axis))` with improved numerical stability.
        '''
        # Classic max-shift trick: subtract the per-axis maximum before
        # exponentiating so exp() cannot overflow.
        xmax = K.max(x, axis=axis, keepdims=True)
        xmax_ = K.max(x, axis=axis)
        return xmax_ + K.log(K.sum(K.exp(x - xmax), axis=axis))

    def batch_gather(reference, indices):
        '''Batchwise gathering of row indices.

        The numpy equivalent is reference[np.arange(batch_size), indices].

        # Arguments
            reference: tensor with ndim >= 2 of shape
              (batch_size, dim1, dim2, ..., dimN)
            indices: 1d integer tensor of shape (batch_size) satisfying
              0 <= i < dim2 for each element i.

        # Returns
            A tensor with shape (batch_size, dim2, ..., dimN)
            equal to reference[1:batch_size, indices]
        '''
        batch_size = K.shape(reference)[0]
        # Theano supports numpy-style advanced indexing directly.
        return reference[T.arange(batch_size), indices]
def path_energy(y, x, U, b_start=None, b_end=None, mask=None):
    '''Energy of the tag path `y` for observations `x` (optionally masked),
    transition energies `U` and boundary energies `b_start`/`b_end`.

    Folds the boundary potentials into the observations first, then
    delegates to the boundary-free scorer `path_energy0`.
    '''
    augmented = add_boundary_energy(x, b_start, b_end, mask)
    return path_energy0(y, augmented, U, mask)
def path_energy0(y, x, U, mask=None):
    '''Path energy without boundary potential handling.

    # Arguments
        y: integer tensor of tag indices, shape (batch_size, timesteps).
        x: observation energies, shape (batch_size, timesteps, n_classes).
        U: transition energies, shape (n_classes, n_classes).
        mask: optional binary mask, shape (batch_size, timesteps).

    # Returns
        A tensor of shape (batch_size,) holding the energy of each path.
    '''
    n_classes = K.shape(x)[2]
    y_one_hot = K.one_hot(y, n_classes)

    # Tag path energy: sum of the observation energies of the chosen tags.
    energy = K.sum(x * y_one_hot, 2)
    energy = K.sum(energy, 1)

    # Transition energy between consecutive tags.
    y_t = y[:, :-1]
    y_tp1 = y[:, 1:]
    U_flat = K.reshape(U, [-1])
    # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat:
    flat_indices = y_t * n_classes + y_tp1
    U_y_t_tp1 = K.gather(U_flat, flat_indices)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        y_t_mask = mask[:, :-1]
        y_tp1_mask = mask[:, 1:]
        # A transition only counts when both of its endpoints are unmasked.
        U_y_t_tp1 *= y_t_mask * y_tp1_mask

    energy += K.sum(U_y_t_tp1, axis=1)
    return energy
def sparse_chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Given the true sparsely encoded tag sequence y, input x (with mask),
    transition energies U, boundary energies b_start and b_end, it computes
    the loss function of a Linear Chain Conditional Random Field:
    loss(y, x) = NLL(P(y|x)), where P(y|x) = exp(E(y, x)) / Z.
    So, loss(y, x) = - E(y, x) + log(Z)
    Here, E(y, x) is the tag path energy, and Z is the normalization constant.
    The value log(Z) is also called free energy.
    '''
    x = add_boundary_energy(x, b_start, b_end, mask)
    energy = path_energy0(y, x, U, mask)
    # -log P(y|x) = -E(y, x) + log(Z); free_energy0 contributes log(Z).
    energy -= free_energy0(x, U, mask)
    # Expand to (batch_size, 1) so Keras treats it as a per-sample loss.
    return K.expand_dims(-energy, -1)
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Linear chain CRF loss for one-hot encoded tag sequences `y`.

    Converts the one-hot targets to sparse integer indices and defers to
    `sparse_chain_crf_loss`.
    '''
    sparse_targets = K.cast(K.argmax(y, -1), 'int32')
    return sparse_chain_crf_loss(sparse_targets, x, U, b_start, b_end, mask)
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, it adds the start boundary energy b_start (resp.
    end boundary energy b_end) on the start (resp. end) elements and multiplies
    the mask.'''
    if mask is None:
        # Unmasked case: the boundaries are simply the first/last timestep.
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            # First unmasked position: mask is 1 where its left neighbour is 0.
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            # Last unmasked position: mask is 1 where its right neighbour is 0.
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    x = add_boundary_energy(x, b_start, b_end, mask)

    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    # Forward pass: alpha carries the best score ending in each tag;
    # gamma records the argmax backpointers for the backward pass.
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    # Backward pass: follow the backpointers to recover the best path.
    y = _backward(gamma, mask)
    return y
def free_energy(x, U, b_start=None, b_end=None, mask=None):
    '''Sum of all path energies for input `x` over every possible tag
    sequence (the log-partition function log(Z)).

    Folds the boundary potentials into the observations first, then
    delegates to the boundary-free computation.
    '''
    augmented = add_boundary_energy(x, b_start, b_end, mask)
    return free_energy0(augmented, U, mask)
def free_energy0(x, U, mask=None):
    '''Free energy without boundary potential handling.'''
    initial_states = [x[:, 0, :]]
    # Forward algorithm: the per-step reduction is log-sum-exp instead of
    # max, which yields log(Z) rather than the Viterbi score.
    last_alpha, _ = _forward(x,
                             lambda B: [logsumexp(B, axis=1)],
                             initial_states,
                             U,
                             mask)
    return last_alpha[:, 0]
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.

    `reduce_step` decides the semantics: log-sum-exp gives the free
    energy, (argmax, max) gives Viterbi scores plus backpointers.
    '''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        # Combine the previous scores with this step's (prev_tag, tag)
        # energy matrix, then reduce over the previous-tag axis.
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    # Broadcast the transition matrix over the batch and time axes.
    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        # Zero out transitions whose endpoints are masked.
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    # Append a dummy zero step so K.rnn also emits the final state.
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.

    Walks the backpointer tensor `gamma` produced by the Viterbi forward
    pass from the last timestep to the first, reconstructing the best path.
    '''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        # Follow the backpointer stored for the tag chosen one step later.
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                      gamma,
                      initial_states,
                      go_backwards=True)
    # K.rnn walked the sequence backwards, so flip the result in time.
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y
class ChainCRF(Layer):
    '''A Linear Chain Conditional Random Field output layer.

    It carries the loss function and its weights for computing
    the global tag sequence scores. While training it acts as
    the identity function that passes the inputs to the subsequently
    used loss function. While testing it applies Viterbi decoding
    and returns the best scoring tag sequence as one-hot encoded vectors.

    # Arguments
        init: weight initialization function for chain energies U.
            Can be the name of an existing function (str),
            or a Theano function (see: [initializations](../initializations.md)).
        U_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the transition weight matrix.
        b_start_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the start bias b.
        b_end_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the end bias b.
        U_constraint: instance of the [constraints](../constraints.md)
            module, applied to the transition weight matrix.
        b_start_constraint: instance of the [constraints](../constraints.md)
            module, applied to the start bias b.
        b_end_constraint: instance of the [constraints](../constraints.md)
            module, applied to the end bias b.
        weights: list of Numpy arrays for initializing [U, b_start, b_end].
            Thus it should be a list of 3 elements of shape
            [(n_classes, n_classes), (n_classes, ), (n_classes, )]

    # Input shape
        3D tensor with shape `(nb_samples, timesteps, nb_classes)`, where
        `timesteps >= 2` and `nb_classes >= 2`.

    # Output shape
        Same shape as input.

    # Masking
        This layer supports masking for input sequences of variable length.

    # Example
    ```python
    # As the last layer of sequential layer with
    # model.output_shape == (None, timesteps, nb_classes)
    crf = ChainCRF()
    model.add(crf)
    # now: model.output_shape == (None, timesteps, nb_classes)

    # Compile model with chain crf loss (and one-hot encoded labels) and accuracy
    model.compile(loss=crf.loss, optimizer='sgd', metrics=['accuracy'])

    # Alternatively, compile model with sparsely encoded labels and sparse accuracy:
    model.compile(loss=crf.sparse_loss, optimizer='sgd', metrics=['sparse_categorical_accuracy'])
    ```

    # Gotchas

    ## Model loading
    When you want to load a saved model that has a crf output, then loading
    the model with 'keras.models.load_model' won't work properly because
    the reference of the loss function to the transition parameters is lost. To
    fix this, you need to use the parameter 'custom_objects' as follows:
    ```python
    from keras.layer.crf import create_custom_objects
    model = keras.models.load_model(filename, custom_objects=create_custom_objects())
    ```

    ## Temporal sample weights
    Given a ChainCRF instance crf both loss functions, crf.loss and crf.sparse_loss
    return a tensor of shape (batch_size, 1) and not (batch_size, maxlen),
    so per-timestep sample weighting (temporal mode) will not work as expected.
    '''

    def __init__(self, init='glorot_uniform',
                 U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
                 U_constraint=None, b_start_constraint=None, b_end_constraint=None,
                 weights=None,
                 **kwargs):
        self.supports_masking = True
        # The layer behaves differently in train (identity) and test
        # (Viterbi decoding) phase; see `call`.
        self.uses_learning_phase = True
        self.input_spec = [InputSpec(ndim=3)]
        self.init = initializers.get(init)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_start_regularizer = regularizers.get(b_start_regularizer)
        self.b_end_regularizer = regularizers.get(b_end_regularizer)
        self.U_constraint = constraints.get(U_constraint)
        self.b_start_constraint = constraints.get(b_start_constraint)
        self.b_end_constraint = constraints.get(b_end_constraint)
        self.initial_weights = weights
        super(ChainCRF, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):  # keras 1 name: get_output_shape_for
        assert input_shape and len(input_shape) == 3
        return (input_shape[0], input_shape[1], input_shape[2])

    def compute_mask(self, input, mask=None):
        if mask is not None:
            # Collapse the per-timestep mask to one flag per sample.
            return K.any(mask, axis=1)
        return mask

    def _fetch_mask(self):
        # Retrieve the input mask of the first inbound node, if the layer
        # has been connected already; used by the loss functions.
        mask = None
        if self.inbound_nodes:
            mask = self.inbound_nodes[0].input_masks[0]
        return mask

    def build(self, input_shape):
        assert len(input_shape) == 3
        n_classes = input_shape[2]
        n_steps = input_shape[1]
        assert n_classes >= 2
        assert n_steps is None or n_steps >= 2
        self.input_spec = [InputSpec(dtype=K.floatx(),
                                     shape=(None, n_steps, n_classes))]

        # U: tag-to-tag transition energies; b_start/b_end: boundary
        # energies added at the first/last (unmasked) timestep.
        self.U = self.add_weight((n_classes, n_classes),
                                 initializer=self.init,
                                 name='{}_U'.format(self.name),
                                 regularizer=self.U_regularizer,
                                 constraint=self.U_constraint)

        self.b_start = self.add_weight((n_classes, ),
                                       initializer='zero',
                                       name='{}_b_start'.format(self.name),
                                       regularizer=self.b_start_regularizer,
                                       constraint=self.b_start_constraint)

        self.b_end = self.add_weight((n_classes, ),
                                     initializer='zero',
                                     name='{}_b_end'.format(self.name),
                                     regularizer=self.b_end_regularizer,
                                     constraint=self.b_end_constraint)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

        self.built = True

    def call(self, x, mask=None):
        # Identity while training (the loss works on the raw energies);
        # one-hot encoded Viterbi decoding at test time.
        y_pred = viterbi_decode(x, self.U, self.b_start, self.b_end, mask)
        nb_classes = self.input_spec[0].shape[2]
        y_pred_one_hot = K.one_hot(y_pred, nb_classes)
        return K.in_train_phase(x, y_pred_one_hot)

    def loss(self, y_true, y_pred):
        '''Linear Chain Conditional Random Field loss function.
        '''
        mask = self._fetch_mask()
        return chain_crf_loss(y_true, y_pred, self.U, self.b_start, self.b_end, mask)

    def sparse_loss(self, y_true, y_pred):
        '''Linear Chain Conditional Random Field loss function with sparse
        tag sequences.
        '''
        y_true = K.argmax(y_true, -1)
        y_true = K.cast(y_true, 'int32')
        # y_true = K.squeeze(y_true, 2)
        mask = self._fetch_mask()
        return sparse_chain_crf_loss(y_true, y_pred, self.U, self.b_start, self.b_end, mask)

    def get_config(self):
        # NOTE(review): `self.init` is stored as the initializer object
        # itself rather than a serialized form -- confirm this round-trips
        # through get_config/from_config as intended.
        config = {'init': self.init,
                  'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
                  'b_start_regularizer': self.b_start_regularizer.get_config() if self.b_start_regularizer else None,
                  'b_end_regularizer': self.b_end_regularizer.get_config() if self.b_end_regularizer else None,
                  'U_constraint': self.U_constraint.get_config() if self.U_constraint else None,
                  'b_start_constraint': self.b_start_constraint.get_config() if self.b_start_constraint else None,
                  'b_end_constraint': self.b_end_constraint.get_config() if self.b_end_constraint else None,
                  }
        base_config = super(ChainCRF, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def create_custom_objects():
    '''Returns the custom objects, needed for loading a persisted model.

    The loss functions of a ChainCRF layer are bound to that layer's
    transition weights, and the binding is lost on save.  The wrapper
    class below captures the layer instance as it is re-created by
    `load_model`, so `loss`/`sparse_loss` can be resolved against it.
    '''
    instanceHolder = {'instance': None}

    class ClassWrapper(ChainCRF):
        def __init__(self, *args, **kwargs):
            # Remember the (most recently) deserialized CRF layer.
            instanceHolder['instance'] = self
            super(ClassWrapper, self).__init__(*args, **kwargs)

    def loss(*args):
        method = getattr(instanceHolder['instance'], 'loss')
        return method(*args)

    def sparse_loss(*args):
        method = getattr(instanceHolder['instance'], 'sparse_loss')
        return method(*args)

    return {'ChainCRF': ClassWrapper, 'loss': loss, 'sparse_loss': sparse_loss}
if __name__ == '__main__':
    # Smoke test: a two-input / two-output toy model where a shared
    # embedding feeds a 'main' and an 'aux' branch, each ending in its
    # own ChainCRF layer, trained with the sparse CRF loss.
    from keras.models import Model
    from keras.layers import Embedding, Dense
    from keras.models import load_model
    import numpy as np
    from keras.layers import Input, concatenate

    vocab_size = 20
    n_classes = 11
    # NOTE: historical observations from earlier experiments:
    #   when use crf.sparse_loss, error in squeeze dimension 2.
    #   when use different ChainCRF layer, ValueError: None values not supported.

    emb = Embedding(vocab_size, n_classes)
    inp = Input(shape=(2,))
    x = emb(inp)
    inp2 = Input(shape=(2,))
    xx = emb(inp2)
    inp_aux = Input(shape=(2,))
    x_aux = emb(inp_aux)
    inp2_aux = Input(shape=(2,))
    xx_aux = emb(inp2_aux)
    main_input = concatenate([x, x_aux], axis=-1)
    main_input = Dense(n_classes)(main_input)  # was a hard-coded Dense(11)
    aux_input = concatenate([xx, xx_aux], axis=-1)
    aux_input = Dense(n_classes)(aux_input)

    # BUG FIX: the original bound both layers to one name `crf`, so the
    # second assignment shadowed the first and *both* entries of the loss
    # dict used the aux layer's transition parameters/mask.  Keep one
    # reference per layer so each output trains with its own CRF.
    crf_main = ChainCRF(name='main')
    out1 = crf_main(main_input)
    crf_aux = ChainCRF(name='aux')
    out2 = crf_aux(aux_input)
    model = Model([inp, inp2, inp_aux, inp2_aux], [out1, out2])
    # model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd') # ok
    model.compile(loss={'main': crf_main.sparse_loss, 'aux': crf_aux.sparse_loss}, optimizer='sgd')
    model.summary()

    # Train on one random mini batch.
    batch_size, maxlen = 2, 2
    x = np.random.randint(1, vocab_size, size=(batch_size, maxlen))
    y = np.random.randint(n_classes, size=(batch_size, maxlen))
    # One-hot encode the sparse tags; sparse_loss argmaxes them back.
    y = np.eye(n_classes)[y]
    model.fit([x, x, x, x], [y, y])
    print(x)
    print(y)

    # Round-trip through save/load to exercise create_custom_objects().
    model.save('../model/tmpModel2.h5')
    model = load_model('../model/tmpModel2.h5', custom_objects=create_custom_objects())
    print(model.predict(x=[x, x, x, x]))
    print('\nok')
| [
"keras.engine.InputSpec",
"keras.backend.shape",
"keras.backend.sum",
"keras.backend.reshape",
"keras.backend.floatx",
"keras.backend.squeeze",
"keras.layers.Dense",
"keras.backend.greater",
"keras.backend.max",
"theano.tensor.arange",
"keras.layers.concatenate",
"keras.backend.rnn",
"keras.... | [((2677, 2700), 'keras.backend.one_hot', 'K.one_hot', (['y', 'n_classes'], {}), '(y, n_classes)\n', (2686, 2700), True, 'from keras import backend as K\n'), ((2737, 2760), 'keras.backend.sum', 'K.sum', (['(x * y_one_hot)', '(2)'], {}), '(x * y_one_hot, 2)\n', (2742, 2760), True, 'from keras import backend as K\n'), ((2774, 2790), 'keras.backend.sum', 'K.sum', (['energy', '(1)'], {}), '(energy, 1)\n', (2779, 2790), True, 'from keras import backend as K\n'), ((2870, 2888), 'keras.backend.reshape', 'K.reshape', (['U', '[-1]'], {}), '(U, [-1])\n', (2879, 2888), True, 'from keras import backend as K\n'), ((3022, 3052), 'keras.backend.gather', 'K.gather', (['U_flat', 'flat_indices'], {}), '(U_flat, flat_indices)\n', (3030, 3052), True, 'from keras import backend as K\n'), ((3242, 3266), 'keras.backend.sum', 'K.sum', (['U_y_t_tp1'], {'axis': '(1)'}), '(U_y_t_tp1, axis=1)\n', (3247, 3266), True, 'from keras import backend as K\n'), ((3963, 3989), 'keras.backend.expand_dims', 'K.expand_dims', (['(-energy)', '(-1)'], {}), '(-energy, -1)\n', (3976, 3989), True, 'from keras import backend as K\n'), ((4149, 4164), 'keras.backend.argmax', 'K.argmax', (['y', '(-1)'], {}), '(y, -1)\n', (4157, 4164), True, 'from keras import backend as K\n'), ((4180, 4205), 'keras.backend.cast', 'K.cast', (['y_sparse', '"""int32"""'], {}), "(y_sparse, 'int32')\n", (4186, 4205), True, 'from keras import backend as K\n'), ((5604, 5625), 'keras.backend.zeros_like', 'K.zeros_like', (['alpha_0'], {}), '(alpha_0)\n', (5616, 5625), True, 'from keras import backend as K\n'), ((7278, 7322), 'keras.backend.rnn', 'K.rnn', (['_forward_step', 'inputs', 'initial_states'], {}), '(_forward_step, inputs, initial_states)\n', (7283, 7322), True, 'from keras import backend as K\n'), ((7444, 7466), 'keras.backend.cast', 'K.cast', (['gamma', '"""int32"""'], {}), "(gamma, 'int32')\n", (7450, 7466), True, 'from keras import backend as K\n'), ((7725, 7788), 'keras.backend.rnn', 'K.rnn', (['_backward_step', 
'gamma', 'initial_states'], {'go_backwards': '(True)'}), '(_backward_step, gamma, initial_states, go_backwards=True)\n', (7730, 7788), True, 'from keras import backend as K\n'), ((7869, 7888), 'keras.backend.reverse', 'K.reverse', (['y_rev', '(1)'], {}), '(y_rev, 1)\n', (7878, 7888), True, 'from keras import backend as K\n'), ((16945, 16977), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'n_classes'], {}), '(vocab_size, n_classes)\n', (16954, 16977), False, 'from keras.layers import Embedding, Dense\n'), ((16989, 17006), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (16994, 17006), False, 'from keras.layers import Input, concatenate\n'), ((17035, 17052), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (17040, 17052), False, 'from keras.layers import Input, concatenate\n'), ((17086, 17103), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (17091, 17103), False, 'from keras.layers import Input, concatenate\n'), ((17144, 17161), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (17149, 17161), False, 'from keras.layers import Input, concatenate\n'), ((17207, 17239), 'keras.layers.concatenate', 'concatenate', (['[x, x_aux]'], {'axis': '(-1)'}), '([x, x_aux], axis=-1)\n', (17218, 17239), False, 'from keras.layers import Input, concatenate\n'), ((17295, 17329), 'keras.layers.concatenate', 'concatenate', (['[xx, xx_aux]'], {'axis': '(-1)'}), '([xx, xx_aux], axis=-1)\n', (17306, 17329), False, 'from keras.layers import Input, concatenate\n'), ((17496, 17547), 'keras.models.Model', 'Model', (['[inp, inp2, inp_aux, inp2_aux]', '[out1, out2]'], {}), '([inp, inp2, inp_aux, inp2_aux], [out1, out2])\n', (17501, 17547), False, 'from keras.models import Sequential, Model\n'), ((18166, 18225), 'numpy.random.randint', 'np.random.randint', (['(1)', 'vocab_size'], {'size': '(batch_size, maxlen)'}), '(1, vocab_size, size=(batch_size, maxlen))\n', (18183, 18225), True, 'import 
numpy as np\n'), ((18234, 18289), 'numpy.random.randint', 'np.random.randint', (['n_classes'], {'size': '(batch_size, maxlen)'}), '(n_classes, size=(batch_size, maxlen))\n', (18251, 18289), True, 'import numpy as np\n'), ((462, 497), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['x'], {'axis': '[axis]'}), '(x, axis=[axis])\n', (481, 497), True, 'import tensorflow as tf\n'), ((1204, 1236), 'tensorflow.gather_nd', 'tf.gather_nd', (['reference', 'indices'], {}), '(reference, indices)\n', (1216, 1236), True, 'import tensorflow as tf\n'), ((1417, 1451), 'keras.backend.max', 'K.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (1422, 1451), True, 'from keras import backend as K\n'), ((1468, 1487), 'keras.backend.max', 'K.max', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (1473, 1487), True, 'from keras import backend as K\n'), ((2647, 2657), 'keras.backend.shape', 'K.shape', (['x'], {}), '(x)\n', (2654, 2657), True, 'from keras import backend as K\n'), ((4822, 4844), 'keras.backend.expand_dims', 'K.expand_dims', (['mask', '(2)'], {}), '(mask, 2)\n', (4835, 4844), True, 'from keras import backend as K\n'), ((6912, 6931), 'keras.backend.expand_dims', 'K.expand_dims', (['U', '(0)'], {}), '(U, 0)\n', (6925, 6931), True, 'from keras import backend as K\n'), ((7133, 7162), 'keras.backend.expand_dims', 'K.expand_dims', (['x[:, 1:, :]', '(2)'], {}), '(x[:, 1:, :], 2)\n', (7146, 7162), True, 'from keras import backend as K\n'), ((7525, 7548), 'keras.backend.squeeze', 'K.squeeze', (['states[0]', '(0)'], {}), '(states[0], 0)\n', (7534, 7548), True, 'from keras import backend as K\n'), ((7930, 7957), 'keras.backend.cast', 'K.cast', (['mask'], {'dtype': '"""int32"""'}), "(mask, dtype='int32')\n", (7936, 7957), True, 'from keras import backend as K\n'), ((11595, 11617), 'keras.initializers.get', 'initializers.get', (['init'], {}), '(init)\n', (11611, 11617), False, 'from keras import initializers\n'), ((11648, 11679), 
'keras.regularizers.get', 'regularizers.get', (['U_regularizer'], {}), '(U_regularizer)\n', (11664, 11679), False, 'from keras import regularizers, constraints\n'), ((11715, 11752), 'keras.regularizers.get', 'regularizers.get', (['b_start_regularizer'], {}), '(b_start_regularizer)\n', (11731, 11752), False, 'from keras import regularizers, constraints\n'), ((11786, 11821), 'keras.regularizers.get', 'regularizers.get', (['b_end_regularizer'], {}), '(b_end_regularizer)\n', (11802, 11821), False, 'from keras import regularizers, constraints\n'), ((11850, 11879), 'keras.constraints.get', 'constraints.get', (['U_constraint'], {}), '(U_constraint)\n', (11865, 11879), False, 'from keras import regularizers, constraints\n'), ((11914, 11949), 'keras.constraints.get', 'constraints.get', (['b_start_constraint'], {}), '(b_start_constraint)\n', (11929, 11949), False, 'from keras import regularizers, constraints\n'), ((11982, 12015), 'keras.constraints.get', 'constraints.get', (['b_end_constraint'], {}), '(b_end_constraint)\n', (11997, 12015), False, 'from keras import regularizers, constraints\n'), ((14274, 14303), 'keras.backend.one_hot', 'K.one_hot', (['y_pred', 'nb_classes'], {}), '(y_pred, nb_classes)\n', (14283, 14303), True, 'from keras import backend as K\n'), ((14319, 14354), 'keras.backend.in_train_phase', 'K.in_train_phase', (['x', 'y_pred_one_hot'], {}), '(x, y_pred_one_hot)\n', (14335, 14354), True, 'from keras import backend as K\n'), ((14759, 14779), 'keras.backend.argmax', 'K.argmax', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (14767, 14779), True, 'from keras import backend as K\n'), ((14797, 14820), 'keras.backend.cast', 'K.cast', (['y_true', '"""int32"""'], {}), "(y_true, 'int32')\n", (14803, 14820), True, 'from keras import backend as K\n'), ((17257, 17266), 'keras.layers.Dense', 'Dense', (['(11)'], {}), '(11)\n', (17262, 17266), False, 'from keras.layers import Embedding, Dense\n'), ((17346, 17355), 'keras.layers.Dense', 'Dense', (['(11)'], {}), '(11)\n', 
(17351, 17355), False, 'from keras.layers import Embedding, Dense\n'), ((18330, 18347), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (18336, 18347), True, 'import numpy as np\n'), ((1099, 1117), 'keras.backend.shape', 'K.shape', (['reference'], {}), '(reference)\n', (1106, 1117), True, 'from keras import backend as K\n'), ((2153, 2171), 'keras.backend.shape', 'K.shape', (['reference'], {}), '(reference)\n', (2160, 2171), True, 'from keras import backend as K\n'), ((3107, 3117), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3115, 3117), True, 'from keras import backend as K\n'), ((4591, 4650), 'keras.backend.concatenate', 'K.concatenate', (['[x[:, :1, :] + b_start, x[:, 1:, :]]'], {'axis': '(1)'}), '([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)\n', (4604, 4650), True, 'from keras import backend as K\n'), ((4697, 4756), 'keras.backend.concatenate', 'K.concatenate', (['[x[:, :-1, :], x[:, -1:, :] + b_end]'], {'axis': '(1)'}), '([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)\n', (4710, 4756), True, 'from keras import backend as K\n'), ((4795, 4805), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4803, 4805), True, 'from keras import backend as K\n'), ((6990, 7000), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (6998, 7000), True, 'from keras import backend as K\n'), ((7033, 7077), 'keras.backend.expand_dims', 'K.expand_dims', (['(mask[:, :-1] * mask[:, 1:])', '(2)'], {}), '(mask[:, :-1] * mask[:, 1:], 2)\n', (7046, 7077), True, 'from keras import backend as K\n'), ((7210, 7244), 'keras.backend.zeros_like', 'K.zeros_like', (['inputs[:, -1:, :, :]'], {}), '(inputs[:, -1:, :, :])\n', (7222, 7244), True, 'from keras import backend as K\n'), ((7673, 7701), 'keras.backend.zeros_like', 'K.zeros_like', (['gamma[:, 0, 0]'], {}), '(gamma[:, 0, 0])\n', (7685, 7701), True, 'from keras import backend as K\n'), ((11556, 11573), 'keras.engine.InputSpec', 'InputSpec', ([], {'ndim': '(3)'}), '(ndim=3)\n', (11565, 11573), False, 'from 
keras.engine import Layer, InputSpec\n'), ((12416, 12435), 'keras.backend.any', 'K.any', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (12421, 12435), True, 'from keras import backend as K\n'), ((1149, 1169), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (1157, 1169), True, 'import tensorflow as tf\n'), ((2200, 2220), 'theano.tensor.arange', 'T.arange', (['batch_size'], {}), '(batch_size)\n', (2208, 2220), True, 'import theano.tensor as T\n'), ((5013, 5036), 'keras.backend.greater', 'K.greater', (['mask', 'mask_r'], {}), '(mask, mask_r)\n', (5022, 5036), True, 'from keras import backend as K\n'), ((5038, 5048), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (5046, 5048), True, 'from keras import backend as K\n'), ((5237, 5260), 'keras.backend.greater', 'K.greater', (['mask', 'mask_l'], {}), '(mask, mask_l)\n', (5246, 5260), True, 'from keras import backend as K\n'), ((5262, 5272), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (5270, 5272), True, 'from keras import backend as K\n'), ((5769, 5785), 'keras.backend.max', 'K.max', (['B'], {'axis': '(1)'}), '(B, axis=1)\n', (5774, 5785), True, 'from keras import backend as K\n'), ((6794, 6821), 'keras.backend.expand_dims', 'K.expand_dims', (['alpha_tm1', '(2)'], {}), '(alpha_tm1, 2)\n', (6807, 6821), True, 'from keras import backend as K\n'), ((7613, 7634), 'keras.backend.expand_dims', 'K.expand_dims', (['y_t', '(0)'], {}), '(y_t, 0)\n', (7626, 7634), True, 'from keras import backend as K\n'), ((1523, 1538), 'keras.backend.exp', 'K.exp', (['(x - xmax)'], {}), '(x - xmax)\n', (1528, 1538), True, 'from keras import backend as K\n'), ((4931, 4956), 'keras.backend.zeros_like', 'K.zeros_like', (['mask[:, :1]'], {}), '(mask[:, :1])\n', (4943, 4956), True, 'from keras import backend as K\n'), ((5170, 5196), 'keras.backend.zeros_like', 'K.zeros_like', (['mask[:, -1:]'], {}), '(mask[:, -1:])\n', (5182, 5196), True, 'from keras import backend as K\n'), ((5735, 5754), 
'keras.backend.argmax', 'K.argmax', (['B'], {'axis': '(1)'}), '(B, axis=1)\n', (5743, 5754), True, 'from keras import backend as K\n'), ((5756, 5766), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (5764, 5766), True, 'from keras import backend as K\n'), ((12871, 12881), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (12879, 12881), True, 'from keras import backend as K\n')] |
import pandas as pd
import numpy as np
import sys
sys.path.append('./')
from data_layer.phoible import PhoibleInfo
from data_layer.parse import read_src_data, get_languages, separate_train, separate_per_language
from util import argparser
def get_symbols(df, field='IPA'):
    """Return the set of unique space-separated symbols found in ``df[field]``.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame whose ``field`` column holds space-separated transcriptions.
    field : str, optional
        Name of the column to scan (default ``'IPA'``).

    Returns
    -------
    set of str
    """
    symbols = set()
    # Iterate the column directly: the per-row overhead of iterrows() and the
    # intermediate list of the original implementation are unnecessary here.
    for value in df[field]:
        symbols.update(value.split(' '))
    return symbols
def get_lang_len(df, field='IPA'):
    """Return the mean number of space-separated symbols per entry of ``df[field]``.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame whose ``field`` column holds space-separated transcriptions.
    field : str, optional
        Name of the column to scan (default ``'IPA'``).

    Returns
    -------
    float
        Mean token count; ``nan`` for an empty frame (behaviour of ``np.mean([])``).
    """
    # A comprehension replaces the manual enumerate/append loop of the original.
    lengths = [len(value.split(' ')) for value in df[field]]
    return np.mean(lengths)
def get_lang_ipa_info(df, languages_df, args, field='IPA'):
    """Compute per-language IPA inventory statistics and write them to a CSV.

    For every language the train/val/test split frames are concatenated, then
    the average word length and the symbol inventory are computed and the
    inventory is broken down by Phoible category.

    Note: ``df`` is accepted but not used in the body; ``args.rfolder`` is used
    to locate the output folder.
    """
    phoible = PhoibleInfo()
    lang_data = []
    for lang, lang_df in languages_df.items():
        # Statistics are computed over the union of all three splits.
        frames = [lang_df['train'], lang_df['val'], lang_df['test']]
        full_data = pd.concat(frames)
        avg_len = get_lang_len(full_data, field=field)
        symbols = get_symbols(full_data, field=field)
        # NOTE(review): ``symbol`` is unpacked but never used below.
        consonant, vowel, tone, symbol, unrecognized = phoible.count_types(symbols)
        lang_data += [[lang, len(symbols), vowel, consonant, tone, unrecognized, avg_len]]
    columns = ['lang', 'inventory', 'vowel', 'consonant', 'tone', 'unrecognized', 'avg_len']
    df_info = pd.DataFrame(lang_data, columns=columns)
    # Assumes args.rfolder ends with 'orig'; the suffix is stripped to get the
    # report root -- TODO confirm against the argparser defaults.
    rfolder = args.rfolder[:-len('orig')]
    df_info.to_csv('%s/lang_inventory.csv' % (rfolder))
def main(args):
    """Load the source data, split it per language and dump inventory stats."""
    data = read_src_data(args.ffolder)
    langs = get_languages(data)
    train_frame, val_frame, test_frame, _ = separate_train(data)
    per_language = separate_per_language(train_frame, val_frame, test_frame, langs)
    get_lang_ipa_info(data, per_language, args, field='IPA')
if __name__ == '__main__':
    args = argparser.parse_args(csv_folder='inventory')
    # ``assert`` is stripped when Python runs with -O, so validate explicitly.
    if args.data != 'northeuralex':
        raise ValueError('this script should only be run with northeuralex data')
    main(args)
| [
"numpy.mean",
"data_layer.parse.read_src_data",
"data_layer.parse.get_languages",
"util.argparser.parse_args",
"pandas.concat",
"data_layer.parse.separate_train",
"pandas.DataFrame",
"data_layer.phoible.PhoibleInfo",
"sys.path.append",
"data_layer.parse.separate_per_language"
] | [((51, 72), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (66, 72), False, 'import sys\n'), ((603, 616), 'numpy.mean', 'np.mean', (['lens'], {}), '(lens)\n', (610, 616), True, 'import numpy as np\n'), ((693, 706), 'data_layer.phoible.PhoibleInfo', 'PhoibleInfo', ([], {}), '()\n', (704, 706), False, 'from data_layer.phoible import PhoibleInfo\n'), ((1275, 1315), 'pandas.DataFrame', 'pd.DataFrame', (['lang_data'], {'columns': 'columns'}), '(lang_data, columns=columns)\n', (1287, 1315), True, 'import pandas as pd\n'), ((1441, 1468), 'data_layer.parse.read_src_data', 'read_src_data', (['args.ffolder'], {}), '(args.ffolder)\n', (1454, 1468), False, 'from data_layer.parse import read_src_data, get_languages, separate_train, separate_per_language\n'), ((1486, 1503), 'data_layer.parse.get_languages', 'get_languages', (['df'], {}), '(df)\n', (1499, 1503), False, 'from data_layer.parse import read_src_data, get_languages, separate_train, separate_per_language\n'), ((1539, 1557), 'data_layer.parse.separate_train', 'separate_train', (['df'], {}), '(df)\n', (1553, 1557), False, 'from data_layer.parse import read_src_data, get_languages, separate_train, separate_per_language\n'), ((1577, 1636), 'data_layer.parse.separate_per_language', 'separate_per_language', (['train_df', 'val_df', 'test_df', 'languages'], {}), '(train_df, val_df, test_df, languages)\n', (1598, 1636), False, 'from data_layer.parse import read_src_data, get_languages, separate_train, separate_per_language\n'), ((1737, 1781), 'util.argparser.parse_args', 'argparser.parse_args', ([], {'csv_folder': '"""inventory"""'}), "(csv_folder='inventory')\n", (1757, 1781), False, 'from util import argparser\n'), ((863, 880), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (872, 880), True, 'import pandas as pd\n')] |
from numpy import sum
from gwlfe.Memoization import memoize
def TotLAEU(NumAnimals, AvgAnimalWt):
    """Total livestock animal equivalent units (AEUs).

    Sums animal count times average weight over the livestock slots
    (indices 0, 1, 4, 5 and 6 of the input sequences) and converts
    pounds to AEUs (1 AEU = 1000 lb).

    Parameters
    ----------
    NumAnimals : sequence of numbers
        Head count per animal type.
    AvgAnimalWt : sequence of numbers
        Average weight (lb) per animal type.

    Returns
    -------
    float
    """
    # Same five slots as the vectorized TotLAEU_f, without the copy-pasted
    # one-variable-per-slot arithmetic of the original.
    livestock_indices = (0, 1, 4, 5, 6)
    return sum([NumAnimals[i] * AvgAnimalWt[i] for i in livestock_indices]) / 1000
@memoize
def TotLAEU_f(NumAnimals, AvgAnimalWt):
    """Vectorized total livestock AEUs: fancy-index the livestock slots."""
    livestock = [0, 1, 4, 5, 6]
    return sum(NumAnimals[livestock] * AvgAnimalWt[livestock] / 1000)
| [
"numpy.sum"
] | [((498, 568), 'numpy.sum', 'sum', (['(NumAnimals[[0, 1, 4, 5, 6]] * AvgAnimalWt[[0, 1, 4, 5, 6]] / 1000)'], {}), '(NumAnimals[[0, 1, 4, 5, 6]] * AvgAnimalWt[[0, 1, 4, 5, 6]] / 1000)\n', (501, 568), False, 'from numpy import sum\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 5 11:34:19 2016
@author: bcolsen
"""
from wtforms import validators
import numpy as np
import re
class DataLength():
    """WTForms-style validator: the field must hold between ``min`` and ``max``
    whitespace/comma separated values (``max == -1`` means unbounded)."""

    def __init__(self, min=-1, max=-1, message=None):
        self.min = min
        self.max = max
        if not message:
            message = u'Data must be between %i and %i values long.' % (min, max)
        self.message = message

    def __call__(self, form, field):
        data_list = data_split(field.data)
        # ``x and len(x) or 0`` is the fragile and/or ternary idiom; use an
        # explicit conditional expression instead.
        length = len(data_list) if data_list else 0
        if length < self.min or (self.max != -1 and length > self.max):
            raise validators.ValidationError(self.message)
class DataLengthEqual():
    """WTForms-style validator: the field must hold the same number of values
    as the sibling form field named ``fieldname``."""

    def __init__(self, fieldname, message=None):
        self.fieldname = fieldname
        if not message:
            message = u'Data must be the same length.'
        self.message = message

    def __call__(self, form, field):
        data_list = data_split(field.data)
        other_list = data_split(getattr(form, self.fieldname).data)
        # Replace the fragile ``x and len(x) or 0`` idiom with explicit
        # conditional expressions.
        length = len(data_list) if data_list else 0
        other_length = len(other_list) if other_list else 0
        if length != other_length:
            raise validators.ValidationError(self.message)
class DataFloat():
    """WTForms-style validator: every value in the field must parse as a float."""

    def __init__(self, message=None):
        self.message = message if message else u'Number cannot be converted to float.'

    def __call__(self, form, field):
        values = data_split(field.data)
        try:
            # Conversion is only attempted for its side effect: a ValueError
            # signals at least one non-numeric entry.
            np.array(values, dtype=float)
        except ValueError as err:
            raise validators.ValidationError(err)
def data_split(data):
    """Split *data* on runs of whitespace and/or commas.

    Empty leading/trailing tokens are dropped.  Returns ``None`` when the
    stripped input is empty (the token list collapses to nothing before the
    trailing check), otherwise the (possibly empty) token list.
    """
    tokens = re.split(r'[\s,]+', data.strip())
    try:
        if not tokens[0]:
            tokens.pop(0)
        if not tokens[-1]:
            tokens.pop(-1)
    except IndexError:
        return None
    return tokens
| [
"numpy.array",
"wtforms.validators.ValidationError"
] | [((633, 673), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['self.message'], {}), '(self.message)\n', (659, 673), False, 'from wtforms import validators\n'), ((1175, 1215), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['self.message'], {}), '(self.message)\n', (1201, 1215), False, 'from wtforms import validators\n'), ((1502, 1534), 'numpy.array', 'np.array', (['data_list'], {'dtype': 'float'}), '(data_list, dtype=float)\n', (1510, 1534), True, 'import numpy as np\n'), ((1587, 1618), 'wtforms.validators.ValidationError', 'validators.ValidationError', (['err'], {}), '(err)\n', (1613, 1618), False, 'from wtforms import validators\n')] |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from collections import Counter
import string
import numpy
from ..representation import QuestionAnsweringAnnotation, QuestionAnsweringPrediction
from ..representation import QuestionAnsweringEmbeddingAnnotation, QuestionAnsweringEmbeddingPrediction
from ..representation import QuestionAnsweringBiDAFAnnotation
from .metric import PerImageEvaluationMetric, FullDatasetEvaluationMetric
from ..config import NumberField
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    article_re = re.compile(r"\b(a|an|the)\b", re.UNICODE)
    punctuation = set(string.punctuation)
    # Pipeline: lowercase -> strip punctuation -> drop articles -> collapse spaces.
    text = s.lower()
    text = "".join(ch for ch in text if ch not in punctuation)
    text = article_re.sub(" ", text)
    return " ".join(text.split())
def get_tokens(s):
    """Return the normalized token list of *s*; empty list for falsy input."""
    return normalize_answer(s).split() if s else []
class ScoreF1(PerImageEvaluationMetric):
    """Token-level F1 for SQuAD-style question answering.

    For each question the best F1 against any non-empty gold answer is kept;
    ``evaluate`` averages those per-question bests.
    """
    __provider__ = 'f1'

    annotation_types = (QuestionAnsweringAnnotation,)
    prediction_types = (QuestionAnsweringPrediction,)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # best F1 seen so far, keyed by question id
        self.per_question_results = {}

    def update(self, annotation, prediction):
        # Keep gold answers that are non-empty after normalization; fall back
        # to the empty string when none remain.
        references = [ans["text"] for ans in annotation.orig_answer_text if normalize_answer(ans["text"])]
        if not references:
            references = ['']
        hypothesis = prediction.tokens[0] if prediction.tokens else ''
        best_f1 = max(self.compute_f1(ref, hypothesis) for ref in references)
        qid = annotation.question_id
        # A question may be updated several times; keep its best score.
        self.per_question_results[qid] = max(best_f1, self.per_question_results.get(qid, 0))
        return best_f1

    @staticmethod
    def compute_f1(a_gold, a_pred):
        gold_toks = get_tokens(a_gold)
        pred_toks = get_tokens(a_pred)
        num_same = sum((Counter(gold_toks) & Counter(pred_toks)).values())
        if not gold_toks or not pred_toks:
            # If either is no-answer, F1 is 1 iff both are no-answer.
            return int(gold_toks == pred_toks)
        if num_same == 0:
            return 0
        precision = num_same / len(pred_toks)
        recall = num_same / len(gold_toks)
        return 2 * precision * recall / (precision + recall)

    def evaluate(self, annotations, predictions):
        # Mean of the per-question best F1 scores.
        return sum(self.per_question_results.values()) / len(self.per_question_results)

    def reset(self):
        self.per_question_results = {}
class ExactMatchScore(PerImageEvaluationMetric):
    """Exact-match (EM) score for SQuAD-style question answering.

    A prediction scores 1 on a question when its normalized text equals any
    normalized gold answer, else 0; ``evaluate`` averages the best score per
    question over all seen questions.
    """
    __provider__ = 'exact_match'

    annotation_types = (QuestionAnsweringAnnotation, QuestionAnsweringBiDAFAnnotation, )
    prediction_types = (QuestionAnsweringPrediction, )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # best exact-match score seen so far, keyed by question id
        self.per_question_results = {}

    def update(self, annotation, prediction):
        # Keep only gold answers that are non-empty after normalization.
        gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])]
        if not gold_answers:
            # No usable gold answer: compare against the empty string.
            gold_answers = ['']
        pred_answer = prediction.tokens[0] if prediction.tokens else ''
        max_exact_match = max(self.compute_exact(a_gold, pred_answer) for a_gold in gold_answers)
        # A question may be updated several times; keep its best score.
        self.per_question_results[annotation.question_id] = max(
            max_exact_match, self.per_question_results.get(annotation.question_id, 0)
        )
        return max_exact_match

    @staticmethod
    def compute_exact(a_gold, a_pred):
        # 1 if the normalized strings are identical, 0 otherwise.
        return int(normalize_answer(a_gold) == normalize_answer(a_pred))

    def evaluate(self, annotations, predictions):
        # Mean of the per-question best scores.
        return sum(self.per_question_results.values()) / len(self.per_question_results)

    def reset(self):
        del self.per_question_results
        self.per_question_results = {}
class QuestionAnsweringEmbeddingAccuracy(FullDatasetEvaluationMetric):
    """Top-k retrieval accuracy for question/context embedding models.

    Each question embedding is scored by whether its true context embedding
    appears among the ``top_k`` nearest context embeddings (L2 distance).
    """
    __provider__ = 'qa_embedding_accuracy'

    annotation_types = (QuestionAnsweringEmbeddingAnnotation,)
    prediction_types = (QuestionAnsweringEmbeddingPrediction,)

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        parameters.update({
            'top_k': NumberField(
                value_type=int, min_value=1, max_value=1000, default=5, optional=True,
                description='Specifies the number of closest context embeddings to check.'
            ),
        })
        return parameters

    def configure(self):
        self.top_k = self.get_value_from_config('top_k')

    def evaluate(self, annotations, predictions):
        ap_pairs = list(zip(annotations, predictions))

        #check data alignment
        # NOTE(review): for tuple identifiers this checks the truthiness of
        # ``p.identifier.values`` rather than comparing identifiers -- confirm
        # this is the intended alignment test.
        assert all(
            a.identifier is p.identifier
            if not isinstance(p.identifier, tuple)
            else p.identifier.values
            for a, p in ap_pairs), "annotations and predictions are not aligned"

        # Questions carry the identifier of their true context; contexts don't.
        q_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is not None]
        c_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is None]

        c_data_identifiers = [a.identifier for a, p in c_pairs]
        c_vecs = numpy.array([p.embedding for a, p in c_pairs])

        # calc distances from each question to all contexts and check if top_k has true positives
        true_pos = 0
        for q_a, q_p in q_pairs:

            #calc distance between question embedding with all context embeddings
            d = c_vecs - q_p.embedding[None, :]
            dist = numpy.linalg.norm(d, ord=2, axis=1)
            index = dist.argsort()

            #check that right context in the list of top_k
            c_pos_index = c_data_identifiers.index(q_a.context_pos_indetifier)
            if c_pos_index in index[:self.top_k]:
                true_pos += 1

        # NOTE(review): returns a one-element list on the normal path but the
        # bare int 0 when there are no questions -- inconsistent types.
        return [true_pos/len(q_pairs)] if q_pairs else 0
| [
"re.compile",
"collections.Counter",
"numpy.array",
"numpy.linalg.norm",
"re.sub"
] | [((1158, 1200), 're.compile', 're.compile', (['"""\\\\b(a|an|the)\\\\b"""', 're.UNICODE'], {}), "('\\\\b(a|an|the)\\\\b', re.UNICODE)\n", (1168, 1200), False, 'import re\n'), ((1215, 1239), 're.sub', 're.sub', (['regex', '""" """', 'text'], {}), "(regex, ' ', text)\n", (1221, 1239), False, 'import re\n'), ((6070, 6116), 'numpy.array', 'numpy.array', (['[p.embedding for a, p in c_pairs]'], {}), '([p.embedding for a, p in c_pairs])\n', (6081, 6116), False, 'import numpy\n'), ((2710, 2728), 'collections.Counter', 'Counter', (['gold_toks'], {}), '(gold_toks)\n', (2717, 2728), False, 'from collections import Counter\n'), ((2731, 2749), 'collections.Counter', 'Counter', (['pred_toks'], {}), '(pred_toks)\n', (2738, 2749), False, 'from collections import Counter\n'), ((6420, 6455), 'numpy.linalg.norm', 'numpy.linalg.norm', (['d'], {'ord': '(2)', 'axis': '(1)'}), '(d, ord=2, axis=1)\n', (6437, 6455), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Built-in packages
# External packages
import numpy as np
from matplotlib import pyplot as plt
# import seaborn as sns
# Internal packages
from fynance.backtest.dynamic_plot_backtest import DynaPlotBackTest
from fynance.neural_networks.roll_multi_neural_networks import RollMultiNeuralNet
# Set plot style
plt.style.use('seaborn')
# TODO: Aggregated method
class RollAggrMultiNeuralNet(RollMultiNeuralNet):
""" Rolling Aggregated Multi Neural Networks object allow you to train
several neural networks along training periods (from t - n to t),
predict along testing periods (from t to t + s) and aggregate prediction
following a specified rule and roll along this time axis.
Attributes
----------
y : np.ndarray[np.float32, ndim=2] with shape=(T, 1)
Target to estimate or predict.
X : np.ndarray[np.float32, ndim=2] with shape=(T, N)
Features (inputs).
NN : list of keras.Model
Neural network to train and predict.
y_train : np.ndarray[np.float64, ndim=1]
Prediction on training set.
y_estim : np.ndarray[np.float64, ndim=1]
Prediction on estimating set.
Methods
-------
run(y, X, NN, plot_loss=True, plot_perf=True, x_axis=None)
Train several rolling neural networks along pre-specified training
period and predict along test period. Display loss and performance
if specified.
__call__(y, X, NN, start=0, end=1e8, x_axis=None)
Callable method to set target and features data, neural network
object (Keras object is prefered).
__iter__()
Train and predict along time axis from day number n to last day
number T and by step of size s period.
aggregate(mat_pred, y, t=0, t_s=-1)
Method to aggregate predictions from several neural networks.
set_aggregate(*args)
Set your own aggregation method.
plot_loss(self, f, ax)
Plot loss function
plot_perf(self, f, ax)
        Plot performances.
See Also
--------
RollNeuralNet, RollMultiNeuralNet, RollMultiRollNeuralNet
"""
    def __init__(self, *args, agg_fun='mean', **kwargs):
        """Initialize the rolling aggregated multi neural network.

        ``agg_fun`` selects the built-in aggregation rule used by
        ``_aggregate``: ``'mean'``, ``'sum'``, ``'best'`` or ``'bests'``.
        Remaining arguments are forwarded to ``RollMultiNeuralNet``.
        """
        RollMultiNeuralNet.__init__(self, *args, **kwargs)
        self.agg_fun = agg_fun
    def __call__(self, y, X, NN, start=0, end=1e8, x_axis=None):
        """ Callable method to set target and features data, neural network
        object (Keras object is preferred).

        Parameters
        ----------
        y : np.ndarray[ndim=1, dtype=np.float32]
            Target to predict.
        X : np.ndarray[ndim=2, dtype=np.float32]
            Features data.
        NN : list of keras.engine.training.Model
            Neural network model.
        start : int, optional
            Starting observation, default is 0.
        end : int, optional
            Ending observation, default is end.
        x_axis : np.ndarray[ndim=1], optional
            X-Axis to use for the backtest.

        Returns
        -------
        ramnn : RollAggrMultiNeuralNet
        """
        RollMultiNeuralNet.__call__(
            self, y, X, NN, start=start, end=end, x_axis=x_axis
        )
        # Buffer for the aggregated prediction, filled rolling-window by window.
        self.agg_y = np.zeros([self.T, 1])
        return self
    def run(self, y, X, NN, plot_loss=True, plot_perf=True, x_axis=None):
        """ Train several rolling neural networks along a pre-specified train
        period and predict along the test period. Display loss and performance
        if specified.

        Parameters
        ----------
        y : np.ndarray[np.float32, ndim=2] with shape=(T, 1)
            Time series of target to estimate or predict.
        X : np.ndarray[np.float32, ndim=2] with shape=(T, N)
            Several time series of features.
        NN : keras.Model or list of keras.Model
            Neural networks to train and predict.
        plot_loss : bool, optional
            If true dynamic plot of loss function, default is True.
        plot_perf : bool, optional
            If true dynamic plot of strategy performance, default is True.
        x_axis : list or array, optional
            x-axis to plot (e.g. list of dates).

        Returns
        -------
        ramnn : RollAggrMultiNeuralNet
        """
        if isinstance(NN, list):
            self.n_NN = len(NN)
        else:
            self.n_NN = 1
        # Set perf and loss arrays (one column per network, start value V0)
        self.perf_train = self.V0 * np.ones([y.size, self.n_NN])
        self.perf_estim = self.V0 * np.ones([y.size, self.n_NN])
        self.perf_agg = self.V0 * np.ones([y.size, 1])
        # Set axes and figure
        f, ax_loss, ax_perf = self._set_figure(plot_loss, plot_perf)
        # Start Rolling Neural Network: each iteration yields the predictions
        # of the current train window [t-s, t) and test window [t, t_s)
        for pred_train, pred_estim in self(y, X, NN, x_axis=x_axis):
            t, s, t_s = self.t, self.s, min(self.t + self.s, self.T)
            # Set performances of training period (sign of prediction = position)
            returns = np.sign(pred_train) * y[t - s: t]
            cum_ret = np.exp(np.cumsum(returns, axis=0))
            self.perf_train[t - s: t] = self.perf_train[t - s - 1] * cum_ret
            # Set performances of estimated period
            returns = np.sign(pred_estim) * y[t: t_s]
            cum_ret = np.exp(np.cumsum(returns, axis=0))
            self.perf_estim[t: t_s] = self.perf_estim[t - 1] * cum_ret
            # Aggregate prediction of the several networks, then track its perf
            self.aggregate(pred_estim, y[t: t_s], t=t, t_s=t_s)
            returns = np.sign(self.agg_y[t: t_s]) * y[t: t_s]
            cum_ret = np.exp(np.cumsum(returns, axis=0))
            self.perf_agg[t: t_s] = self.perf_agg[t - 1] * cum_ret
            # Plot loss and perf
            self._dynamic_plot(f, ax_loss=ax_loss, ax_perf=ax_perf)
        return self
    def aggregate(self, mat_pred, y, t=0, t_s=-1):
        """ Method to aggregate predictions from several neural networks and
        store the result in ``self.agg_y[t: t_s]``.

        Parameters
        ----------
        mat_pred : np.ndarray[np.float32, ndim=2] with shape=(T, n_NN)
            Several time series of neural networks predictions.
        y : np.ndarray[np.float32, ndim=2] with shape=(T, 1)
            Time series of target to estimate or predict.
        t : int, optional
            First observation, default is first one.
        t_s : int, optional
            Last observation, default is last one.

        Returns
        -------
        ramnn : RollAggrMultiNeuralNet
        """
        self.agg_y[t: t_s, 0] = self._aggregate(mat_pred, y)
        return self
def _aggregate(self, mat_pred, y):
""" """
# TODO : find a better aggregation method
if self.agg_fun == 'mean':
return np.mean(mat_pred, axis=1)
elif self.agg_fun == 'sum':
return np.sum(mat_pred, axis=1)
elif self.agg_fun == 'best':
i = np.argmax(self.perf_estim[self.t])
return mat_pred[:, i]
elif self.agg_fun == 'bests':
perfs = self.perf_estim[self.t]
perf_list = []
arg_list = []
for i in range(self.n_NN):
if len(perf_list) < 3:
perf_list += [perfs[i]]
arg_list += [i]
elif perfs[i] > min(perf_list):
j = np.argmin(perf_list)
perf_list[j] = perfs[i]
arg_list[j] = i
else:
pass
y = mat_pred[:, arg_list[0]]
y += mat_pred[:, arg_list[1]]
y += mat_pred[:, arg_list[2]]
y /= 3
return y
# TODO : Make method to customize aggregation function
def set_aggregate(self, *args):
""" Set your own aggregation method.
Parameters
----------
args : tuple of function
Any function such that the final value is a numpy array.
Returns
-------
ramnn : RollAggrMultiNeuralNet
"""
self._aggregate = lambda x: x
for arg in args:
self._aggregate = lambda x: arg(self._aggregate(x))
return self
    def plot_perf(self, f, ax):
        """ Plot performances method: draws estimated, aggregated and training
        performance curves on a log-scaled backtest axis.

        Parameters
        ----------
        fig : matplotlib.figure.Figure
            Figure to display backtest.
        ax : matplotlib.axes
            Axe(s) to display a part of backtest.

        Returns
        -------
        ramnn : RollAggrMultiNeuralNet
        """
        t, t_s = self.t, min(self.t + self.s, self.T)
        dpbt = DynaPlotBackTest(
            fig=f, ax=ax, title='Model performance', ylabel='Perf.',
            xlabel='Date', yscale='log',
            tick_params={'axis': 'x', 'rotation': 30, 'labelsize': 10}
        )
        # Set graphs: estimated perf up to t_s, aggregated perf, train perf up to t
        dpbt.plot(
            self.perf_estim[: t_s], x=self.x_axis[: t_s],
            names='Estim NN', col='GnBu', lw=1.7, unit='perf',
        )
        dpbt.plot(
            self.perf_agg[: t_s], x=self.x_axis[: t_s],
            names='Aggr NN', col='Reds', lw=2., unit='perf'
        )
        dpbt.plot(
            self.perf_train[: t], x=self.x_axis[: t],
            names='Train NN', col='OrRd', lw=1.2, unit='perf'
        )
        ax.legend(loc='upper left', ncol=2, fontsize=10,
                  handlelength=0.8, columnspacing=0.5, frameon=True)
        return self | [
"numpy.mean",
"fynance.neural_networks.roll_multi_neural_networks.RollMultiNeuralNet.__call__",
"numpy.ones",
"fynance.backtest.dynamic_plot_backtest.DynaPlotBackTest",
"matplotlib.pyplot.style.use",
"fynance.neural_networks.roll_multi_neural_networks.RollMultiNeuralNet.__init__",
"numpy.argmax",
"num... | [((357, 381), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (370, 381), True, 'from matplotlib import pyplot as plt\n'), ((2211, 2261), 'fynance.neural_networks.roll_multi_neural_networks.RollMultiNeuralNet.__init__', 'RollMultiNeuralNet.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (2238, 2261), False, 'from fynance.neural_networks.roll_multi_neural_networks import RollMultiNeuralNet\n'), ((3095, 3180), 'fynance.neural_networks.roll_multi_neural_networks.RollMultiNeuralNet.__call__', 'RollMultiNeuralNet.__call__', (['self', 'y', 'X', 'NN'], {'start': 'start', 'end': 'end', 'x_axis': 'x_axis'}), '(self, y, X, NN, start=start, end=end, x_axis=x_axis\n )\n', (3122, 3180), False, 'from fynance.neural_networks.roll_multi_neural_networks import RollMultiNeuralNet\n'), ((3219, 3240), 'numpy.zeros', 'np.zeros', (['[self.T, 1]'], {}), '([self.T, 1])\n', (3227, 3240), True, 'import numpy as np\n'), ((8585, 8755), 'fynance.backtest.dynamic_plot_backtest.DynaPlotBackTest', 'DynaPlotBackTest', ([], {'fig': 'f', 'ax': 'ax', 'title': '"""Model performance"""', 'ylabel': '"""Perf."""', 'xlabel': '"""Date"""', 'yscale': '"""log"""', 'tick_params': "{'axis': 'x', 'rotation': 30, 'labelsize': 10}"}), "(fig=f, ax=ax, title='Model performance', ylabel='Perf.',\n xlabel='Date', yscale='log', tick_params={'axis': 'x', 'rotation': 30,\n 'labelsize': 10})\n", (8601, 8755), False, 'from fynance.backtest.dynamic_plot_backtest import DynaPlotBackTest\n'), ((4468, 4496), 'numpy.ones', 'np.ones', (['[y.size, self.n_NN]'], {}), '([y.size, self.n_NN])\n', (4475, 4496), True, 'import numpy as np\n'), ((4533, 4561), 'numpy.ones', 'np.ones', (['[y.size, self.n_NN]'], {}), '([y.size, self.n_NN])\n', (4540, 4561), True, 'import numpy as np\n'), ((4596, 4616), 'numpy.ones', 'np.ones', (['[y.size, 1]'], {}), '([y.size, 1])\n', (4603, 4616), True, 'import numpy as np\n'), ((6718, 6743), 'numpy.mean', 'np.mean', (['mat_pred'], 
{'axis': '(1)'}), '(mat_pred, axis=1)\n', (6725, 6743), True, 'import numpy as np\n'), ((4980, 4999), 'numpy.sign', 'np.sign', (['pred_train'], {}), '(pred_train)\n', (4987, 4999), True, 'import numpy as np\n'), ((5043, 5069), 'numpy.cumsum', 'np.cumsum', (['returns'], {'axis': '(0)'}), '(returns, axis=0)\n', (5052, 5069), True, 'import numpy as np\n'), ((5222, 5241), 'numpy.sign', 'np.sign', (['pred_estim'], {}), '(pred_estim)\n', (5229, 5241), True, 'import numpy as np\n'), ((5283, 5309), 'numpy.cumsum', 'np.cumsum', (['returns'], {'axis': '(0)'}), '(returns, axis=0)\n', (5292, 5309), True, 'import numpy as np\n'), ((5516, 5542), 'numpy.sign', 'np.sign', (['self.agg_y[t:t_s]'], {}), '(self.agg_y[t:t_s])\n', (5523, 5542), True, 'import numpy as np\n'), ((5585, 5611), 'numpy.cumsum', 'np.cumsum', (['returns'], {'axis': '(0)'}), '(returns, axis=0)\n', (5594, 5611), True, 'import numpy as np\n'), ((6799, 6823), 'numpy.sum', 'np.sum', (['mat_pred'], {'axis': '(1)'}), '(mat_pred, axis=1)\n', (6805, 6823), True, 'import numpy as np\n'), ((6877, 6911), 'numpy.argmax', 'np.argmax', (['self.perf_estim[self.t]'], {}), '(self.perf_estim[self.t])\n', (6886, 6911), True, 'import numpy as np\n'), ((7312, 7332), 'numpy.argmin', 'np.argmin', (['perf_list'], {}), '(perf_list)\n', (7321, 7332), True, 'import numpy as np\n')] |
"""
ckwg +31
Copyright 2016 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for class Feature, interfacing vital::feature functionality
"""
import ctypes
import random
import unittest
import nose.tools
import numpy
from vital.types import Covariance, EigenArray, Feature, RGBColor
class TestFeature (unittest.TestCase):
def test_new(self):
f1 = Feature()
f2 = Feature([1, 1], 1, 2, 1)
def test_get_typename(self):
# Returns C++ std::type_info.name values
f = Feature(ctype=ctypes.c_double)
nose.tools.assert_equal(f.type_name, 'd')
f = Feature(ctype=ctypes.c_float)
nose.tools.assert_equal(f.type_name, 'f')
def test_get_location(self):
f = Feature()
numpy.testing.assert_almost_equal(
f.location,
[[0], [0]]
)
expected = [[12.3], [643]]
f = Feature(loc=expected)
numpy.testing.assert_almost_equal(
f.location,
expected
)
# iterable form
f = Feature(loc=(12.3, 643))
numpy.testing.assert_almost_equal(
f.location,
expected
)
def test_get_mag(self):
f = Feature()
nose.tools.assert_equal(f.magnitude, 0)
f = Feature(mag=1.1)
nose.tools.assert_equal(f.magnitude, 1.1)
def test_get_scale(self):
f = Feature()
nose.tools.assert_equal(f.scale, 1)
f = Feature(scale=2.1)
nose.tools.assert_equal(f.scale, 2.1)
def test_get_angle(self):
f = Feature()
nose.tools.assert_equal(f.angle, 0)
f = Feature(angle=1.1)
nose.tools.assert_equal(f.angle, 1.1)
def test_get_covar(self):
dflt_covar = Covariance(2)
f = Feature()
nose.tools.assert_equal(f.covariance, dflt_covar)
# No constructor slot to initialize non-default covariance
def test_get_color(self):
dflt_color = RGBColor()
f = Feature()
nose.tools.assert_equal(f.color, dflt_color)
c = RGBColor(5, 32, 10)
f = Feature(rgb_color=c)
nose.tools.assert_equal(f.color, c)
def test_set_location(self):
f = Feature(ctype=ctypes.c_double)
expected = [[random.random()],
[random.random()]]
f.location = expected
# making sure that we went through the setter, and not just setting the
# exact value to the property
nose.tools.assert_is_instance(f.location, EigenArray)
numpy.testing.assert_almost_equal(f.location, expected, 16)
f = Feature(ctype=ctypes.c_float)
expected = [[random.random()],
[random.random()]]
f.location = expected
nose.tools.assert_is_instance(f.location, EigenArray)
numpy.testing.assert_almost_equal(f.location, expected, 6)
def test_set_magnitude(self):
f = Feature(ctype=ctypes.c_double)
nose.tools.assert_equal(f.magnitude, 0) # default value
expected = random.random()
f.magnitude = expected
nose.tools.assert_almost_equal(f.magnitude, expected, 16)
f = Feature(ctype=ctypes.c_float)
nose.tools.assert_equal(f.magnitude, 0) # default value
expected = random.random()
f.magnitude = expected
nose.tools.assert_almost_equal(f.magnitude, expected, 6)
def test_set_scale(self):
f = Feature(ctype=ctypes.c_double)
nose.tools.assert_equal(f.scale, 1) # default value
expected = random.random()
f.scale = expected
nose.tools.assert_almost_equal(f.scale, expected, 16)
f = Feature(ctype=ctypes.c_float)
nose.tools.assert_equal(f.scale, 1) # default value
expected = random.random()
f.scale = expected
nose.tools.assert_almost_equal(f.scale, expected, 6)
def test_set_angle(self):
f = Feature(ctype=ctypes.c_double)
nose.tools.assert_equal(f.angle, 0) # default value
expected = random.random()
f.angle = expected
nose.tools.assert_almost_equal(f.angle, expected, 16)
f = Feature(ctype=ctypes.c_float)
nose.tools.assert_equal(f.angle, 0) # default value
expected = random.random()
f.angle = expected
nose.tools.assert_almost_equal(f.angle, expected, 6)
def test_set_covar(self):
f = Feature(ctype=ctypes.c_double)
nose.tools.assert_equal(f.covariance, Covariance())
expected = [[1, 2],
[3, 4]]
c = Covariance(2, ctypes.c_double, expected)
f.covariance = c
nose.tools.assert_equal(f.covariance, c)
# Should also work if we just give it the raw iterable
f.covariance = expected
nose.tools.assert_equal(f.covariance, c)
# And for floats...
f = Feature(ctype=ctypes.c_float)
nose.tools.assert_equal(f.covariance, Covariance())
expected = [[1, 2],
[3, 4]]
c = Covariance(2, ctypes.c_float, expected)
f.covariance = c
nose.tools.assert_equal(f.covariance, c)
# Should also work if we just give it the raw iterable
f.covariance = expected
nose.tools.assert_equal(f.covariance, c)
def test_set_color(self):
expected = RGBColor(4, 20, 0)
f = Feature(ctype=ctypes.c_double)
nose.tools.assert_equal(f.color, RGBColor())
f.color = expected
nose.tools.assert_equal(f.color, expected)
f = Feature(ctype=ctypes.c_float)
nose.tools.assert_equal(f.color, RGBColor())
f.color = expected
nose.tools.assert_equal(f.color, expected)
| [
"vital.types.RGBColor",
"vital.types.Covariance",
"numpy.testing.assert_almost_equal",
"vital.types.Feature",
"random.random"
] | [((1866, 1875), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (1873, 1875), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((1889, 1913), 'vital.types.Feature', 'Feature', (['[1, 1]', '(1)', '(2)', '(1)'], {}), '([1, 1], 1, 2, 1)\n', (1896, 1913), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2009, 2039), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (2016, 2039), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2103, 2132), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (2110, 2132), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2229, 2238), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (2236, 2238), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2247, 2304), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['f.location', '[[0], [0]]'], {}), '(f.location, [[0], [0]])\n', (2280, 2304), False, 'import numpy\n'), ((2387, 2408), 'vital.types.Feature', 'Feature', ([], {'loc': 'expected'}), '(loc=expected)\n', (2394, 2408), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2417, 2472), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['f.location', 'expected'], {}), '(f.location, expected)\n', (2450, 2472), False, 'import numpy\n'), ((2543, 2567), 'vital.types.Feature', 'Feature', ([], {'loc': '(12.3, 643)'}), '(loc=(12.3, 643))\n', (2550, 2567), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2576, 2631), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['f.location', 'expected'], {}), '(f.location, expected)\n', (2609, 2631), False, 'import numpy\n'), ((2707, 2716), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (2714, 2716), False, 'from 
vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2778, 2794), 'vital.types.Feature', 'Feature', ([], {'mag': '(1.1)'}), '(mag=1.1)\n', (2785, 2794), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2888, 2897), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (2895, 2897), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((2955, 2973), 'vital.types.Feature', 'Feature', ([], {'scale': '(2.1)'}), '(scale=2.1)\n', (2962, 2973), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3063, 3072), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (3070, 3072), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3130, 3148), 'vital.types.Feature', 'Feature', ([], {'angle': '(1.1)'}), '(angle=1.1)\n', (3137, 3148), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3247, 3260), 'vital.types.Covariance', 'Covariance', (['(2)'], {}), '(2)\n', (3257, 3260), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3273, 3282), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (3280, 3282), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3460, 3470), 'vital.types.RGBColor', 'RGBColor', ([], {}), '()\n', (3468, 3470), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3483, 3492), 'vital.types.Feature', 'Feature', ([], {}), '()\n', (3490, 3492), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3559, 3578), 'vital.types.RGBColor', 'RGBColor', (['(5)', '(32)', '(10)'], {}), '(5, 32, 10)\n', (3567, 3578), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3591, 3611), 'vital.types.Feature', 'Feature', ([], {'rgb_color': 'c'}), '(rgb_color=c)\n', (3598, 3611), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3702, 3732), 
'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (3709, 3732), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((4029, 4088), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['f.location', 'expected', '(16)'], {}), '(f.location, expected, 16)\n', (4062, 4088), False, 'import numpy\n'), ((4102, 4131), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (4109, 4131), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((4310, 4368), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['f.location', 'expected', '(6)'], {}), '(f.location, expected, 6)\n', (4343, 4368), False, 'import numpy\n'), ((4416, 4446), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (4423, 4446), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((4531, 4546), 'random.random', 'random.random', ([], {}), '()\n', (4544, 4546), False, 'import random\n'), ((4657, 4686), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (4664, 4686), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((4771, 4786), 'random.random', 'random.random', ([], {}), '()\n', (4784, 4786), False, 'import random\n'), ((4926, 4956), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (4933, 4956), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((5037, 5052), 'random.random', 'random.random', ([], {}), '()\n', (5050, 5052), False, 'import random\n'), ((5155, 5184), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (5162, 5184), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((5265, 5280), 'random.random', 'random.random', 
([], {}), '()\n', (5278, 5280), False, 'import random\n'), ((5412, 5442), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (5419, 5442), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((5523, 5538), 'random.random', 'random.random', ([], {}), '()\n', (5536, 5538), False, 'import random\n'), ((5641, 5670), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (5648, 5670), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((5751, 5766), 'random.random', 'random.random', ([], {}), '()\n', (5764, 5766), False, 'import random\n'), ((5898, 5928), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (5905, 5928), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6058, 6098), 'vital.types.Covariance', 'Covariance', (['(2)', 'ctypes.c_double', 'expected'], {}), '(2, ctypes.c_double, expected)\n', (6068, 6098), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6358, 6387), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (6365, 6387), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6517, 6556), 'vital.types.Covariance', 'Covariance', (['(2)', 'ctypes.c_float', 'expected'], {}), '(2, ctypes.c_float, expected)\n', (6527, 6556), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6825, 6843), 'vital.types.RGBColor', 'RGBColor', (['(4)', '(20)', '(0)'], {}), '(4, 20, 0)\n', (6833, 6843), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6857, 6887), 'vital.types.Feature', 'Feature', ([], {'ctype': 'ctypes.c_double'}), '(ctype=ctypes.c_double)\n', (6864, 6887), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((7032, 7061), 'vital.types.Feature', 
'Feature', ([], {'ctype': 'ctypes.c_float'}), '(ctype=ctypes.c_float)\n', (7039, 7061), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((5975, 5987), 'vital.types.Covariance', 'Covariance', ([], {}), '()\n', (5985, 5987), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6434, 6446), 'vital.types.Covariance', 'Covariance', ([], {}), '()\n', (6444, 6446), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((6929, 6939), 'vital.types.RGBColor', 'RGBColor', ([], {}), '()\n', (6937, 6939), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((7103, 7113), 'vital.types.RGBColor', 'RGBColor', ([], {}), '()\n', (7111, 7113), False, 'from vital.types import Covariance, EigenArray, Feature, RGBColor\n'), ((3754, 3769), 'random.random', 'random.random', ([], {}), '()\n', (3767, 3769), False, 'import random\n'), ((3793, 3808), 'random.random', 'random.random', ([], {}), '()\n', (3806, 3808), False, 'import random\n'), ((4153, 4168), 'random.random', 'random.random', ([], {}), '()\n', (4166, 4168), False, 'import random\n'), ((4192, 4207), 'random.random', 'random.random', ([], {}), '()\n', (4205, 4207), False, 'import random\n')] |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import os
np_dtype_string = np.dtype(object)
class SavedModelShapeTest(unittest.TestCase):
def _full_exact(self, input_dtype, output0_dtype, output1_dtype,
output0_raw, output1_raw, swap):
def _infer_exact_helper(tester, pf, tensor_shape, batch_size,
input_dtype, output0_dtype, output1_dtype,
output0_raw=True, output1_raw=True,
model_version=None, swap=False,
outputs=("OUTPUT0", "OUTPUT1"), use_http=True, use_grpc=True,
skip_request_id_check=False, use_streaming=True,
correlation_id=0):
for bs in (1, batch_size):
# model that does not support batching
if bs == 1:
iu.infer_exact(tester, "savedmodel_nobatch", tensor_shape, bs,
input_dtype, output0_dtype, output1_dtype,
output0_raw, output1_raw,
model_version, swap,
outputs, use_http, use_grpc,
skip_request_id_check, use_streaming,
correlation_id)
# model that supports batching
iu.infer_exact(tester, "savedmodel", (bs,) + tensor_shape, bs,
input_dtype, output0_dtype, output1_dtype,
output0_raw, output1_raw,
model_version, swap, outputs, use_http, use_grpc,
skip_request_id_check, use_streaming,
correlation_id)
input_size = 16
if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,), (input_size,)):
_infer_exact_helper(self, "savedmodel", (input_size,), 8,
input_dtype, output0_dtype, output1_dtype,
output0_raw=output0_raw, output1_raw=output1_raw, swap=swap)
def test_raw_bbb(self):
self._full_exact(np.int8, np.int8, np.int8,
output0_raw=True, output1_raw=True, swap=True)
def test_raw_sss(self):
self._full_exact(np.int16, np.int16, np.int16,
output0_raw=True, output1_raw=True, swap=True)
def test_raw_iii(self):
self._full_exact(np.int32, np.int32, np.int32,
output0_raw=True, output1_raw=True, swap=True)
def test_raw_lll(self):
self._full_exact(np.int64, np.int64, np.int64,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_hhh(self):
self._full_exact(np.float16, np.float16, np.float16,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_fff(self):
self._full_exact(np.float32, np.float32, np.float32,
output0_raw=True, output1_raw=True, swap=True)
def test_raw_hff(self):
self._full_exact(np.float16, np.float32, np.float32,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_bii(self):
self._full_exact(np.int8, np.int32, np.int32,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_ibb(self):
self._full_exact(np.int32, np.int8, np.int8,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_ibs(self):
self._full_exact(np.int32, np.int8, np.int16,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_iff(self):
self._full_exact(np.int32, np.float32, np.float32,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_fii(self):
self._full_exact(np.float32, np.int32, np.int32,
output0_raw=True, output1_raw=True, swap=False)
def test_raw_ihs(self):
self._full_exact(np.int32, np.float16, np.int16,
output0_raw=True, output1_raw=True, swap=False)
if __name__ == '__main__':
unittest.main()
| [
"numpy.dtype",
"infer_util.infer_exact",
"unittest.main",
"test_util.validate_for_tf_model",
"sys.path.append"
] | [((1550, 1578), 'sys.path.append', 'sys.path.append', (['"""../common"""'], {}), "('../common')\n", (1565, 1578), False, 'import sys\n'), ((1753, 1769), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (1761, 1769), True, 'import numpy as np\n'), ((5887, 5902), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5900, 5902), False, 'import unittest\n'), ((3431, 3548), 'test_util.validate_for_tf_model', 'tu.validate_for_tf_model', (['input_dtype', 'output0_dtype', 'output1_dtype', '(input_size,)', '(input_size,)', '(input_size,)'], {}), '(input_dtype, output0_dtype, output1_dtype, (\n input_size,), (input_size,), (input_size,))\n', (3455, 3548), True, 'import test_util as tu\n'), ((3003, 3250), 'infer_util.infer_exact', 'iu.infer_exact', (['tester', '"""savedmodel"""', '((bs,) + tensor_shape)', 'bs', 'input_dtype', 'output0_dtype', 'output1_dtype', 'output0_raw', 'output1_raw', 'model_version', 'swap', 'outputs', 'use_http', 'use_grpc', 'skip_request_id_check', 'use_streaming', 'correlation_id'], {}), "(tester, 'savedmodel', (bs,) + tensor_shape, bs, input_dtype,\n output0_dtype, output1_dtype, output0_raw, output1_raw, model_version,\n swap, outputs, use_http, use_grpc, skip_request_id_check, use_streaming,\n correlation_id)\n", (3017, 3250), True, 'import infer_util as iu\n'), ((2488, 2735), 'infer_util.infer_exact', 'iu.infer_exact', (['tester', '"""savedmodel_nobatch"""', 'tensor_shape', 'bs', 'input_dtype', 'output0_dtype', 'output1_dtype', 'output0_raw', 'output1_raw', 'model_version', 'swap', 'outputs', 'use_http', 'use_grpc', 'skip_request_id_check', 'use_streaming', 'correlation_id'], {}), "(tester, 'savedmodel_nobatch', tensor_shape, bs, input_dtype,\n output0_dtype, output1_dtype, output0_raw, output1_raw, model_version,\n swap, outputs, use_http, use_grpc, skip_request_id_check, use_streaming,\n correlation_id)\n", (2502, 2735), True, 'import infer_util as iu\n')] |
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
def plotdata(f1,f2,line=False,killme=False):
x1,y1,x2,y2=[],[],[],[]
for i in range(m):
if y[i]:x1.append(f1[i]),y1.append(f2[i])
else:x2.append(f1[i]),y2.append(f2[i])
plt.plot(x1, y1, 'rx')
plt.plot(x2, y2, 'bo')
plt.ylabel('exame 1')
plt.xlabel('exame 2')
plt.xticks(np.arange(min(f1), max(f1)+1, 50))
plt.yticks(np.arange(min(f2), max(f2)+1, 50))
plt.legend(['ex1','ex2'])
if line:
l1 = np.array([min(f1),max(f1)])
l2=(-1./theta[2])*(theta[1]*l1 + theta[0])
plt.plot(l1, l2, '-g')
if killme: # haha fix this
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z=np.zeros((len(u),len(v)))
for i in range(1,len(u)+1):
for j in range(1,len(v)+1):
z[i,j]=mapFeature(u[i],v[j])*theta
z=z.transpose()
plt.contour(u, v, z, [0, 0], 'LineWidth', 2)
plt.show()
def sigmoid(z):
return 1/(1+np.exp(-z))
def costFunction(theta, X, y):
H=np.array(sigmoid(theta*X.transpose()))
cost=(-1/m)*np.sum( y*np.log(H) + (1-y)*np.log(1-H) )
grad=(1/m)*(H-y)*X
return cost,grad
def predict(x):
pred=sigmoid(theta*np.matrix(x).transpose()).tolist()[0]
pred=[1 if i>=.5 else 0 for i in pred]
return pred
def mapFeature(X1,X2):
degree = 6
out=np.matrix(np.ones((m,1)))
for i in range(1,degree+1):
for j in range(i+1):
out=np.concatenate( ( out, np.multiply( np.power(X1,i-j),np.power(X2,j) ) ) ,axis=1)
return out
def costFunctionReg(theta, X, y, lambd):
H=np.array(sigmoid(theta*X.transpose()))
j=costFunction(theta, X, y)[0]+(lambd/(2*m))*np.sum(np.square(theta)[1:])
reg=(lambd/m)*theta
reg[0]=0
grad=(1/m)*(H-y)*X+reg
return j,grad
data = open('ex2data2.txt', 'r').read().replace('\n',',').split(',') # 2 ,118 for multi and 1 ,100
data =np.matrix( [float(i) for i in data]).reshape((118,3))
y=np.array(data[:,-1]).flatten()
X=data[:,:-1]
(m,n)=X.shape
#X=np.concatenate(( np.ones((m,1)) ,X), axis=1) #one or the other
X=mapFeature(X[:,0] ,X[:,1]) # one or the other for multi
(m,n)=X.shape
#plotdata(X[:,1].flatten().tolist()[0],X[:,2].flatten().tolist()[0])
theta=np.zeros(n)
lambd = 1
cost,grad=costFunctionReg(theta, X, y, lambd)
options= {'maxiter': 400}
#opt=scipy.optimize.minimize(costFunction,theta,(X,y),method='TNC',jac=True,options=options)#one or the other
opt=scipy.optimize.minimize(costFunctionReg,theta,(X,y,lambd),method='TNC',jac=True,options=options)# one or the other for multi
cost=opt.fun
theta = opt.x
p = predict(X)
print('Train Accuracy: %.1f %%' % (np.mean(p == y) * 100))
print('Expected accuracy (with lambda = 1): 83.1 % (approx)\n')
#plotdata(X[:,1].flatten().tolist()[0],X[:,2].flatten().tolist()[0],True)
| [
"numpy.mean",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.square",
"numpy.exp",
"matplotlib.pyplot.contour",
"numpy.zeros",
"numpy.linspace",
"numpy.array",
"numpy.matrix",
"matplotlib.pyplot.legend",
... | [((2474, 2485), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2482, 2485), True, 'import numpy as np\n'), ((280, 302), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1', '"""rx"""'], {}), "(x1, y1, 'rx')\n", (288, 302), True, 'import matplotlib.pyplot as plt\n'), ((308, 330), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2', '"""bo"""'], {}), "(x2, y2, 'bo')\n", (316, 330), True, 'import matplotlib.pyplot as plt\n'), ((336, 357), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""exame 1"""'], {}), "('exame 1')\n", (346, 357), True, 'import matplotlib.pyplot as plt\n'), ((363, 384), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""exame 2"""'], {}), "('exame 2')\n", (373, 384), True, 'import matplotlib.pyplot as plt\n'), ((492, 518), 'matplotlib.pyplot.legend', 'plt.legend', (["['ex1', 'ex2']"], {}), "(['ex1', 'ex2'])\n", (502, 518), True, 'import matplotlib.pyplot as plt\n'), ((1022, 1032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1030, 1032), True, 'import matplotlib.pyplot as plt\n'), ((635, 657), 'matplotlib.pyplot.plot', 'plt.plot', (['l1', 'l2', '"""-g"""'], {}), "(l1, l2, '-g')\n", (643, 657), True, 'import matplotlib.pyplot as plt\n'), ((704, 728), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.5)', '(50)'], {}), '(-1, 1.5, 50)\n', (715, 728), True, 'import numpy as np\n'), ((742, 766), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.5)', '(50)'], {}), '(-1, 1.5, 50)\n', (753, 766), True, 'import numpy as np\n'), ((969, 1013), 'matplotlib.pyplot.contour', 'plt.contour', (['u', 'v', 'z', '[0, 0]', '"""LineWidth"""', '(2)'], {}), "(u, v, z, [0, 0], 'LineWidth', 2)\n", (980, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1489), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (1481, 1489), True, 'import numpy as np\n'), ((2123, 2144), 'numpy.array', 'np.array', (['data[:, -1]'], {}), '(data[:, -1])\n', (2131, 2144), True, 'import numpy as np\n'), ((1079, 1089), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1085, 
1089), True, 'import numpy as np\n'), ((2907, 2922), 'numpy.mean', 'np.mean', (['(p == y)'], {}), '(p == y)\n', (2914, 2922), True, 'import numpy as np\n'), ((1196, 1205), 'numpy.log', 'np.log', (['H'], {}), '(H)\n', (1202, 1205), True, 'import numpy as np\n'), ((1214, 1227), 'numpy.log', 'np.log', (['(1 - H)'], {}), '(1 - H)\n', (1220, 1227), True, 'import numpy as np\n'), ((1836, 1852), 'numpy.square', 'np.square', (['theta'], {}), '(theta)\n', (1845, 1852), True, 'import numpy as np\n'), ((1607, 1626), 'numpy.power', 'np.power', (['X1', '(i - j)'], {}), '(X1, i - j)\n', (1615, 1626), True, 'import numpy as np\n'), ((1624, 1639), 'numpy.power', 'np.power', (['X2', 'j'], {}), '(X2, j)\n', (1632, 1639), True, 'import numpy as np\n'), ((1316, 1328), 'numpy.matrix', 'np.matrix', (['x'], {}), '(x)\n', (1325, 1328), True, 'import numpy as np\n')] |
import numpy as np
from scipy.special import expit
from librosa.core import midi_to_hz
from omnizart.constants.midi import LOWEST_MIDI_NOTE
def inference(feature, model, timestep=128, batch_size=10, feature_num=384):
assert len(feature.shape) == 2
# Padding
total_samples = len(feature)
pad_bottom = (feature_num - feature.shape[1]) // 2
pad_top = feature_num - feature.shape[1] - pad_bottom
pad_len = timestep - 1
feature = np.pad(feature, ((pad_len, pad_len), (pad_bottom, pad_top)))
# Prepare for prediction
output = np.zeros(feature.shape + (2,))
total_batches = int(np.ceil(total_samples / batch_size))
last_batch_idx = len(feature) - pad_len
for bidx in range(total_batches):
print(f"batch: {bidx+1}/{total_batches}", end="\r")
# Collect batch feature
start_idx = bidx * batch_size
end_idx = min(start_idx + batch_size, last_batch_idx)
batch = np.array([feature[idx:idx+timestep] for idx in range(start_idx, end_idx)]) # noqa: E226
batch = np.expand_dims(batch, axis=3)
# Predict contour
batch_pred = model.predict(batch)
batch_pred = 1 / (1 + np.exp(-expit(batch_pred)))
# Add the batch results to the output container.
for idx, pred in enumerate(batch_pred):
slice_start = start_idx + idx
slice_end = slice_start + timestep
output[slice_start:slice_end] += pred
output = output[pad_len:-pad_len, pad_bottom:-pad_top, 1] # Remove padding
# Filter values
avg_max_val = np.mean(np.max(output, axis=1))
output = np.where(output > avg_max_val, output, 0)
# Generate final output F0
f0 = [] # pylint: disable=invalid-name
for pitches in output:
if np.sum(pitches) > 0:
pidx = np.argmax(pitches)
f0.append(midi_to_hz(pidx / 4 + LOWEST_MIDI_NOTE))
else:
f0.append(0)
return np.array(f0)
| [
"numpy.ceil",
"numpy.where",
"librosa.core.midi_to_hz",
"numpy.argmax",
"numpy.max",
"scipy.special.expit",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.expand_dims",
"numpy.pad"
] | [((456, 516), 'numpy.pad', 'np.pad', (['feature', '((pad_len, pad_len), (pad_bottom, pad_top))'], {}), '(feature, ((pad_len, pad_len), (pad_bottom, pad_top)))\n', (462, 516), True, 'import numpy as np\n'), ((560, 590), 'numpy.zeros', 'np.zeros', (['(feature.shape + (2,))'], {}), '(feature.shape + (2,))\n', (568, 590), True, 'import numpy as np\n'), ((1614, 1655), 'numpy.where', 'np.where', (['(output > avg_max_val)', 'output', '(0)'], {}), '(output > avg_max_val, output, 0)\n', (1622, 1655), True, 'import numpy as np\n'), ((1943, 1955), 'numpy.array', 'np.array', (['f0'], {}), '(f0)\n', (1951, 1955), True, 'import numpy as np\n'), ((615, 650), 'numpy.ceil', 'np.ceil', (['(total_samples / batch_size)'], {}), '(total_samples / batch_size)\n', (622, 650), True, 'import numpy as np\n'), ((1048, 1077), 'numpy.expand_dims', 'np.expand_dims', (['batch'], {'axis': '(3)'}), '(batch, axis=3)\n', (1062, 1077), True, 'import numpy as np\n'), ((1577, 1599), 'numpy.max', 'np.max', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (1583, 1599), True, 'import numpy as np\n'), ((1770, 1785), 'numpy.sum', 'np.sum', (['pitches'], {}), '(pitches)\n', (1776, 1785), True, 'import numpy as np\n'), ((1810, 1828), 'numpy.argmax', 'np.argmax', (['pitches'], {}), '(pitches)\n', (1819, 1828), True, 'import numpy as np\n'), ((1851, 1890), 'librosa.core.midi_to_hz', 'midi_to_hz', (['(pidx / 4 + LOWEST_MIDI_NOTE)'], {}), '(pidx / 4 + LOWEST_MIDI_NOTE)\n', (1861, 1890), False, 'from librosa.core import midi_to_hz\n'), ((1185, 1202), 'scipy.special.expit', 'expit', (['batch_pred'], {}), '(batch_pred)\n', (1190, 1202), False, 'from scipy.special import expit\n')] |
import scipy.io as sio
import os
import numpy as np
import pickle
def readMat(filename, folder, mode):
filename = os.path.join(os.getcwd(), os.path.join(folder, filename))
print(filename)
f = sio.loadmat(filename)
# import h5py
# with h5py.File(filename, 'r') as f:
#
# items = f['data']
# print(list(items.keys()))
# f = f[mode[:2]][0]
f = f['data'][0]
R = []
t = []
x1_list = []
x2_list = []
flag = []
idx1 = []
idx2 = []
for item in f:
R.append(item['T'][0:3,0:3])
t.append(item['T'][0:3,3])
# x1 = item['x1']
# x1 = x1/10
x1_list.append(item['x1'])
x2_list.append(item['x2'])
fl = np.array(item['flag'])
flag.append(fl.reshape(fl.shape[0]))
idx1.append(item['idx1'])
idx2.append(item['idx2'])
data = {}
data['R'] = R
data['t'] = t
data['x1'] = x1_list
data['x2'] = x2_list
data['flag'] = flag
data['idx1'] = idx1
data['idx2'] = idx2
return data
def createFlags(x1, x2, T):
ones = np.ones((x1.shape[0], 1))
flag = ones.reshape(x1.shape[0])
error = 0.1
x1_h = np.concatenate([x1, ones], axis=1)
err = np.matmul(T, np.transpose(x1_h))
err = x2 - np.transpose(err[:3])
err = np.linalg.norm(err, axis=1)
flag[err > error] = 0
print('Number of Inliers = {}\tPercentage = {}'.format(np.sum(flag), np.sum(flag) / x1.shape[0]))
return np.array(flag, dtype=int), np.sum(flag) / x1.shape[0]
def readPickle(filename, folder):
filename = os.path.join(os.getcwd(), os.path.join(folder, filename))
print(filename)
f = open(filename, 'rb')
data_original = pickle.load(f)
f.close()
R = []
t = []
x1_list = []
x2_list = []
flag = []
idx1 = []
idx2 = []
ransacT = []
ransacUT = []
fgrT = []
ransacDelta = []
ransacUDelta = []
fgrDelta = []
per = []
for frame in data_original:
if frame['flag'].size != 0:
R.append(frame['R'])
t.append(frame['t'])
x1_list.append(frame['x1'])
x2_list.append(frame['x2'])
T = np.eye(4)
T[:3,:3] = frame['R']
T[:3,3] = frame['t']
f, p = createFlags(frame['x1'], frame['x2'], T)
per.append(p)
flag.append(f)
idx1.append(frame['idx1'])
idx2.append(frame['idx2'])
ransacT.append(frame['ransacT'])
ransacUT.append(frame['ransacUT'])
fgrT.append(frame['ransacUT'])
ransacDelta.append(frame['ransacDelta'])
ransacUDelta.append(frame['ransacUDelta'])
fgrDelta.append(frame['fgrDelta'])
print(np.mean(per))
data = {}
data['R'] = R
data['t'] = t
data['x1'] = x1_list
data['x2'] = x2_list
data['flag'] = flag
data['idx1'] = idx1
data['idx2'] = idx2
data['ransacT'] = ransacT
data['ransacUT'] = ransacUT
data['fgrT'] = fgrT
data['ransacDelta'] = ransacDelta
data['ransacUDelta'] = ransacUDelta
data['fgrDelta'] = fgrDelta
return data, np.mean(per) | [
"numpy.mean",
"numpy.eye",
"numpy.ones",
"scipy.io.loadmat",
"pickle.load",
"os.path.join",
"os.getcwd",
"numpy.array",
"numpy.sum",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.transpose"
] | [((207, 228), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (218, 228), True, 'import scipy.io as sio\n'), ((1098, 1123), 'numpy.ones', 'np.ones', (['(x1.shape[0], 1)'], {}), '((x1.shape[0], 1))\n', (1105, 1123), True, 'import numpy as np\n'), ((1190, 1224), 'numpy.concatenate', 'np.concatenate', (['[x1, ones]'], {'axis': '(1)'}), '([x1, ones], axis=1)\n', (1204, 1224), True, 'import numpy as np\n'), ((1315, 1342), 'numpy.linalg.norm', 'np.linalg.norm', (['err'], {'axis': '(1)'}), '(err, axis=1)\n', (1329, 1342), True, 'import numpy as np\n'), ((1720, 1734), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1731, 1734), False, 'import pickle\n'), ((133, 144), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (142, 144), False, 'import os\n'), ((146, 176), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (158, 176), False, 'import os\n'), ((730, 752), 'numpy.array', 'np.array', (["item['flag']"], {}), "(item['flag'])\n", (738, 752), True, 'import numpy as np\n'), ((1248, 1266), 'numpy.transpose', 'np.transpose', (['x1_h'], {}), '(x1_h)\n', (1260, 1266), True, 'import numpy as np\n'), ((1283, 1304), 'numpy.transpose', 'np.transpose', (['err[:3]'], {}), '(err[:3])\n', (1295, 1304), True, 'import numpy as np\n'), ((1484, 1509), 'numpy.array', 'np.array', (['flag'], {'dtype': 'int'}), '(flag, dtype=int)\n', (1492, 1509), True, 'import numpy as np\n'), ((1605, 1616), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1614, 1616), False, 'import os\n'), ((1618, 1648), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (1630, 1648), False, 'import os\n'), ((2774, 2786), 'numpy.mean', 'np.mean', (['per'], {}), '(per)\n', (2781, 2786), True, 'import numpy as np\n'), ((3176, 3188), 'numpy.mean', 'np.mean', (['per'], {}), '(per)\n', (3183, 3188), True, 'import numpy as np\n'), ((1429, 1441), 'numpy.sum', 'np.sum', (['flag'], {}), '(flag)\n', (1435, 1441), True, 'import numpy as np\n'), 
((1511, 1523), 'numpy.sum', 'np.sum', (['flag'], {}), '(flag)\n', (1517, 1523), True, 'import numpy as np\n'), ((2204, 2213), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2210, 2213), True, 'import numpy as np\n'), ((1443, 1455), 'numpy.sum', 'np.sum', (['flag'], {}), '(flag)\n', (1449, 1455), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm
from joblib import Parallel, delayed
import pandas
from bcdsugar.utils import Monitor
from sparse_ho.ho import grad_search
from itertools import product
from sparse_ho.criterion import HeldOutSmoothedHinge
from sparse_ho.models import SVM
from sparse_ho.forward import Forward
from sparse_ho.implicit_forward import ImplicitForward
from sparse_ho.implicit import Implicit
from sparse_ho.datasets.real import get_data
from sparse_ho.grid_search import grid_search
# from my_data import get_data
# Datasets and hyper-parameter search methods to benchmark.
dataset_names = ["real-sim"]
# methods = ["implicit_forward", "implicit"]
methods = ["forward", "implicit_forward"]
# "grid_search",
tolerance_decreases = ["constant"]
# Inner-solver tolerance shared by every run.
tols = 1e-5
# Outer-iteration counts to sweep (one run per value).
n_outers = [1]
# Per-dataset t_max budget handed to grad_search / grid_search
# (presumably a wall-clock limit in seconds -- TODO confirm).
dict_t_max = {}
dict_t_max["rcv1"] = 50
dict_t_max["real-sim"] = 100
dict_t_max["leukemia"] = 10
dict_t_max["20news"] = 500
def parallel_function(
        dataset_name, method, tol=1e-5, n_outer=50,
        tolerance_decrease='exponential'):
    """Run one hyper-parameter search for the SVM regularization on a dataset.

    Parameters
    ----------
    dataset_name : str
        Dataset key; passed to :func:`get_data` and used to look up the
        time budget in ``dict_t_max``.
    method : str
        One of ``"implicit_forward"``, ``"forward"``, ``"implicit"``,
        ``"grid_search"``, ``"random"`` or ``"lhs"``.
    tol : float
        Tolerance of the inner SVM solver.
    n_outer : int
        Number of outer iterations.  NOTE: currently overridden to 5 below;
        kept in the signature for interface compatibility.
    tolerance_decrease : str
        Schedule for decreasing the tolerance across outer iterations.

    Returns
    -------
    tuple
        ``(dataset_name, method, tol, n_outer, tolerance_decrease, times,
        objs, objs_test, log_alphas, ||y_val||, ||y_test||)`` collected from
        the optimisation monitor.
    """
    # load data
    X_train, X_val, X_test, y_train, y_val, y_test = get_data(dataset_name, csr=True)
    n_samples, n_features = X_train.shape
    print('n_samples', n_samples)
    print('n_features', n_features)

    # Recode 0/1 labels as -1/+1 for the hinge loss.
    y_train[y_train == 0.0] = -1.0
    y_val[y_val == 0.0] = -1.0
    y_test[y_test == 0.0] = -1.0

    C_max = 100
    logC = np.log(1e-2)  # starting point of the search, log(C)
    n_outer = 5  # overrides the argument (see docstring)

    # Both branches of the original dataset test assigned 1, so the loop
    # always runs exactly once.
    size_loop = 1
    model = SVM(
        X_train, y_train, logC, max_iter=10000, tol=tol)
    for _ in range(size_loop):
        monitor = Monitor()
        # Every method optimises the same held-out smoothed-hinge criterion;
        # build it once instead of duplicating the call in each branch.
        criterion = HeldOutSmoothedHinge(X_val, y_val, model,
                                         X_test=X_test, y_test=y_test)
        if method == "implicit_forward":
            algo = ImplicitForward(criterion, tol_jac=1e-3, n_iter_jac=100)
            grad_search(
                algo=algo, verbose=False,
                log_alpha0=logC, tol=tol,
                n_outer=n_outer, monitor=monitor,
                t_max=dict_t_max[dataset_name],
                tolerance_decrease=tolerance_decrease)
        elif method == "forward":
            algo = Forward(criterion)
            grad_search(
                algo=algo,
                log_alpha0=logC, tol=tol,
                n_outer=n_outer, monitor=monitor,
                t_max=dict_t_max[dataset_name],
                tolerance_decrease=tolerance_decrease)
        elif method == "implicit":
            algo = Implicit(criterion)
            grad_search(
                algo=algo,
                log_alpha0=logC, tol=tol,
                n_outer=n_outer, monitor=monitor,
                t_max=dict_t_max[dataset_name],
                tolerance_decrease=tolerance_decrease)
        elif method in ("grid_search", "random", "lhs"):
            # The zero-order searches only differ in the sampling scheme.
            algo = Forward(criterion)
            log_alpha_min = np.log(1e-2)
            samp = "grid" if method == "grid_search" else method
            log_alpha_opt, _ = grid_search(
                algo, log_alpha_min, np.log(C_max), monitor, max_evals=25,
                tol=tol, samp=samp)
            print(log_alpha_opt)

        # Freeze the monitor's trajectories as arrays for the result tuple.
        monitor.times = np.array(monitor.times)
        monitor.objs = np.array(monitor.objs)
        monitor.objs_test = np.array(monitor.objs_test)
        monitor.log_alphas = np.array(monitor.log_alphas)
    return (dataset_name, method, tol, n_outer, tolerance_decrease,
            monitor.times, monitor.objs, monitor.objs_test,
            monitor.log_alphas, norm(y_val), norm(y_test))
print("enter parallel")
backend = 'loky'
n_jobs = 1
# One delayed call per (dataset, method, n_outer, schedule) combination.
experiment_grid = product(
    dataset_names, methods, n_outers, tolerance_decreases)
runner = Parallel(n_jobs=n_jobs, verbose=100, backend=backend)
results = runner(
    delayed(parallel_function)(
        name, meth, n_outer=outer,
        tolerance_decrease=schedule, tol=tols)
    for name, meth, outer, schedule in experiment_grid)
print('OK finished parallel')

# Collect every run into one frame, one row per experiment.
df = pandas.DataFrame(results)
df.columns = [
    'dataset', 'method', 'tol', 'n_outer', 'tolerance_decrease',
    'times', 'objs', 'objs_test', 'log_alphas', 'norm y_val',
    'norm y_test']

# Write one pickle per dataset, filtered out of the combined frame.
for name in dataset_names:
    df[df['dataset'] == name].to_pickle(
        "%s.pkl" % name)
| [
"sparse_ho.implicit.Implicit",
"sparse_ho.criterion.HeldOutSmoothedHinge",
"sparse_ho.implicit_forward.ImplicitForward",
"numpy.log",
"bcdsugar.utils.Monitor",
"sparse_ho.ho.grad_search",
"itertools.product",
"joblib.Parallel",
"numpy.array",
"sparse_ho.forward.Forward",
"numpy.linalg.norm",
"... | [((4952, 4977), 'pandas.DataFrame', 'pandas.DataFrame', (['results'], {}), '(results)\n', (4968, 4977), False, 'import pandas\n'), ((1056, 1088), 'sparse_ho.datasets.real.get_data', 'get_data', (['dataset_name'], {'csr': '(True)'}), '(dataset_name, csr=True)\n', (1064, 1088), False, 'from sparse_ho.datasets.real import get_data\n'), ((1328, 1340), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (1334, 1340), True, 'import numpy as np\n'), ((1455, 1507), 'sparse_ho.models.SVM', 'SVM', (['X_train', 'y_train', 'logC'], {'max_iter': '(10000)', 'tol': 'tol'}), '(X_train, y_train, logC, max_iter=10000, tol=tol)\n', (1458, 1507), False, 'from sparse_ho.models import SVM\n'), ((4164, 4187), 'numpy.array', 'np.array', (['monitor.times'], {}), '(monitor.times)\n', (4172, 4187), True, 'import numpy as np\n'), ((4207, 4229), 'numpy.array', 'np.array', (['monitor.objs'], {}), '(monitor.objs)\n', (4215, 4229), True, 'import numpy as np\n'), ((4254, 4281), 'numpy.array', 'np.array', (['monitor.objs_test'], {}), '(monitor.objs_test)\n', (4262, 4281), True, 'import numpy as np\n'), ((4307, 4335), 'numpy.array', 'np.array', (['monitor.log_alphas'], {}), '(monitor.log_alphas)\n', (4315, 4335), True, 'import numpy as np\n'), ((4587, 4640), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': '(100)', 'backend': 'backend'}), '(n_jobs=n_jobs, verbose=100, backend=backend)\n', (4595, 4640), False, 'from joblib import Parallel, delayed\n'), ((1566, 1575), 'bcdsugar.utils.Monitor', 'Monitor', ([], {}), '()\n', (1573, 1575), False, 'from bcdsugar.utils import Monitor\n'), ((4496, 4507), 'numpy.linalg.norm', 'norm', (['y_val'], {}), '(y_val)\n', (4500, 4507), False, 'from numpy.linalg import norm\n'), ((4509, 4521), 'numpy.linalg.norm', 'norm', (['y_test'], {}), '(y_test)\n', (4513, 4521), False, 'from numpy.linalg import norm\n'), ((1642, 1713), 'sparse_ho.criterion.HeldOutSmoothedHinge', 'HeldOutSmoothedHinge', (['X_val', 'y_val', 'model'], {'X_test': 'X_test', 
'y_test': 'y_test'}), '(X_val, y_val, model, X_test=X_test, y_test=y_test)\n', (1662, 1713), False, 'from sparse_ho.criterion import HeldOutSmoothedHinge\n'), ((1733, 1790), 'sparse_ho.implicit_forward.ImplicitForward', 'ImplicitForward', (['criterion'], {'tol_jac': '(0.001)', 'n_iter_jac': '(100)'}), '(criterion, tol_jac=0.001, n_iter_jac=100)\n', (1748, 1790), False, 'from sparse_ho.implicit_forward import ImplicitForward\n'), ((1812, 1989), 'sparse_ho.ho.grad_search', 'grad_search', ([], {'algo': 'algo', 'verbose': '(False)', 'log_alpha0': 'logC', 'tol': 'tol', 'n_outer': 'n_outer', 'monitor': 'monitor', 't_max': 'dict_t_max[dataset_name]', 'tolerance_decrease': 'tolerance_decrease'}), '(algo=algo, verbose=False, log_alpha0=logC, tol=tol, n_outer=\n n_outer, monitor=monitor, t_max=dict_t_max[dataset_name],\n tolerance_decrease=tolerance_decrease)\n', (1823, 1989), False, 'from sparse_ho.ho import grad_search\n'), ((4646, 4672), 'joblib.delayed', 'delayed', (['parallel_function'], {}), '(parallel_function)\n', (4653, 4672), False, 'from joblib import Parallel, delayed\n'), ((4843, 4905), 'itertools.product', 'product', (['dataset_names', 'methods', 'n_outers', 'tolerance_decreases'], {}), '(dataset_names, methods, n_outers, tolerance_decreases)\n', (4850, 4905), False, 'from itertools import product\n'), ((2121, 2192), 'sparse_ho.criterion.HeldOutSmoothedHinge', 'HeldOutSmoothedHinge', (['X_val', 'y_val', 'model'], {'X_test': 'X_test', 'y_test': 'y_test'}), '(X_val, y_val, model, X_test=X_test, y_test=y_test)\n', (2141, 2192), False, 'from sparse_ho.criterion import HeldOutSmoothedHinge\n'), ((2212, 2230), 'sparse_ho.forward.Forward', 'Forward', (['criterion'], {}), '(criterion)\n', (2219, 2230), False, 'from sparse_ho.forward import Forward\n'), ((2253, 2416), 'sparse_ho.ho.grad_search', 'grad_search', ([], {'algo': 'algo', 'log_alpha0': 'logC', 'tol': 'tol', 'n_outer': 'n_outer', 'monitor': 'monitor', 't_max': 'dict_t_max[dataset_name]', 'tolerance_decrease': 
'tolerance_decrease'}), '(algo=algo, log_alpha0=logC, tol=tol, n_outer=n_outer, monitor=\n monitor, t_max=dict_t_max[dataset_name], tolerance_decrease=\n tolerance_decrease)\n', (2264, 2416), False, 'from sparse_ho.ho import grad_search\n'), ((2548, 2619), 'sparse_ho.criterion.HeldOutSmoothedHinge', 'HeldOutSmoothedHinge', (['X_val', 'y_val', 'model'], {'X_test': 'X_test', 'y_test': 'y_test'}), '(X_val, y_val, model, X_test=X_test, y_test=y_test)\n', (2568, 2619), False, 'from sparse_ho.criterion import HeldOutSmoothedHinge\n'), ((2639, 2658), 'sparse_ho.implicit.Implicit', 'Implicit', (['criterion'], {}), '(criterion)\n', (2647, 2658), False, 'from sparse_ho.implicit import Implicit\n'), ((2681, 2844), 'sparse_ho.ho.grad_search', 'grad_search', ([], {'algo': 'algo', 'log_alpha0': 'logC', 'tol': 'tol', 'n_outer': 'n_outer', 'monitor': 'monitor', 't_max': 'dict_t_max[dataset_name]', 'tolerance_decrease': 'tolerance_decrease'}), '(algo=algo, log_alpha0=logC, tol=tol, n_outer=n_outer, monitor=\n monitor, t_max=dict_t_max[dataset_name], tolerance_decrease=\n tolerance_decrease)\n', (2692, 2844), False, 'from sparse_ho.ho import grad_search\n'), ((2979, 3050), 'sparse_ho.criterion.HeldOutSmoothedHinge', 'HeldOutSmoothedHinge', (['X_val', 'y_val', 'model'], {'X_test': 'X_test', 'y_test': 'y_test'}), '(X_val, y_val, model, X_test=X_test, y_test=y_test)\n', (2999, 3050), False, 'from sparse_ho.criterion import HeldOutSmoothedHinge\n'), ((3070, 3088), 'sparse_ho.forward.Forward', 'Forward', (['criterion'], {}), '(criterion)\n', (3077, 3088), False, 'from sparse_ho.forward import Forward\n'), ((3117, 3129), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (3123, 3129), True, 'import numpy as np\n'), ((3220, 3233), 'numpy.log', 'np.log', (['C_max'], {}), '(C_max)\n', (3226, 3233), True, 'import numpy as np\n'), ((3387, 3458), 'sparse_ho.criterion.HeldOutSmoothedHinge', 'HeldOutSmoothedHinge', (['X_val', 'y_val', 'model'], {'X_test': 'X_test', 'y_test': 'y_test'}), 
'(X_val, y_val, model, X_test=X_test, y_test=y_test)\n', (3407, 3458), False, 'from sparse_ho.criterion import HeldOutSmoothedHinge\n'), ((3478, 3496), 'sparse_ho.forward.Forward', 'Forward', (['criterion'], {}), '(criterion)\n', (3485, 3496), False, 'from sparse_ho.forward import Forward\n'), ((3525, 3537), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (3531, 3537), True, 'import numpy as np\n'), ((3628, 3641), 'numpy.log', 'np.log', (['C_max'], {}), '(C_max)\n', (3634, 3641), True, 'import numpy as np\n'), ((3794, 3865), 'sparse_ho.criterion.HeldOutSmoothedHinge', 'HeldOutSmoothedHinge', (['X_val', 'y_val', 'model'], {'X_test': 'X_test', 'y_test': 'y_test'}), '(X_val, y_val, model, X_test=X_test, y_test=y_test)\n', (3814, 3865), False, 'from sparse_ho.criterion import HeldOutSmoothedHinge\n'), ((3885, 3903), 'sparse_ho.forward.Forward', 'Forward', (['criterion'], {}), '(criterion)\n', (3892, 3903), False, 'from sparse_ho.forward import Forward\n'), ((3932, 3944), 'numpy.log', 'np.log', (['(0.01)'], {}), '(0.01)\n', (3938, 3944), True, 'import numpy as np\n'), ((4035, 4048), 'numpy.log', 'np.log', (['C_max'], {}), '(C_max)\n', (4041, 4048), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import mplfinance as mpf # pip install mplfinance
import akshare as ak
import talib as ta
# Step 1: fetch the data and compute the indicators
def get_data(item_code):
    """Fetch forward-adjusted ("qfq") daily bars for an A-share stock and
    enrich them with common technical indicators.

    Parameters
    ----------
    item_code : str
        Stock symbol, e.g. ``"000001"``.

    Returns
    -------
    pandas.DataFrame
        Date-indexed frame with English OHLCV column names plus
        MA5/10/20/60, MACD, Bollinger band, DEMA and RSI columns.
    """
    stock_df = ak.stock_zh_a_hist(symbol=item_code, adjust="qfq")
    stock_df['日期'] = pd.to_datetime(stock_df['日期'])
    # Map the Chinese column names returned by akshare to English ones.
    stock_df.rename(columns={"日期": "date", '开盘': "open", "收盘": "close", "最高": "high", "最低": "low",
                             "成交量": "volume", "成交额": "value", "振幅": "amplitude", "涨跌额": "change",
                             "涨跌幅": "pct_change", "换手率": "turnover_rate"},
                    inplace=True)
    stock_df.set_index("date", inplace=True)
    # BUG FIX: the mid price is (high + low) / 2; the original expression
    # divided only 'low' by 2 because of operator precedence.
    stock_df['average'] = round((stock_df['high'] + stock_df['low']) / 2, 2)
    # +/-10 % bands around the open (NOTE(review): exchange price limits are
    # normally based on the previous close -- confirm the intent here).
    stock_df['upper_lim'] = round(stock_df['open'] * 1.1, 2)
    stock_df['lower_lim'] = round(stock_df['open'] * 0.9, 2)
    stock_df['last_close'] = stock_df['close'].shift(1)
    # Simple moving averages over 5/10/20/60 trading days.
    stock_df['MA5'] = ta.MA(stock_df['close'], timeperiod=5)
    stock_df['MA10'] = ta.MA(stock_df['close'], timeperiod=10)
    stock_df['MA20'] = ta.MA(stock_df['close'], timeperiod=20)
    stock_df['MA60'] = ta.MA(stock_df['close'], timeperiod=60)
    # MACD: 12/26-period EMAs with a 9-period signal line.
    stock_df['macd-m'], stock_df['macd-s'], stock_df['macd-h'] = ta.MACD(stock_df['close'], fastperiod=12,
                                                                         slowperiod=26, signalperiod=9)
    # Bollinger bands: 5-period, 2 standard deviations.
    stock_df['bb-upper'], stock_df['bb-middle'], stock_df['bb-lower'] = ta.BBANDS(stock_df['close'], timeperiod=5,
                                                                                 nbdevup=2, nbdevdn=2, matype=0)
    # Double exponential moving average.
    stock_df['dema'] = ta.DEMA(stock_df['close'], timeperiod=30)
    # Relative strength index.
    stock_df['rsi'] = ta.RSI(stock_df['close'], timeperiod=14)
    return stock_df
# Draw the candlestick chart
symbol = "000001"
stock_name = "平安银行"
data = get_data(symbol)
# Plot only a slice of the history
data = data.loc["2021-05-20":, :]
# Market colours: red for up days, green for down days.
my_color = mpf.make_marketcolors(up='r',
                      down='g',
                      edge='inherit',
                      wick='inherit',
                      volume='inherit')
# NOTE(review): figcolor/gridcolor are tuple-looking *strings* and
# 'axes.unicode_minus' is the string 'False', not a bool -- confirm these
# are what mplfinance/matplotlib expect.
my_style = mpf.make_mpf_style(marketcolors=my_color,
                   rc={'font.family': 'SimHei', 'axes.unicode_minus': 'False'},
                   figcolor='(0.82, 0.83, 0.85)',
                   gridcolor='(0.82, 0.83, 0.85)')
# Text styles for the info panel placed above the chart.
title_font = {
    'size': '16',
    'color': 'black',
    'weight': 'bold',
    'va': 'bottom',
    'ha': 'center'}
large_red_font = {
    'fontname': 'Arial',
    'size': '24',
    'color': 'red',
    'weight': 'bold',
    'va': 'bottom'}
large_green_font = {
    'fontname': 'Arial',
    'size': '24',
    'color': 'green',
    'weight': 'bold',
    'va': 'bottom'}
small_red_font = {
    'fontname': 'Arial',
    'size': '12',
    'color': 'red',
    'weight': 'bold',
    'va': 'bottom'}
small_green_font = {
    'fontname': 'Arial',
    'size': '12',
    'color': 'green',
    'weight': 'bold',
    'va': 'bottom'}
normal_label_font = {
    'size': '12',
    'color': 'black',
    'weight': 'normal',
    'va': 'bottom',
    'ha': 'right'}
normal_font = {
    'fontname': 'Arial',
    'size': '12',
    'color': 'black',
    'weight': 'normal',
    'va': 'bottom',
    'ha': 'left'}
# Create the figure and three vertically stacked Axes sharing the x axis:
# ax1 = candlesticks, ax2 = volume, ax3 = technical indicator.
fig = mpf.figure(style=my_style, figsize=(12, 8), facecolor=(0.82, 0.83, 0.85))
ax1 = fig.add_axes([0.08, 0.25, 0.88, 0.60])
ax2 = fig.add_axes([0.08, 0.15, 0.88, 0.10], sharex=ax1)
ax2.set_ylabel('volume')
ax3 = fig.add_axes([0.08, 0.05, 0.88, 0.10], sharex=ax1)
ax3.set_ylabel('macd')
# Pre-place (mostly empty) text objects on the figure; their contents are
# filled in later from the latest row of the data.
t1 = fig.text(0.50, 0.94, '{} - {}'.format(symbol, stock_name), **title_font)
t2 = fig.text(0.12, 0.90, '开/收: ', **normal_label_font)
t3 = fig.text(0.14, 0.89, f'', **large_red_font)
t4 = fig.text(0.14, 0.86, f'', **small_red_font)
t5 = fig.text(0.22, 0.86, f'', **small_red_font)
t6 = fig.text(0.12, 0.86, f'', **normal_label_font)
t7 = fig.text(0.40, 0.90, '高: ', **normal_label_font)
t8 = fig.text(0.40, 0.90, f'', **small_red_font)
t9 = fig.text(0.40, 0.86, '低: ', **normal_label_font)
t10 = fig.text(0.40, 0.86, f'', **small_green_font)
t11 = fig.text(0.55, 0.90, '量(万手): ', **normal_label_font)
t12 = fig.text(0.55, 0.90, f'', **normal_font)
t13 = fig.text(0.55, 0.86, '额(亿元): ', **normal_label_font)
t14 = fig.text(0.55, 0.86, f'', **normal_font)
t15 = fig.text(0.70, 0.90, '涨停: ', **normal_label_font)
t16 = fig.text(0.70, 0.90, f'', **small_red_font)
t17 = fig.text(0.70, 0.86, '跌停: ', **normal_label_font)
t18 = fig.text(0.70, 0.86, f'', **small_green_font)
t19 = fig.text(0.85, 0.90, '换手: ', **normal_label_font)
t20 = fig.text(0.85, 0.90, f'', **normal_font)
t21 = fig.text(0.85, 0.86, '昨收: ', **normal_label_font)
t22 = fig.text(0.85, 0.86, f'', **normal_font)
""" 更新K线图上的价格文本
"""
# data.iloc[-1] holds the most recent trading day; push each of its fields
# into the text objects pre-placed on the figure.
t3.set_text(f'{np.round(data.iloc[-1]["open"], 3)} / {np.round(data.iloc[-1]["close"], 3)}')
t4.set_text(f'{np.round(data.iloc[-1]["change"], 3)}')
t5.set_text(f'[{np.round(data.iloc[-1]["pct_change"], 3)}%]')
t6.set_text(f'{data.iloc[-1].name.date()}')
t8.set_text(f'{np.round(data.iloc[-1]["high"], 3)}')
t10.set_text(f'{np.round(data.iloc[-1]["low"], 3)}')
t12.set_text(f'{np.round(data.iloc[-1]["volume"] / 10000, 3)}')
t14.set_text(f'{np.round(data.iloc[-1]["value"]/100000000, 3)}')
t16.set_text(f'{np.round(data.iloc[-1]["upper_lim"], 3)}')
t18.set_text(f'{np.round(data.iloc[-1]["lower_lim"], 3)}')
t20.set_text(f'{np.round(data.iloc[-1]["turnover_rate"], 3)}')
t22.set_text(f'{np.round(data.iloc[-1]["last_close"], 3)}')
# Pick the display colour of the open/close figures from today's price change
if data.iloc[-1]['change'] > 0:  # closed above yesterday: show the price in red
    close_number_color = 'red'
elif data.iloc[-1]['change'] < 0:  # closed below yesterday: show the price in green
    close_number_color = 'green'
else:
    close_number_color = 'black'
t3.set_color(close_number_color)
t4.set_color(close_number_color)
t5.set_color(close_number_color)
avg_type = "bb"
indicator = "macd"
# Overlay plots to stack onto the price and indicator axes.
ap = []
# Price-axis overlay: plain moving averages or Bollinger bands.
if avg_type == 'ma':
    ma_specs = [('MA5', "#000000"), ('MA10', "#ff0000"),
                ('MA20', "#00ff00"), ('MA60', "#0000ff")]
    ap.extend(mpf.make_addplot(data[col], ax=ax1, color=shade)
              for col, shade in ma_specs)
elif avg_type == 'bb':
    ap.append(mpf.make_addplot(data[['bb-upper', 'bb-middle', 'bb-lower']], ax=ax1))
# Lower panel: MACD with a two-colour histogram, RSI with guide lines, or DEMA.
if indicator == 'macd':
    ap.append(mpf.make_addplot(data[['macd-m', 'macd-s']], ylabel='macd', ax=ax3))
    hist = data['macd-h']
    positive_part = np.where(hist > 0, hist, 0)
    negative_part = np.where(hist <= 0, hist, 0)
    ap.append(mpf.make_addplot(positive_part, type='bar', color='red', ax=ax3))
    ap.append(mpf.make_addplot(negative_part, type='bar', color='green', ax=ax3))
elif indicator == 'rsi':
    for level, shade in ((75, (0.75, 0.6, 0.6)), (30, (0.6, 0.75, 0.6))):
        ap.append(mpf.make_addplot([level] * len(data), color=shade, ax=ax3))
    ap.append(mpf.make_addplot(data['rsi'], ylabel='rsi', ax=ax3))
else:  # 'dema'
    ap.append(mpf.make_addplot(data['dema'], ylabel='dema', ax=ax3))
# Render the chart with the overlays assembled above
mpf.plot(data,
         ax=ax1,
         volume=ax2,
         addplot=ap,
         type='candle',
         style=my_style,
         datetime_format='%Y-%m-%d',
         xrotation=0)
# fig.show()
mpf.show()
# Save to disk if needed
# fig.savefig('a.png')
| [
"mplfinance.show",
"talib.DEMA",
"numpy.where",
"numpy.round",
"mplfinance.figure",
"talib.RSI",
"mplfinance.plot",
"mplfinance.make_addplot",
"talib.MACD",
"mplfinance.make_mpf_style",
"akshare.stock_zh_a_hist",
"talib.BBANDS",
"talib.MA",
"pandas.to_datetime",
"mplfinance.make_marketco... | [((1916, 2009), 'mplfinance.make_marketcolors', 'mpf.make_marketcolors', ([], {'up': '"""r"""', 'down': '"""g"""', 'edge': '"""inherit"""', 'wick': '"""inherit"""', 'volume': '"""inherit"""'}), "(up='r', down='g', edge='inherit', wick='inherit',\n volume='inherit')\n", (1937, 2009), True, 'import mplfinance as mpf\n'), ((2150, 2323), 'mplfinance.make_mpf_style', 'mpf.make_mpf_style', ([], {'marketcolors': 'my_color', 'rc': "{'font.family': 'SimHei', 'axes.unicode_minus': 'False'}", 'figcolor': '"""(0.82, 0.83, 0.85)"""', 'gridcolor': '"""(0.82, 0.83, 0.85)"""'}), "(marketcolors=my_color, rc={'font.family': 'SimHei',\n 'axes.unicode_minus': 'False'}, figcolor='(0.82, 0.83, 0.85)',\n gridcolor='(0.82, 0.83, 0.85)')\n", (2168, 2323), True, 'import mplfinance as mpf\n'), ((3358, 3431), 'mplfinance.figure', 'mpf.figure', ([], {'style': 'my_style', 'figsize': '(12, 8)', 'facecolor': '(0.82, 0.83, 0.85)'}), '(style=my_style, figsize=(12, 8), facecolor=(0.82, 0.83, 0.85))\n', (3368, 3431), True, 'import mplfinance as mpf\n'), ((7272, 7395), 'mplfinance.plot', 'mpf.plot', (['data'], {'ax': 'ax1', 'volume': 'ax2', 'addplot': 'ap', 'type': '"""candle"""', 'style': 'my_style', 'datetime_format': '"""%Y-%m-%d"""', 'xrotation': '(0)'}), "(data, ax=ax1, volume=ax2, addplot=ap, type='candle', style=\n my_style, datetime_format='%Y-%m-%d', xrotation=0)\n", (7280, 7395), True, 'import mplfinance as mpf\n'), ((7467, 7477), 'mplfinance.show', 'mpf.show', ([], {}), '()\n', (7475, 7477), True, 'import mplfinance as mpf\n'), ((189, 239), 'akshare.stock_zh_a_hist', 'ak.stock_zh_a_hist', ([], {'symbol': 'item_code', 'adjust': '"""qfq"""'}), "(symbol=item_code, adjust='qfq')\n", (207, 239), True, 'import akshare as ak\n'), ((265, 295), 'pandas.to_datetime', 'pd.to_datetime', (["stock_df['日期']"], {}), "(stock_df['日期'])\n", (279, 295), True, 'import pandas as pd\n'), ((922, 960), 'talib.MA', 'ta.MA', (["stock_df['close']"], {'timeperiod': '(5)'}), 
"(stock_df['close'], timeperiod=5)\n", (927, 960), True, 'import talib as ta\n'), ((984, 1023), 'talib.MA', 'ta.MA', (["stock_df['close']"], {'timeperiod': '(10)'}), "(stock_df['close'], timeperiod=10)\n", (989, 1023), True, 'import talib as ta\n'), ((1047, 1086), 'talib.MA', 'ta.MA', (["stock_df['close']"], {'timeperiod': '(20)'}), "(stock_df['close'], timeperiod=20)\n", (1052, 1086), True, 'import talib as ta\n'), ((1110, 1149), 'talib.MA', 'ta.MA', (["stock_df['close']"], {'timeperiod': '(60)'}), "(stock_df['close'], timeperiod=60)\n", (1115, 1149), True, 'import talib as ta\n'), ((1229, 1301), 'talib.MACD', 'ta.MACD', (["stock_df['close']"], {'fastperiod': '(12)', 'slowperiod': '(26)', 'signalperiod': '(9)'}), "(stock_df['close'], fastperiod=12, slowperiod=26, signalperiod=9)\n", (1236, 1301), True, 'import talib as ta\n'), ((1459, 1533), 'talib.BBANDS', 'ta.BBANDS', (["stock_df['close']"], {'timeperiod': '(5)', 'nbdevup': '(2)', 'nbdevdn': '(2)', 'matype': '(0)'}), "(stock_df['close'], timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)\n", (1468, 1533), True, 'import talib as ta\n'), ((1650, 1691), 'talib.DEMA', 'ta.DEMA', (["stock_df['close']"], {'timeperiod': '(30)'}), "(stock_df['close'], timeperiod=30)\n", (1657, 1691), True, 'import talib as ta\n'), ((1725, 1765), 'talib.RSI', 'ta.RSI', (["stock_df['close']"], {'timeperiod': '(14)'}), "(stock_df['close'], timeperiod=14)\n", (1731, 1765), True, 'import talib as ta\n'), ((6668, 6715), 'numpy.where', 'np.where', (["(data['macd-h'] > 0)", "data['macd-h']", '(0)'], {}), "(data['macd-h'] > 0, data['macd-h'], 0)\n", (6676, 6715), True, 'import numpy as np\n'), ((6728, 6776), 'numpy.where', 'np.where', (["(data['macd-h'] <= 0)", "data['macd-h']", '(0)'], {}), "(data['macd-h'] <= 0, data['macd-h'], 0)\n", (6736, 6776), True, 'import numpy as np\n'), ((6142, 6196), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data['MA5']"], {'ax': 'ax1', 'color': '"""#000000"""'}), "(data['MA5'], ax=ax1, color='#000000')\n", (6158, 
6196), True, 'import mplfinance as mpf\n'), ((6212, 6267), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data['MA10']"], {'ax': 'ax1', 'color': '"""#ff0000"""'}), "(data['MA10'], ax=ax1, color='#ff0000')\n", (6228, 6267), True, 'import mplfinance as mpf\n'), ((6283, 6338), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data['MA20']"], {'ax': 'ax1', 'color': '"""#00ff00"""'}), "(data['MA20'], ax=ax1, color='#00ff00')\n", (6299, 6338), True, 'import mplfinance as mpf\n'), ((6354, 6409), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data['MA60']"], {'ax': 'ax1', 'color': '"""#0000ff"""'}), "(data['MA60'], ax=ax1, color='#0000ff')\n", (6370, 6409), True, 'import mplfinance as mpf\n'), ((6587, 6654), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data[['macd-m', 'macd-s']]"], {'ylabel': '"""macd"""', 'ax': 'ax3'}), "(data[['macd-m', 'macd-s']], ylabel='macd', ax=ax3)\n", (6603, 6654), True, 'import mplfinance as mpf\n'), ((6791, 6847), 'mplfinance.make_addplot', 'mpf.make_addplot', (['bar_r'], {'type': '"""bar"""', 'color': '"""red"""', 'ax': 'ax3'}), "(bar_r, type='bar', color='red', ax=ax3)\n", (6807, 6847), True, 'import mplfinance as mpf\n'), ((6863, 6921), 'mplfinance.make_addplot', 'mpf.make_addplot', (['bar_g'], {'type': '"""bar"""', 'color': '"""green"""', 'ax': 'ax3'}), "(bar_g, type='bar', color='green', ax=ax3)\n", (6879, 6921), True, 'import mplfinance as mpf\n'), ((4953, 4987), 'numpy.round', 'np.round', (["data.iloc[-1]['open']", '(3)'], {}), "(data.iloc[-1]['open'], 3)\n", (4961, 4987), True, 'import numpy as np\n'), ((4992, 5027), 'numpy.round', 'np.round', (["data.iloc[-1]['close']", '(3)'], {}), "(data.iloc[-1]['close'], 3)\n", (5000, 5027), True, 'import numpy as np\n'), ((5046, 5082), 'numpy.round', 'np.round', (["data.iloc[-1]['change']", '(3)'], {}), "(data.iloc[-1]['change'], 3)\n", (5054, 5082), True, 'import numpy as np\n'), ((5102, 5142), 'numpy.round', 'np.round', (["data.iloc[-1]['pct_change']", '(3)'], {}), 
"(data.iloc[-1]['pct_change'], 3)\n", (5110, 5142), True, 'import numpy as np\n'), ((5207, 5241), 'numpy.round', 'np.round', (["data.iloc[-1]['high']", '(3)'], {}), "(data.iloc[-1]['high'], 3)\n", (5215, 5241), True, 'import numpy as np\n'), ((5261, 5294), 'numpy.round', 'np.round', (["data.iloc[-1]['low']", '(3)'], {}), "(data.iloc[-1]['low'], 3)\n", (5269, 5294), True, 'import numpy as np\n'), ((5314, 5358), 'numpy.round', 'np.round', (["(data.iloc[-1]['volume'] / 10000)", '(3)'], {}), "(data.iloc[-1]['volume'] / 10000, 3)\n", (5322, 5358), True, 'import numpy as np\n'), ((5378, 5425), 'numpy.round', 'np.round', (["(data.iloc[-1]['value'] / 100000000)", '(3)'], {}), "(data.iloc[-1]['value'] / 100000000, 3)\n", (5386, 5425), True, 'import numpy as np\n'), ((5443, 5482), 'numpy.round', 'np.round', (["data.iloc[-1]['upper_lim']", '(3)'], {}), "(data.iloc[-1]['upper_lim'], 3)\n", (5451, 5482), True, 'import numpy as np\n'), ((5502, 5541), 'numpy.round', 'np.round', (["data.iloc[-1]['lower_lim']", '(3)'], {}), "(data.iloc[-1]['lower_lim'], 3)\n", (5510, 5541), True, 'import numpy as np\n'), ((5561, 5604), 'numpy.round', 'np.round', (["data.iloc[-1]['turnover_rate']", '(3)'], {}), "(data.iloc[-1]['turnover_rate'], 3)\n", (5569, 5604), True, 'import numpy as np\n'), ((5624, 5664), 'numpy.round', 'np.round', (["data.iloc[-1]['last_close']", '(3)'], {}), "(data.iloc[-1]['last_close'], 3)\n", (5632, 5664), True, 'import numpy as np\n'), ((6448, 6517), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data[['bb-upper', 'bb-middle', 'bb-lower']]"], {'ax': 'ax1'}), "(data[['bb-upper', 'bb-middle', 'bb-lower']], ax=ax1)\n", (6464, 6517), True, 'import mplfinance as mpf\n'), ((7126, 7177), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data['rsi']"], {'ylabel': '"""rsi"""', 'ax': 'ax3'}), "(data['rsi'], ylabel='rsi', ax=ax3)\n", (7142, 7177), True, 'import mplfinance as mpf\n'), ((7209, 7262), 'mplfinance.make_addplot', 'mpf.make_addplot', (["data['dema']"], {'ylabel': 
'"""dema"""', 'ax': 'ax3'}), "(data['dema'], ylabel='dema', ax=ax3)\n", (7225, 7262), True, 'import mplfinance as mpf\n')] |
"""edges.py - Sobel edge filter
Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: <NAME>
"""
import numpy as np
from skimage import img_as_float
from scipy.ndimage import convolve, binary_erosion, generate_binary_structure
EROSION_SELEM = generate_binary_structure(2, 2)
def _mask_filter_result(result, mask):
"""Return result after masking.
Input masks are eroded so that mask areas in the original image don't
affect values in the result.
"""
if mask is None:
result[0, :] = 0
result[-1, :] = 0
result[:, 0] = 0
result[:, -1] = 0
return result
else:
mask = binary_erosion(mask, EROSION_SELEM, border_value=0)
return result * mask
def sobel(image, mask=None):
    """Calculate the absolute magnitude Sobel to find edges.

    Parameters
    ----------
    image : array_like, dtype=float
        Image to process.
    mask : array_like, dtype=bool, optional
        Restrict the result to this area; pixels adjacent to masked
        regions are zeroed as well.

    Returns
    -------
    output : ndarray
        The Sobel edge map.

    Notes
    -----
    Combines the horizontal and vertical Sobel responses into a single,
    direction-insensitive magnitude (root sum of squares).
    """
    horizontal = hsobel(image, mask)
    vertical = vsobel(image, mask)
    return np.sqrt(horizontal ** 2 + vertical ** 2)
def hsobel(image, mask=None):
    """Find the horizontal edges of an image using the Sobel transform.

    Parameters
    ----------
    image : array_like, dtype=float
        Image to process.
    mask : array_like, dtype=bool, optional
        Restrict the result to this area; pixels adjacent to masked
        regions are zeroed as well.

    Returns
    -------
    output : ndarray
        The Sobel edge map.

    Notes
    -----
    Convolves with the kernel below and returns the absolute value at
    each point::

      1   2   1
      0   0   0
     -1  -2  -1
    """
    image = img_as_float(image)
    kernel = np.array([[1, 2, 1],
                       [0, 0, 0],
                       [-1, -2, -1]], dtype=float) / 4.0
    response = np.abs(convolve(image, kernel))
    return _mask_filter_result(response, mask)
def vsobel(image, mask=None):
    """Find the vertical edges of an image using the Sobel transform.

    Parameters
    ----------
    image : array_like, dtype=float
        Image to process.
    mask : array_like, dtype=bool, optional
        Restrict the result to this area; pixels adjacent to masked
        regions are zeroed as well.

    Returns
    -------
    output : ndarray
        The Sobel edge map.

    Notes
    -----
    Convolves with the kernel below and returns the absolute value at
    each point::

      1   0  -1
      2   0  -2
      1   0  -1
    """
    image = img_as_float(image)
    kernel = np.array([[1, 0, -1],
                       [2, 0, -2],
                       [1, 0, -1]], dtype=float) / 4.0
    response = np.abs(convolve(image, kernel))
    return _mask_filter_result(response, mask)
def prewitt(image, mask=None):
    """Find the edge magnitude using the Prewitt transform.

    Parameters
    ----------
    image : array_like, dtype=float
        Image to process.
    mask : array_like, dtype=bool, optional
        Restrict the result to this area; pixels adjacent to masked
        regions are zeroed as well.

    Returns
    -------
    output : ndarray
        The Prewitt edge map.

    Notes
    -----
    Combines the horizontal and vertical Prewitt responses into a single
    magnitude (root sum of squares).
    """
    horizontal = hprewitt(image, mask)
    vertical = vprewitt(image, mask)
    return np.sqrt(horizontal ** 2 + vertical ** 2)
def hprewitt(image, mask=None):
    """Find the horizontal edges of an image using the Prewitt transform.

    Parameters
    ----------
    image : array_like, dtype=float
        Image to process.
    mask : array_like, dtype=bool, optional
        Restrict the result to this area; pixels adjacent to masked
        regions are zeroed as well.

    Returns
    -------
    output : ndarray
        The Prewitt edge map.

    Notes
    -----
    Convolves with the kernel below and returns the absolute value at
    each point::

      1   1   1
      0   0   0
     -1  -1  -1
    """
    image = img_as_float(image)
    kernel = np.array([[1, 1, 1],
                       [0, 0, 0],
                       [-1, -1, -1]], dtype=float) / 3.0
    response = np.abs(convolve(image, kernel))
    return _mask_filter_result(response, mask)
def vprewitt(image, mask=None):
    """Find the vertical edges of an image using the Prewitt transform.

    Parameters
    ----------
    image : array_like, dtype=float
        Image to process.
    mask : array_like, dtype=bool, optional
        Restrict the result to this area; pixels adjacent to masked
        regions are zeroed as well.

    Returns
    -------
    output : ndarray
        The Prewitt edge map.

    Notes
    -----
    Convolves with the kernel below and returns the absolute value at
    each point::

      1   0  -1
      1   0  -1
      1   0  -1
    """
    image = img_as_float(image)
    kernel = np.array([[1, 0, -1],
                       [1, 0, -1],
                       [1, 0, -1]], dtype=float) / 3.0
    response = np.abs(convolve(image, kernel))
    return _mask_filter_result(response, mask)
| [
"scipy.ndimage.binary_erosion",
"scipy.ndimage.generate_binary_structure",
"numpy.array",
"skimage.img_as_float"
] | [((450, 481), 'scipy.ndimage.generate_binary_structure', 'generate_binary_structure', (['(2)', '(2)'], {}), '(2, 2)\n', (475, 481), False, 'from scipy.ndimage import convolve, binary_erosion, generate_binary_structure\n'), ((2510, 2529), 'skimage.img_as_float', 'img_as_float', (['image'], {}), '(image)\n', (2522, 2529), False, 'from skimage import img_as_float\n'), ((3481, 3500), 'skimage.img_as_float', 'img_as_float', (['image'], {}), '(image)\n', (3493, 3500), False, 'from skimage import img_as_float\n'), ((5169, 5188), 'skimage.img_as_float', 'img_as_float', (['image'], {}), '(image)\n', (5181, 5188), False, 'from skimage import img_as_float\n'), ((6148, 6167), 'skimage.img_as_float', 'img_as_float', (['image'], {}), '(image)\n', (6160, 6167), False, 'from skimage import img_as_float\n'), ((845, 896), 'scipy.ndimage.binary_erosion', 'binary_erosion', (['mask', 'EROSION_SELEM'], {'border_value': '(0)'}), '(mask, EROSION_SELEM, border_value=0)\n', (859, 896), False, 'from scipy.ndimage import convolve, binary_erosion, generate_binary_structure\n'), ((2595, 2641), 'numpy.array', 'np.array', (['[[1, 2, 1], [0, 0, 0], [-1, -2, -1]]'], {}), '([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])\n', (2603, 2641), True, 'import numpy as np\n'), ((3566, 3612), 'numpy.array', 'np.array', (['[[1, 0, -1], [2, 0, -2], [1, 0, -1]]'], {}), '([[1, 0, -1], [2, 0, -2], [1, 0, -1]])\n', (3574, 3612), True, 'import numpy as np\n'), ((5254, 5300), 'numpy.array', 'np.array', (['[[1, 1, 1], [0, 0, 0], [-1, -1, -1]]'], {}), '([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n', (5262, 5300), True, 'import numpy as np\n'), ((6233, 6279), 'numpy.array', 'np.array', (['[[1, 0, -1], [1, 0, -1], [1, 0, -1]]'], {}), '([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n', (6241, 6279), True, 'import numpy as np\n')] |
"""
test_chemistry_hydrogen.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon Feb 16 12:50:43 MST 2015
Description:
"""
import ares
import numpy as np
import matplotlib.pyplot as pl
def test():
    """Regression test: evolve an isothermal H/He gas parcel without
    radiative transfer and plot the final ionization states vs. temperature."""
    # Parcel configuration: 64 isothermal cells spanning 1e4-1e6 K,
    # starting essentially neutral.
    params = {
        'grid_cells': 64,
        'include_He': True,
        'isothermal': True,
        'stop_time': 1e2,
        'radiative_transfer': False,
        'density_units': 1.0,
        'initial_timestep': 1,
        'max_timestep': 1e2,
        'initial_temperature': np.logspace(4, 6, 64),
        'initial_ionization': [1.-1e-8, 1e-8, 1-2e-8, 1e-8, 1e-8],  # neutral
    }
    sim = ares.simulations.GasParcel(**params)
    sim.run()
    data = sim.history
    temperatures = data['Tk'][0]
    # Last time snapshot of each species fraction vs. temperature.
    species_styles = [
        ('h_1', 'k', '-'),
        ('h_2', 'k', '--'),
        ('he_1', 'b', '-'),
        ('he_2', 'b', '--'),
        ('he_3', 'b', ':'),
    ]
    for species, color, linestyle in species_styles:
        pl.loglog(temperatures, data[species][-1, :], color=color, ls=linestyle)
    pl.ylim(1e-8, 1)
    # NOTE(review): rstrip('.py') strips *characters*, not the suffix; it is
    # only safe here because the module name does not end in '.', 'p' or 'y'.
    pl.savefig('{!s}.png'.format(__file__.rstrip('.py')))
    pl.close()
if __name__ == '__main__':
    # Allow running this regression test directly as a script.
    test()
| [
"ares.simulations.GasParcel",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"numpy.logspace"
] | [((606, 638), 'ares.simulations.GasParcel', 'ares.simulations.GasParcel', ([], {}), '(**pf)\n', (632, 638), False, 'import ares\n'), ((720, 775), 'matplotlib.pyplot.loglog', 'pl.loglog', (["data['Tk'][0]", "data['h_1'][-1, :]"], {'color': '"""k"""'}), "(data['Tk'][0], data['h_1'][-1, :], color='k')\n", (729, 775), True, 'import matplotlib.pyplot as pl\n'), ((779, 843), 'matplotlib.pyplot.loglog', 'pl.loglog', (["data['Tk'][0]", "data['h_2'][-1, :]"], {'color': '"""k"""', 'ls': '"""--"""'}), "(data['Tk'][0], data['h_2'][-1, :], color='k', ls='--')\n", (788, 843), True, 'import matplotlib.pyplot as pl\n'), ((852, 908), 'matplotlib.pyplot.loglog', 'pl.loglog', (["data['Tk'][0]", "data['he_1'][-1, :]"], {'color': '"""b"""'}), "(data['Tk'][0], data['he_1'][-1, :], color='b')\n", (861, 908), True, 'import matplotlib.pyplot as pl\n'), ((912, 977), 'matplotlib.pyplot.loglog', 'pl.loglog', (["data['Tk'][0]", "data['he_2'][-1, :]"], {'color': '"""b"""', 'ls': '"""--"""'}), "(data['Tk'][0], data['he_2'][-1, :], color='b', ls='--')\n", (921, 977), True, 'import matplotlib.pyplot as pl\n'), ((981, 1045), 'matplotlib.pyplot.loglog', 'pl.loglog', (["data['Tk'][0]", "data['he_3'][-1, :]"], {'color': '"""b"""', 'ls': '""":"""'}), "(data['Tk'][0], data['he_3'][-1, :], color='b', ls=':')\n", (990, 1045), True, 'import matplotlib.pyplot as pl\n'), ((1049, 1066), 'matplotlib.pyplot.ylim', 'pl.ylim', (['(1e-08)', '(1)'], {}), '(1e-08, 1)\n', (1056, 1066), True, 'import matplotlib.pyplot as pl\n'), ((1129, 1139), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (1137, 1139), True, 'import matplotlib.pyplot as pl\n'), ((488, 509), 'numpy.logspace', 'np.logspace', (['(4)', '(6)', '(64)'], {}), '(4, 6, 64)\n', (499, 509), True, 'import numpy as np\n')] |
# imports shared throughout the project
import sys
import importlib
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# CONSTANTS
PJ_TO_GWH = 277.7778 # [GWh / PJ]
GWH_TO_PJ = 1/PJ_TO_GWH #[PJ/GWH]
# HELPER
import FLUCCOplus.config as config
# Mapping from energy-model ("EM") column names to the German column labels
# used in the Excel exports (e.g. Windkraft = wind power, Strombedarf =
# electricity demand).
EM_TO_EXCEL_colnames = {
    "power_production_wind_avg": "Windkraft",
    "power_production_solar_avg": "Photovoltaik",
    "power_production_hydro_avg": "Laufkraft",
    "total_consumption_avg": "Strombedarf",
    "total_production_avg": "Erzeugung",
    "power_consumption_hydro_discharge_avg": "Pumpspeicher"}
# Reverse lookup (Excel label -> EM name) plus extra Excel labels that have
# no one-to-one counterpart above; the "Pumpspeicher" entry is re-asserted
# explicitly with the same value it already gets from the inversion.
EXCEL_TO_EM_colnames = {v: k for k, v in EM_TO_EXCEL_colnames.items()}
EXCEL_TO_EM_colnames["Volatile EE"] = "power_production_volatile_avg"
EXCEL_TO_EM_colnames["Nicht-Volatile"] = "power_production_non-volatile_avg"
EXCEL_TO_EM_colnames["Pumpspeicher"] = "power_consumption_hydro_discharge_avg"
EXCEL_TO_EM_colnames["Wasserkraft"] = "power_production_hydro_and_discharge_avg"
def log(f):
    """Decorator that logs the wall-clock runtime of ``f`` in milliseconds.

    The message is emitted at INFO level on a logger named after the
    decorated function's module, so output can be filtered per module via
    the project's logging configuration (``FLUCCOplus.config``).
    """
    import functools  # local import keeps the module's import block unchanged
    logger = config.logging.getLogger(f.__module__)
    @functools.wraps(f)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        tic = time.time()*1000
        result = f(*args, **kwargs)
        toc = time.time()*1000
        logger.info(f"{f.__name__} - {round(toc-tic,2)}ms")
        return result
    return wrapper
def logg(f):
    """Decorator for DataFrame-pipeline steps: logs runtime (via ``log``)
    plus the resulting frame's shape at DEBUG level.

    Assumes the wrapped function takes a DataFrame as its first argument and
    returns an object with a 2-tuple ``.shape`` (rows, columns).
    """
    import functools  # local import keeps the module's import block unchanged
    logger = config.logging.getLogger(f.__module__)
    @functools.wraps(f)  # keep the wrapped function's metadata intact
    def wrapper(dataframe, *args, **kwargs):
        result = log(f)(dataframe, *args, **kwargs)
        ro, co = result.shape
        logger.debug(f"{f.__name__} df.shape = ({ro}, {co})")
        return result
    return wrapper
def plot_signal_bars(df, columns, ytick_average_max=False, cut_ylim=False, figsize=None):
    """Plot ON/OFF signal statistics as two bar charts.

    Takes a df of signal series where +1 denotes ON and -1 denotes OFF
    (hourly resolution assumed: one row = one hour).

    Left axis: total ON vs. OFF hours per column (stacked, annotated in %).
    Right axis: average duration of an ON and an OFF period, in hours.

    Parameters
    ----------
    df : DataFrame of -1/+1 signal columns.
    columns : columns of ``df`` to evaluate.
    ytick_average_max : if truthy, place y-ticks every 24 h up to this value
        on the right-hand plot.
    cut_ylim : if truthy, clip the y-axis at this value.
    figsize : figure size tuple forwarded to ``plt.subplots``.
        BUGFIX: the previous default of ``True`` is not a valid figsize and
        made ``plt.subplots`` fail whenever the argument was omitted; the
        default is now ``None`` (matplotlib's own default size).

    Returns
    -------
    (fig, ax) : the matplotlib figure and its two axes.
    """
    desc_wind = pd.DataFrame()
    df_step_wind = pd.DataFrame()
    df_not_wind = pd.DataFrame()
    for c in columns:
        # Cumulative counter that increments at every OFF->ON (resp. ON->OFF)
        # transition; its maximum equals the number of signal periods.
        df_step_wind[c] = df[c].shift(1).ne(df[c]).where(df[c] == 1).cumsum()
        df_not_wind[c] = df[c].shift(1).ne(df[c]).where(df[c] == -1).cumsum()
    df_step_wind.iloc[0, :] = 0
    desc_wind["Zeitraum mit Signal [h]"] = df.where(df > 0).sum()
    desc_wind["Nicht-Signal-Zeitraum [h]"] = len(df) - desc_wind["Zeitraum mit Signal [h]"]
    desc_wind["Anzahl Signal-Perioden"] = df_step_wind.max()
    desc_wind["Durchschnittliche Dauer Signal [h]"] = (
            desc_wind["Zeitraum mit Signal [h]"] / desc_wind["Anzahl Signal-Perioden"])
    desc_wind["Durchschnittliche Dauer Nicht-Signal [h]"] = desc_wind["Nicht-Signal-Zeitraum [h]"] / desc_wind[
        "Anzahl Signal-Perioden"]
    fig, ax = plt.subplots(1, 2, figsize=figsize)
    desc_wind.loc[columns][["Zeitraum mit Signal [h]", "Nicht-Signal-Zeitraum [h]"]] \
        .plot(kind="bar", color=["cyan", "black"], stacked=True, ax=ax[0]).set(ylabel="Stunden")
    desc_wind.loc[columns][["Durchschnittliche Dauer Signal [h]", "Durchschnittliche Dauer Nicht-Signal [h]"]] \
        .plot(kind="bar", color=["orange", "grey"], stacked=False, ax=ax[1]).set(ylabel="Stunden")
    # Annotate left plot with the share of total hours each bar represents.
    for p in ax[0].patches:
        ax[0].annotate("{:.1f}%".format(p.get_height() * 100 / len(df)),
                       (p.get_x() + p.get_width() / 2., p.get_height() + p.get_y() - 5), ha='center', va='center',
                       fontsize=7, color='black', xytext=(0, -8), textcoords='offset points')
    # Annotate right plot with the average period duration in hours.
    for p in ax[1].patches:
        ax[1].annotate("{:.0f}".format(p.get_height()), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center',
                       va='center', fontsize=7, color='black', xytext=(0, -8), textcoords='offset points')
    if ytick_average_max:
        ax[1].yaxis.set_ticks(np.arange(0, ytick_average_max, 24))
    if cut_ylim:
        plt.ylim(top=cut_ylim)
    plt.grid(axis="x")
    return fig, ax
def Ueberschuesse_PVfirst(df):
    """Hour-by-hour split of wind and PV production into direct consumption
    (``*DV`` = Direktverbrauch) and surplus (``*UeSch`` = Ueberschuss),
    giving PV priority over wind when covering demand.

    Mutates ``df`` in place and returns it. Expects German-named hourly
    columns (Pumpspeicher, Laufkraft, Photovoltaik, Windkraft, Strombedarf)
    and assumes exactly 8760 rows. NOTE(review): ``df.RES`` is read below
    but never created here -- the caller must supply a precomputed ``RES``
    (total renewable production) column; confirm against callers.
    """
    df["Non_volatiles"] = df.Pumpspeicher + df.Laufkraft
    df["RESohneWind"] = df.Laufkraft + df.Photovoltaik + df.Pumpspeicher
    df["Residual_ohne_Wind"] = df.Strombedarf - df.Photovoltaik - df.Non_volatiles
    df["Zero"] = 0
    # Wind that can actually cover residual demand, clipped at >= 0.
    df["Wind_useful"] = (df[["Windkraft", "Residual_ohne_Wind"]]).min(axis=1).clip(0, None)
    df["WindkraftUeSch"] = 0 # surplus (Ueberschuss)
    df["WindkraftDV"] = 0 # direct consumption (Direktverbrauch)
    df["WindkraftLast"] = 0
    df["PVUeSch"] = 0 # surplus (Ueberschuss)
    df["PVDV"] = 0 # direct consumption (Direktverbrauch)
    for t in range(8760):
        # PV + non-volatiles alone already cover demand: all wind is surplus.
        if (df.Photovoltaik[t] + df.Non_volatiles[t]) >= df.Strombedarf[t]:
            df.WindkraftUeSch[t] = df.Windkraft[t]
            df.PVDV[t] = df.Strombedarf[t] - df.Non_volatiles[t]
            df.PVUeSch[t] = df.Photovoltaik[t] - df.PVDV[t]
        else:
            # Total renewables fall short of demand: no surplus at all.
            if df.RES[t] <= df.Strombedarf[t]:
                df.WindkraftUeSch[t] = 0
                df.PVUeSch[t] = 0
                df.WindkraftDV[t] = df.Windkraft[t]
                df.PVDV[t] = df.Photovoltaik[t]
            else:
                # PV fully consumed; wind covers the rest, remainder is surplus.
                df.PVDV[t] = df.Photovoltaik[t]
                df.WindkraftDV[t] = df.Strombedarf[t] - (df.PVDV[t] + df.Non_volatiles[t])
                df.WindkraftUeSch[t] = df.Windkraft[t] - df.WindkraftDV[t]
        if df.RES[t] > df.Strombedarf[t]:
            df.WindkraftLast[t] = df.Strombedarf[t] - df.RES[t] + df.Windkraft[t]
    return df
def Ueberschuesse_WINDfirst(df2):
    """Counterpart of ``Ueberschuesse_PVfirst``: split wind and PV production
    into direct consumption (``*DV``) and surplus (``*UeSch``), but give
    WIND priority over PV when covering demand.

    Mutates ``df2`` in place and returns it. Expects the same German-named
    hourly columns and exactly 8760 rows. NOTE(review): as in the PV-first
    variant, ``df2.RES`` is read but never created here -- confirm the
    caller provides it.
    """
    df2["Non_volatiles"] = df2.Pumpspeicher + df2.Laufkraft
    df2["Zero"] = 0
    df2["Residual_ohne_Wind"] = df2.Strombedarf - df2.Non_volatiles
    df2["Wind_useful"] = (df2[["Windkraft", "Residual_ohne_Wind"]]).min(axis=1).clip(0, None)
    df2["WindkraftUeSch"] = 0 # surplus (Ueberschuss)
    df2["WindkraftDV"] = 0
    df2["WindkraftLast"] = 0 # direct consumption (Direktverbrauch)
    df2["PVUeSch"] = 0 # surplus (Ueberschuss)
    df2["PVLast"] = 0 # direct consumption (Direktverbrauch)
    df2["PVDV"] = 0
    for t in range(8760):
        # Wind + non-volatiles below demand: all wind is consumed directly.
        if (df2.Windkraft[t] + df2.Non_volatiles[t]) <= df2.Strombedarf[t]:
            df2.WindkraftDV[t] = df2.Windkraft[t]
        if (df2.Windkraft[t] + df2.Non_volatiles[t]) > df2.Strombedarf[t]:
            if df2.Non_volatiles[t] > df2.Strombedarf[t]:
                # Non-volatiles alone cover demand: all wind is surplus.
                df2.WindkraftUeSch[t] = df2.Windkraft[t]
            else:
                df2.WindkraftUeSch[t] = df2.Windkraft[t] + df2.Non_volatiles[t] - df2.Strombedarf[t]
                df2.WindkraftDV[t] = df2.Strombedarf[t] - df2.Non_volatiles[t]
        # PV only fills whatever demand wind + non-volatiles left uncovered.
        if (df2.Windkraft[t] + df2.Non_volatiles[t]) >= df2.Strombedarf[t]:
            df2.PVUeSch[t] = df2.Photovoltaik[t]
        elif (df2.Windkraft[t] + df2.Non_volatiles[t]) < df2.Strombedarf[t]:
            if df2.RES[t] < df2.Strombedarf[t]:
                df2.PVUeSch[t] = 0
                df2.PVDV[t] = df2.Photovoltaik[t]
            else:
                df2.PVDV[t] = df2.Strombedarf[t] - df2.Non_volatiles[t] - df2.WindkraftDV[t]
                df2.PVUeSch[t] = df2.Photovoltaik[t] - df2.PVDV[t]
    # if df.RES[t] > df.Strombedarf[t]:
    #     df.PVLast[t] = df.Strombedarf[t] - df.RES[t] + df.Photovoltaik[t]
    # if df.RES[t] > df.Strombedarf[t]:
    #     df.WindkraftLast[t] = df.Strombedarf[t] - df.RES[t] + df.Windkraft[t]
    df2["RESohnePV"] = df2.Laufkraft + df2.Windkraft + df2.Pumpspeicher
    df2["Residual_ohne_PV"] = df2.Strombedarf - df2.Photovoltaik - df2.Non_volatiles
    return df2
def maxnutz():
    """Collect the MANutz switching signals into one table, invert the 0/1
    coding, and write the result to
    ``../data/processed/MANutz/maxnutz_normalized.csv``.

    Each source CSV is expected to carry the signal in its second column.
    Returns ``None`` (the return value of ``DataFrame.to_csv`` with a path).
    """
    import FLUCCOplus.config as config
    from pathlib import Path
    # Output column name -> source csv file name (note the mixed casing of
    # the input files); replaces six copy-pasted read_csv statements.
    sources = {
        "Schaltsignal_REF": "Schaltsignal_REF.csv",
        "Schaltsignal_REG": "Schaltsignal_REG.csv",
        "Schaltsignal_UBA30": "Schaltsignal_uba30.csv",
        "Schaltsignal_UBA50": "Schaltsignal_uba50.csv",
        "Schaltsignal_VEIGL30": "Schaltsignal_veigl30.csv",
        "Schaltsignal_VEIGL50": "Schaltsignal_veigl50.csv",
    }
    df_nutz = pd.DataFrame()
    for column, filename in sources.items():
        csv_path = config.DATA_PROCESSED / Path(f"MANutz/{filename}")
        df_nutz[column] = pd.read_csv(csv_path).iloc[:, 1]
    # Swap the 0/1 coding (1 -> 0 and 0 -> 1) via a -1 placeholder so the
    # two replacements do not interfere with each other.
    df_nutz = df_nutz.replace(1, -1)
    df_nutz = df_nutz.replace(0, 1).replace(-1, 0)
    return df_nutz.to_csv("../data/processed/MANutz/maxnutz_normalized.csv", sep=";", decimal=",")
if __name__ == "__main__":
    # Smoke test: the @log decorator should wrap and execute cleanly.
    @log
    def test():
        pass
    test()
| [
"matplotlib.pyplot.grid",
"numpy.arange",
"pathlib.Path",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"time.time",
"matplotlib.pyplot.subplots",
"FLUCCOplus.config.logging.getLogger"
] | [((1010, 1048), 'FLUCCOplus.config.logging.getLogger', 'config.logging.getLogger', (['f.__module__'], {}), '(f.__module__)\n', (1034, 1048), True, 'import FLUCCOplus.config as config\n'), ((1309, 1347), 'FLUCCOplus.config.logging.getLogger', 'config.logging.getLogger', (['f.__module__'], {}), '(f.__module__)\n', (1333, 1347), True, 'import FLUCCOplus.config as config\n'), ((1760, 1774), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1772, 1774), True, 'import pandas as pd\n'), ((1794, 1808), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1806, 1808), True, 'import pandas as pd\n'), ((1827, 1841), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1839, 1841), True, 'import pandas as pd\n'), ((2620, 2655), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': 'figsize'}), '(1, 2, figsize=figsize)\n', (2632, 2655), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3811), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (3801, 3811), True, 'import matplotlib.pyplot as plt\n'), ((7328, 7342), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7340, 7342), True, 'import pandas as pd\n'), ((3766, 3788), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': 'cut_ylim'}), '(top=cut_ylim)\n', (3774, 3788), True, 'import matplotlib.pyplot as plt\n'), ((1097, 1108), 'time.time', 'time.time', ([], {}), '()\n', (1106, 1108), False, 'import time\n'), ((1164, 1175), 'time.time', 'time.time', ([], {}), '()\n', (1173, 1175), False, 'import time\n'), ((3672, 3707), 'numpy.arange', 'np.arange', (['(0)', 'ytick_average_max', '(24)'], {}), '(0, ytick_average_max, 24)\n', (3681, 3707), True, 'import numpy as np\n'), ((7413, 7448), 'pathlib.Path', 'Path', (['"""MANutz/Schaltsignal_REF.csv"""'], {}), "('MANutz/Schaltsignal_REF.csv')\n", (7417, 7448), False, 'from pathlib import Path\n'), ((7531, 7566), 'pathlib.Path', 'Path', (['"""MANutz/Schaltsignal_REG.csv"""'], {}), "('MANutz/Schaltsignal_REG.csv')\n", 
(7535, 7566), False, 'from pathlib import Path\n'), ((7651, 7688), 'pathlib.Path', 'Path', (['"""MANutz/Schaltsignal_uba30.csv"""'], {}), "('MANutz/Schaltsignal_uba30.csv')\n", (7655, 7688), False, 'from pathlib import Path\n'), ((7808, 7845), 'pathlib.Path', 'Path', (['"""MANutz/Schaltsignal_uba50.csv"""'], {}), "('MANutz/Schaltsignal_uba50.csv')\n", (7812, 7845), False, 'from pathlib import Path\n'), ((7967, 8006), 'pathlib.Path', 'Path', (['"""MANutz/Schaltsignal_veigl30.csv"""'], {}), "('MANutz/Schaltsignal_veigl30.csv')\n", (7971, 8006), False, 'from pathlib import Path\n'), ((8131, 8170), 'pathlib.Path', 'Path', (['"""MANutz/Schaltsignal_veigl50.csv"""'], {}), "('MANutz/Schaltsignal_veigl50.csv')\n", (8135, 8170), False, 'from pathlib import Path\n')] |
import os
import pytest
import math
from astropy.io import fits
from astropy.table import Table
import numpy as np
import stwcs
from stwcs import updatewcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata_root
from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds
try:
from ci_watson.artifactory_helpers import check_url
except ImportError:
from ci_watson.artifactory_helpers import _is_url as check_url
from .base_classes import BaseTest
__all__ = ['BaseHLATest', 'BaseHLAParTest', 'centroid_compare', 'BaseUnit']
@pytest.mark.usefixtures('_jail')
class BaseHLATest(BaseTest):
    """Base class for HLA pipeline regression tests.

    Handles test-data retrieval from Artifactory/central storage, CRDS
    reference-file lookup/download, and environment setup (jref variable,
    astrometry switch).
    """
    ignore_hdus = []
    input_repo = 'hst-hla-pipeline'
    results_root = 'hst-hla-pipeline-results'
    output_shift_file = None
    fit_limit = 0.010  # 10 milli-arcseconds
    docopy = False  # Do not make additional copy by default
    rtol = 1e-6
    refstr = 'jref'
    prevref = os.environ.get(refstr)
    # FITS keywords excluded from product comparisons.
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
    # Header keywords naming the CRDS reference files a dataset needs.
    reffile_lookup = ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE']

    def set_environ(self):
        """Prepare the test environment: force data copies when TEST_BIGDATA
        is remote, and point the jref variable at the working directory when
        only remote CRDS would otherwise be used."""
        # Enforce copies of data when TEST_BIGDATA is URL
        input_dir = get_bigdata_root()
        if input_dir and check_url(input_dir):
            self.docopy = True
        # NOTE: This could be explicitly controlled using pytest fixture
        # but too many ways to do the same thing would be confusing.
        # Refine this logic if using pytest fixture.
        # HSTCAL cannot open remote CRDS on FTP but central storage is okay.
        # So use central storage if available to avoid FTP.
        if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
            os.environ[self.refstr] = self.curdir + os.sep
            self.use_ftp_crds = True
        # Turn off Astrometry updates
        os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'

    def raw_from_asn(self, asn_file, suffix='_flt.fits'):
        """Return the raw exposure filenames listed in an association file.

        BUGFIX: ``suffix`` was previously ignored -- the call hard-coded
        '_flt.fits' regardless of what the caller passed. It is now
        forwarded to the module-level ``raw_from_asn`` helper.
        """
        return raw_from_asn(asn_file, suffix=suffix)

    def get_input_file(self, *args, refsep='$', **kwargs):
        """Download/copy the input file(s) and their CRDS reference files
        into the working directory; return the list of local filenames."""
        # If user has specified action for docopy, apply it with
        # default behavior being whatever was defined in the base class.
        docopy = kwargs.get('docopy', self.docopy)
        # Download or copy input file (e.g., RAW) into the working directory.
        # The associated CRDS reference files in ``refstr`` are also
        # downloaded, if necessary.
        curdir = os.getcwd()
        filenames = self.get_data(*args, docopy=docopy)
        for filename in filenames:
            ref_files = ref_from_image(filename, reffile_lookup=self.reffile_lookup)
            print("Looking for {} REF_FILES: {}".format(filename, ref_files))
            for ref_file in ref_files:
                if ref_file.strip() == '':
                    continue
                if refsep not in ref_file:  # Local file
                    self.get_data('customRef', ref_file, docopy=docopy)
                else:
                    # Start by checking to see whether IRAF variable *ref/*tab
                    # has been added to os.environ
                    refdir, refname = ref_file.split(refsep)
                    refdir_parent = os.path.split(refdir)[0]
                    # Define refdir to point to current directory if:
                    #   i. refdir is not defined in environment already
                    #  ii. refdir in os.environ points to another test directory
                    # This logic should leave refdir unchanged if it already
                    # points to a globally defined directory.
                    if refdir not in os.environ or refdir_parent in curdir:
                        os.environ[refdir] = curdir + os.sep
                    # Download from FTP, if applicable
                    if self.use_ftp_crds:
                        download_crds(ref_file, timeout=self.timeout)
        return filenames
# Pytest function to support the parameterization of these classes
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
idlist = [funcargs['id'] for funcargs in funcarglist]
del argnames[argnames.index('id')]
metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
for funcargs in funcarglist], ids=idlist)
@pytest.mark.usefixtures('_jail')
class BaseHLAParTest(BaseHLATest):
    """Parameterized test template driven by ``pytest_generate_tests``.

    Subclasses override ``params['test_modes']`` with one dict per test
    case; each dict supplies the funcargs of ``test_modes`` plus an 'id'.
    """
    # Placeholder parameter table; test_dir=None makes the template a no-op.
    params = {'test_modes':[dict(input="",
                                 test_dir=None,
                                 step_class=None,
                                 step_pars=dict(),
                                 output_truth="",
                                 output_hdus=[])
                            ]
             }
    def test_modes(self, input, test_dir, step_class, step_pars,
                   output_truth, output_hdus):
        """
        Template method for parameterizing some tests based on JWST code.
        """
        if test_dir is None:
            return
        self.test_dir = test_dir
        self.ref_loc = [self.test_dir, 'truth']
        # can be removed once all truth files have been updated
        self.ignore_keywords += ['FILENAME']
        # Fetch the input, run the pipeline step, and save its product.
        input_file = self.get_data(self.test_dir, input)
        result = step_class.call(input_file, **step_pars)
        output_file = result.meta.filename
        result.save(output_file)
        result.close()
        # ``output_truth`` may be a (truth, pars) tuple carrying extra
        # comparison parameters; unpack it if so.
        output_pars = None
        if isinstance(output_truth, tuple):
            output_pars = output_truth[1]
            output_truth = output_truth[0]
        if not output_pars:
            if output_hdus:
                output_spec = (output_file, output_truth, output_hdus)
            else:
                output_spec = (output_file, output_truth)
        else:
            output_spec = {'files':(output_file, output_truth),
                           'pars':output_pars}
        outputs = [output_spec]
        self.compare_outputs(outputs)
def centroid_compare(centroid):
    """Sort key for centroid-distance records: compare on element 1 (the
    flux-difference field of the records built in ``centroid_distances``)."""
    sort_value = centroid[1]
    return sort_value
class BaseUnit(BaseHLATest):
    """Unit-test helper base: centroid detection/bookkeeping on image arrays
    plus small FITS image and WCS read/write utilities."""
    buff = 0  # extra border (pixels) around subimages written by centroid_statistics
    refstr = 'jref'
    prevref = os.environ.get(refstr)
    input_loc = 'acs'
    ref_loc = 'acs'
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
    atol = 1.0e-5
    def bound_image(self, image):
        """
        Compute the bounding box (ymin, ymax, xmin, xmax) of the non-zero
        region of an image.
        """
        coords = np.nonzero(image)
        ymin = coords[0].min()
        ymax = coords[0].max()
        xmin = coords[1].min()
        xmax = coords[1].max()
        return (ymin, ymax, xmin, xmax)
    def centroid(self, image, size, center):
        """
        Compute the flux-weighted centroid of a square area of side ``size``
        around ``center``; returns [y, x, flux] or None if the area is empty.
        """
        ylo = int(center[0]) - size // 2
        yhi = min(ylo + size, image.shape[0])
        xlo = int(center[1]) - size // 2
        xhi = min(xlo + size, image.shape[1])
        center = [0.0, 0.0, 0.0]
        for y in range(ylo, yhi):
            for x in range(xlo, xhi):
                center[0] += y * image[y,x]
                center[1] += x * image[y,x]
                center[2] += image[y,x]
        if center[2] == 0.0: return None
        center[0] /= center[2]
        center[1] /= center[2]
        return center
    def centroid_close(self, list_of_centroids, size, point):
        """
        Return 1 if any centroid in the list lies within size/2 of ``point``
        in both axes, else 0.
        """
        # Iterate newest-first: recently appended centroids are the likeliest match.
        for i in range(len(list_of_centroids)-1, -1, -1):
            if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
                abs(list_of_centroids[i][1] - point[1]) < size / 2):
                return 1
        return 0
    def centroid_distances(self, image1, image2, amp, size):
        """
        Compute a list of centroids and the distances between them in two images.
        Each record is [distance, flux difference, centroid1, centroid2],
        sorted by flux difference (see ``centroid_compare``).
        """
        distances = []
        list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
        for center2, pt in zip(list_of_centroids, lst_pts):
            center1 = self.centroid(image1, size, pt)
            if center1 is None: continue
            disty = center2[0] - center1[0]
            distx = center2[1] - center1[1]
            dist = math.sqrt(disty * disty + distx * distx)
            dflux = abs(center2[2] - center1[2])
            distances.append([dist, dflux, center1, center2])
        distances.sort(key=centroid_compare)
        return distances
    def centroid_list(self, image, amp, size):
        """
        Find the centroids of all regions brighter than ``amp``, skipping
        points already covered by a previously found centroid.
        """
        list_of_centroids = []
        list_of_points = []
        points = np.transpose(np.nonzero(image > amp))
        for point in points:
            if not self.centroid_close(list_of_centroids, size, point):
                center = self.centroid(image, size, point)
                list_of_centroids.append(center)
                list_of_points.append(point)
        return list_of_centroids, list_of_points
    def centroid_statistics(self, title, fname, image1, image2, amp, size):
        """
        Write centroid statistics comparing two images to file ``fname`` and
        return the (minimum, median, maximum) centroid distances as a tuple.
        """
        stats = ("minimum", "median", "maximum")
        # Index 0/1 unused: records store centroids at positions 2 and 3.
        images = (None, None, image1, image2)
        im_type = ("", "", "test", "reference")
        diff = []
        distances = self.centroid_distances(image1, image2, amp, size)
        indexes = (0, len(distances)//2, len(distances)-1)
        fd = open(fname, 'w')
        fd.write("*** %s ***\n" % title)
        if len(distances) == 0:
            diff = [0.0, 0.0, 0.0]
            fd.write("No matches!!\n")
        elif len(distances) == 1:
            diff = [distances[0][0], distances[0][0], distances[0][0]]
            fd.write("1 match\n")
            fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
            for j in range(2, 4):
                ylo = int(distances[0][j][0]) - (1+self.buff)
                yhi = int(distances[0][j][0]) + (2+self.buff)
                xlo = int(distances[0][j][1]) - (1+self.buff)
                xhi = int(distances[0][j][1]) + (2+self.buff)
                subimage = images[j][ylo:yhi,xlo:xhi]
                fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
                         (im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
                fd.write(str(subimage) + "\n")
        else:
            fd.write("%d matches\n" % len(distances))
            for k in range(0,3):
                i = indexes[k]
                diff.append(distances[i][0])
                fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
                for j in range(2, 4):
                    ylo = int(distances[i][j][0]) - (1+self.buff)
                    yhi = int(distances[i][j][0]) + (2+self.buff)
                    xlo = int(distances[i][j][1]) - (1+self.buff)
                    xhi = int(distances[i][j][1]) + (2+self.buff)
                    subimage = images[j][ylo:yhi,xlo:xhi]
                    fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
                             (stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
                    fd.write(str(subimage) + "\n")
        fd.close()
        return tuple(diff)
    def make_point_image(self, input_image, point, value):
        """
        Create an image (same shape/dtype as ``input_image``) with a single
        point set to ``value``.
        """
        output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
        output_image[point] = value
        return output_image
    def make_grid_image(self, input_image, spacing, value):
        """
        Create an image (same shape/dtype as ``input_image``) with points on
        a regular grid of the given spacing set to ``value``.
        """
        output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
        shape = output_image.shape
        for y in range(spacing//2, shape[0], spacing):
            for x in range(spacing//2, shape[1], spacing):
                output_image[y,x] = value
        return output_image
    def print_wcs(self, title, wcs):
        """
        Print the wcs header cards
        """
        print("=== %s ===" % title)
        print(wcs.to_header_string())
    def read_image(self, filename):
        """
        Read the image data from extension 1 of a fits file.
        """
        hdu = fits.open(filename)
        image = hdu[1].data
        hdu.close()
        return image
    def read_wcs(self, filename):
        """
        Read the HSTWCS of extension 1 of a fits file.
        """
        hdu = fits.open(filename)
        wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
        hdu.close()
        return wcs
    def write_wcs(self, hdu, image_wcs):
        """
        Update header with WCS keywords
        """
        hdu.header['ORIENTAT'] = image_wcs.orientat
        hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
        hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
        hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
        hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
        hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
        hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
        hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
        hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
        hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
        hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
        hdu.header['VAFACTOR'] = 1.0
    def write_image(self, filename, wcs, *args):
        """
        Write the given image arrays to a MEF fits file with the supplied
        WCS; extensions are named SCI, WHT, CTX in argument order.
        (Previous docstring said "Read" -- copy-paste error.)
        """
        extarray = ['SCI', 'WHT', 'CTX']
        pimg = fits.HDUList()
        phdu = fits.PrimaryHDU()
        phdu.header['NDRIZIM'] = 1
        phdu.header['ROOTNAME'] = filename
        pimg.append(phdu)
        for img in args:
            # Create a MEF file with the specified extname
            extn = extarray.pop(0)
            extname = fileutil.parseExtn(extn)
            ehdu = fits.ImageHDU(data=img)
            ehdu.header['EXTNAME'] = extname[0]
            ehdu.header['EXTVER'] = extname[1]
            self.write_wcs(ehdu, wcs)
            pimg.append(ehdu)
        pimg.writeto(filename)
        del pimg
del pimg
| [
"ci_watson.artifactory_helpers._is_url",
"astropy.io.fits.HDUList",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.ImageHDU",
"os.environ.get",
"ci_watson.artifactory_helpers.get_bigdata_root",
"math.sqrt",
"os.getcwd",
"stwcs.wcsutil.HSTWCS",
"os.path.split",
"numpy.zeros",
"ci_watson.hst_hel... | [((592, 624), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""_jail"""'], {}), "('_jail')\n", (615, 624), False, 'import pytest\n'), ((4585, 4617), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""_jail"""'], {}), "('_jail')\n", (4608, 4617), False, 'import pytest\n'), ((947, 969), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (961, 969), False, 'import os\n'), ((6348, 6370), 'os.environ.get', 'os.environ.get', (['refstr'], {}), '(refstr)\n', (6362, 6370), False, 'import os\n'), ((1360, 1378), 'ci_watson.artifactory_helpers.get_bigdata_root', 'get_bigdata_root', ([], {}), '()\n', (1376, 1378), False, 'from ci_watson.artifactory_helpers import get_bigdata_root\n'), ((2143, 2185), 'ci_watson.hst_helpers.raw_from_asn', 'raw_from_asn', (['asn_file'], {'suffix': '"""_flt.fits"""'}), "(asn_file, suffix='_flt.fits')\n", (2155, 2185), False, 'from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds\n'), ((2634, 2645), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2643, 2645), False, 'import os\n'), ((6758, 6775), 'numpy.nonzero', 'np.nonzero', (['image'], {}), '(image)\n', (6768, 6775), True, 'import numpy as np\n'), ((11796, 11848), 'numpy.zeros', 'np.zeros', (['input_image.shape'], {'dtype': 'input_image.dtype'}), '(input_image.shape, dtype=input_image.dtype)\n', (11804, 11848), True, 'import numpy as np\n'), ((12071, 12123), 'numpy.zeros', 'np.zeros', (['input_image.shape'], {'dtype': 'input_image.dtype'}), '(input_image.shape, dtype=input_image.dtype)\n', (12079, 12123), True, 'import numpy as np\n'), ((12632, 12651), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (12641, 12651), False, 'from astropy.io import fits\n'), ((12831, 12850), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (12840, 12850), False, 'from astropy.io import fits\n'), ((12866, 12894), 'stwcs.wcsutil.HSTWCS', 'stwcs.wcsutil.HSTWCS', (['hdu', '(1)'], {}), '(hdu, 
1)\n', (12886, 12894), False, 'import stwcs\n'), ((13836, 13850), 'astropy.io.fits.HDUList', 'fits.HDUList', ([], {}), '()\n', (13848, 13850), False, 'from astropy.io import fits\n'), ((13866, 13883), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (13881, 13883), False, 'from astropy.io import fits\n'), ((1405, 1425), 'ci_watson.artifactory_helpers._is_url', 'check_url', (['input_dir'], {}), '(input_dir)\n', (1414, 1425), True, 'from ci_watson.artifactory_helpers import _is_url as check_url\n'), ((2761, 2821), 'ci_watson.hst_helpers.ref_from_image', 'ref_from_image', (['filename'], {'reffile_lookup': 'self.reffile_lookup'}), '(filename, reffile_lookup=self.reffile_lookup)\n', (2775, 2821), False, 'from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds\n'), ((8502, 8542), 'math.sqrt', 'math.sqrt', (['(disty * disty + distx * distx)'], {}), '(disty * disty + distx * distx)\n', (8511, 8542), False, 'import math\n'), ((8917, 8940), 'numpy.nonzero', 'np.nonzero', (['(image > amp)'], {}), '(image > amp)\n', (8927, 8940), True, 'import numpy as np\n'), ((14130, 14154), 'stsci.tools.fileutil.parseExtn', 'fileutil.parseExtn', (['extn'], {}), '(extn)\n', (14148, 14154), False, 'from stsci.tools import fileutil\n'), ((14175, 14198), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', ([], {'data': 'img'}), '(data=img)\n', (14188, 14198), False, 'from astropy.io import fits\n'), ((3390, 3411), 'os.path.split', 'os.path.split', (['refdir'], {}), '(refdir)\n', (3403, 3411), False, 'import os\n'), ((4036, 4081), 'ci_watson.hst_helpers.download_crds', 'download_crds', (['ref_file'], {'timeout': 'self.timeout'}), '(ref_file, timeout=self.timeout)\n', (4049, 4081), False, 'from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds\n')] |
# -*- coding: utf-8 -*-
"""baseline.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ELj28VKxQe8E1h-VnIbU6HMb9ezVIUaB
"""
import numpy as np
import sklearn
from statistics import mean
from sklearn import metrics
from sklearn import model_selection
from sklearn.preprocessing import OneHotEncoder #ONE HOT ENCODING
from sklearn.ensemble import RandomForestClassifier #Build model - Random Forest Classifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
class BaselineClasifier():
    """
    Baseline classifier that predicts a constant class derived from the
    central tendency (mode or rounded mean) of the training labels.
    """
    def __init__(self, np):
        # ``np`` is the numpy module, injected to keep the class decoupled.
        self.np = np
        self.central_tendency = None
    def fit(self, data, y, central_t='mode'):
        """Learn the constant prediction from labels ``y``; returns self.
        ``data`` is accepted for API symmetry but not used."""
        if central_t == 'mode':
            labels, counts = self.np.unique(y, return_counts=True)
            self.central_tendency = labels[counts.argmax()]
        elif central_t == 'mean':
            self.central_tendency = round(self.np.sum(y) / len(y))
        return self
    def predict(self, data):
        """Return one prediction per row of ``data``, all equal to the
        learned central tendency."""
        return self.np.full(data.shape[0], self.central_tendency)
def run_clasifier(X_train, y_train, X_test, numpy, class_type='mode'):
    """Fit the baseline classifier on the training labels and return its
    predictions for ``X_test``."""
    model = BaselineClasifier(numpy)
    model.fit(X_train, y_train, class_type)
    return model.predict(X_test)
def compute_accuracy(validation, prediction):
    """Fraction of predictions equal to the validation labels."""
    matches = np.count_nonzero(prediction == validation)
    return matches / len(validation)
def compute_AUC(y, prediction):
    """ROC-AUC of the prediction, or None when it is undefined (e.g. ``y``
    contains a single class and ``roc_auc_score`` raises ValueError)."""
    try:
        return roc_auc_score(y, prediction)
    except ValueError:
        return None
def Acceptance_baseline(X, y):
    """4-fold cross-validated evaluation of the baseline classifier for
    acceptance/rejection; prints and returns (mean accuracy, mean AUC)."""
    fold_accuracies = []
    fold_aucs = []
    splitter = sklearn.model_selection.KFold(n_splits=4, random_state=1, shuffle=True)  # Testing with K-folds
    for train_idx, test_idx in splitter.split(X):
        predicted = run_clasifier(X[train_idx], y[train_idx], X[test_idx], np)
        fold_accuracies.append(compute_accuracy(y[test_idx], predicted))
        fold_auc = compute_AUC(y[test_idx], predicted)
        # AUC is undefined on single-class folds; skip those.
        if fold_auc is not None:
            fold_aucs.append(fold_auc)
    baseline_clasifier_accuracy = mean(fold_accuracies)
    print('Baseline accuracy (K-fold): ', baseline_clasifier_accuracy)
    print('AUC K-fold: ', mean(fold_aucs))
    return baseline_clasifier_accuracy, mean(fold_aucs)
def baseline_wage(X, y):
    """4-fold cross-validated evaluation of the baseline classifier for the
    wage-rate target; returns the mean fold accuracy."""
    fold_accuracies = []
    fold_aucs = []
    splitter = sklearn.model_selection.KFold(n_splits=4, random_state=1, shuffle=True)  # Testing with K-folds
    for train_idx, test_idx in splitter.split(X):
        predicted = run_clasifier(X[train_idx], y[train_idx], X[test_idx], np)
        fold_accuracies.append(compute_accuracy(y[test_idx], predicted))
        fold_auc = compute_AUC(y[test_idx], predicted)
        # AUCs are collected for parity with Acceptance_baseline but not returned.
        if fold_auc is not None:
            fold_aucs.append(fold_auc)
    return mean(fold_accuracies)
def baseline_wage_kfold(X, y):
    """Evaluate the baseline on a single 80/20 hold-out split and return the
    accuracy.

    NOTE(review): despite the ``_kfold`` suffix, this uses a plain
    ``train_test_split``, not K-fold cross validation.
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)  # Testing with regular split
    predicted = run_clasifier(X_train, y_train, X_test, np)
    return compute_accuracy(y_test, predicted)
| [
"statistics.mean",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_auc_score",
"numpy.count_nonzero",
"sklearn.model_selection.KFold"
] | [((1793, 1823), 'numpy.count_nonzero', 'np.count_nonzero', (['(comp == True)'], {}), '(comp == True)\n', (1809, 1823), True, 'import numpy as np\n'), ((2243, 2314), 'sklearn.model_selection.KFold', 'sklearn.model_selection.KFold', ([], {'n_splits': '(4)', 'random_state': '(1)', 'shuffle': '(True)'}), '(n_splits=4, random_state=1, shuffle=True)\n', (2272, 2314), False, 'import sklearn\n'), ((2790, 2806), 'statistics.mean', 'mean', (['accuracies'], {}), '(accuracies)\n', (2794, 2806), False, 'from statistics import mean\n'), ((3125, 3196), 'sklearn.model_selection.KFold', 'sklearn.model_selection.KFold', ([], {'n_splits': '(4)', 'random_state': '(1)', 'shuffle': '(True)'}), '(n_splits=4, random_state=1, shuffle=True)\n', (3154, 3196), False, 'import sklearn\n'), ((3672, 3688), 'statistics.mean', 'mean', (['accuracies'], {}), '(accuracies)\n', (3676, 3688), False, 'from statistics import mean\n'), ((3802, 3839), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (3818, 3839), False, 'from sklearn.model_selection import train_test_split\n'), ((1989, 2017), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'prediction'], {}), '(y, prediction)\n', (2002, 2017), False, 'from sklearn.metrics import roc_auc_score\n'), ((2906, 2916), 'statistics.mean', 'mean', (['AUCs'], {}), '(AUCs)\n', (2910, 2916), False, 'from statistics import mean\n'), ((2958, 2968), 'statistics.mean', 'mean', (['AUCs'], {}), '(AUCs)\n', (2962, 2968), False, 'from statistics import mean\n')] |
import numpy as np
np.random.seed(10)
import tensorflow as tf
tf.random.set_seed(10)
tf.keras.backend.set_floatx('float64')
import matplotlib.pyplot as plt
from tensorflow.keras import Model
from tensorflow.keras import optimizers, models, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import load_model, Sequential, Model
from tensorflow.keras.regularizers import l1
from CAE_Layers import Encoder_Block, Decoder_Block, Latent_Block
from sklearn.metrics import r2_score
def coeff_determination(y_pred, y_true): #Order of function inputs is important here
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
class CAE_Model(Model):
def __init__(self,data,num_latent,npca=False):
super(CAE_Model, self).__init__()
# Set up data for CAE
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
# Reshape for scaling
data = data.reshape(1000,-1)
self.num_latent = num_latent
self.preproc = Pipeline([('stdscaler', StandardScaler())])
self.ntrain = 700
self.nvalid = 200
self.ntest = 100
self.swe_train = self.preproc.fit_transform(data[:self.ntrain])
self.swe_valid = self.preproc.transform(data[self.ntrain:self.ntrain+self.nvalid])
self.swe_test = self.preproc.transform(data[self.ntrain+self.nvalid:])
self.swe_train = self.swe_train.reshape(self.ntrain,64,64,3)
self.swe_valid = self.swe_valid.reshape(self.nvalid,64,64,3)
self.swe_test = self.swe_test.reshape(self.ntest,64,64,3)
# Shuffle - to preserve the order of the initial dataset
self.swe_train_shuffled = np.copy(self.swe_train)
self.swe_valid_shuffled = np.copy(self.swe_valid)
np.random.shuffle(self.swe_train_shuffled)
np.random.shuffle(self.swe_valid_shuffled)
# Define architecture
self.b1 = Encoder_Block()
self.b2 = Latent_Block(self.num_latent)
self.b3 = Decoder_Block()
self.train_op = tf.keras.optimizers.Adam(learning_rate=0.001)
# Hierarchical PCA
self.npca = npca
# Running the model
def call(self,X):
if self.npca:
h1 = self.b1(X)
h2 = self.b2(h1)
out_list = []
for i in range(1,self.num_latent):
h2_temp = h2.numpy()
h2_temp[:,i:] = 0.0
out_list.append(self.b3(tf.Variable(h2_temp)))
out_list.append(self.b3(h2))
return out_list
else:
h1 = self.b1(X)
h2 = self.b2(h1)
out = self.b3(h2)
return out
# Regular MSE
def get_loss(self,X,Y):
if self.npca:
op_list = self.call(X)
loss_val = tf.reduce_mean(tf.math.square(op_list[0]-Y))
for i in range(1,self.num_latent):
loss_val = loss_val + tf.reduce_mean(tf.math.square(op_list[i]-Y))
return loss_val
else:
op=self.call(X)
return tf.reduce_mean(tf.math.square(op-Y))
# get gradients - regular
def get_grad(self,X,Y):
with tf.GradientTape() as tape:
tape.watch(self.trainable_variables)
L = self.get_loss(X,Y)
g = tape.gradient(L, self.trainable_variables)
return g
# perform gradient descent - regular
def network_learn(self,X,Y):
g = self.get_grad(X,Y)
self.train_op.apply_gradients(zip(g, self.trainable_variables))
# Train the model
def train_model(self):
plot_iter = 0
stop_iter = 0
patience = 10
best_valid_loss = 999999.0 # Some large number
self.num_batches = 50
self.train_batch_size = int(self.ntrain/self.num_batches)
self.valid_batch_size = int((self.nvalid)/self.num_batches)
for i in range(100):
# Training loss
print('Training iteration:',i)
for batch in range(self.num_batches):
input_batch = self.swe_train[batch*self.train_batch_size:(batch+1)*self.train_batch_size]
output_batch = self.swe_train[batch*self.train_batch_size:(batch+1)*self.train_batch_size]
self.network_learn(input_batch,output_batch)
# Validation loss
valid_loss = 0.0
valid_r2 = 0.0
for batch in range(self.num_batches):
input_batch = self.swe_valid[batch*self.valid_batch_size:(batch+1)*self.valid_batch_size]
output_batch = self.swe_valid[batch*self.valid_batch_size:(batch+1)*self.valid_batch_size]
valid_loss = valid_loss + self.get_loss(input_batch,output_batch).numpy()
if self.npca:
predictions = self.call(self.swe_valid)[-1].numpy()
else:
predictions = self.call(self.swe_valid).numpy()
valid_r2 = valid_r2 + r2_score(predictions.reshape(self.valid_batch_size,-1),self.swe_valid.reshape(self.valid_batch_size,-1))
valid_r2 = valid_r2/(batch+1)
# Check early stopping criteria
if valid_loss < best_valid_loss:
print('Improved validation loss from:',best_valid_loss,' to:', valid_loss)
print('Validation R2:',valid_r2)
best_valid_loss = valid_loss
if self.npca:
self.save_weights('./npca_checkpoints/my_checkpoint')
else:
self.save_weights('./cae_checkpoints/my_checkpoint')
stop_iter = 0
else:
print('Validation loss (no improvement):',valid_loss)
print('Validation R2:',valid_r2)
stop_iter = stop_iter + 1
if stop_iter == patience:
break
# Check accuracy on test
if self.npca:
predictions = self.call(self.swe_test)[-1].numpy()
else:
predictions = self.call(self.swe_test).numpy()
print('Test loss:',self.get_loss(self.swe_test,self.swe_test).numpy())
r2 = r2_score(predictions.reshape(self.valid_batch_size,-1),self.swe_test.reshape(self.valid_batch_size,-1))
print('Test R2:',r2)
r2_iter = 0
# Load weights
def restore_model(self):
if self.npca:
self.load_weights('./npca_checkpoints/my_checkpoint') # Load pretrained model
else:
self.load_weights('./cae_checkpoints/my_checkpoint') # Load pretrained model
# Do some testing
def model_inference(self):
# Restore from checkpoint
self.restore_model()
# Reconstruct some test images
if self.npca:
for i in range(10):
predicted_list = self.call(self.swe_test[i:i+1])
# Rescale
for j in range(self.num_latent):
predicted_list[j] = predicted_list[j].numpy()
predicted_list[j] = self.preproc.inverse_transform(predicted_list[j].reshape(1,-1)).reshape(1,64,64,3)
true = self.preproc.inverse_transform(self.swe_test[i:i+1].reshape(1,-1)).reshape(1,64,64,3)
self.plot_npca_comparison(true,predicted_list)
else:
for i in range(10):
predicted = self.call(self.swe_test[i:i+1]).numpy()
# Rescale
predicted = self.preproc.inverse_transform(predicted.reshape(1,-1)).reshape(1,64,64,3)
true = self.preproc.inverse_transform(self.swe_test[i:i+1].reshape(1,-1)).reshape(1,64,64,3)
self.plot_standard_comparison(true,predicted)
def plot_standard_comparison(self,true,predicted):
fig, ax = plt.subplots(nrows=3,ncols=2,figsize=(6,8))
cs1 = ax[0,0].imshow(true[0,:,:,0],label='input')
cs2 = ax[0,1].imshow(predicted[0,:,:,0],label='decoded')
fig.colorbar(cs1,ax=ax[0,0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[0,1],fraction=0.046, pad=0.04)
ax[0,0].set_title(r'True $q_1$')
ax[0,1].set_title(r'Reconstructed $q_1$')
cs1 = ax[1,0].imshow(true[0,:,:,1],label='input')
cs2 = ax[1,1].imshow(predicted[0,:,:,1],label='decoded')
fig.colorbar(cs1,ax=ax[1,0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[1,1],fraction=0.046, pad=0.04)
ax[1,0].set_title(r'True $q_2$')
ax[1,1].set_title(r'Reconstructed $q_2$')
cs1 = ax[2,0].imshow(true[0,:,:,2],label='input')
cs2 = ax[2,1].imshow(predicted[0,:,:,2],label='decoded')
fig.colorbar(cs1,ax=ax[2,0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[2,1],fraction=0.046, pad=0.04)
ax[2,0].set_title(r'True $q_3$')
ax[2,1].set_title(r'Reconstructed $q_3$')
for i in range(2):
for j in range(2):
ax[i,j].set_xlabel('x')
ax[i,j].set_ylabel('y')
plt.subplots_adjust(wspace=0.5,hspace=-0.3)
plt.tight_layout()
plt.show()
def plot_npca_comparison(self,true,predicted_list):
for i in range(self.num_latent):
predicted = predicted_list[i]
fig, ax = plt.subplots(nrows=3,ncols=2,figsize=(6,8))
cs1 = ax[0,0].imshow(true[0,:,:,0],label='input')
cs2 = ax[0,1].imshow(predicted[0,:,:,0],label='decoded')
fig.colorbar(cs1,ax=ax[0,0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[0,1],fraction=0.046, pad=0.04)
ax[0,0].set_title(r'True $q_1$')
ax[0,1].set_title(r'Reconstructed $q_1$')
cs1 = ax[1,0].imshow(true[0,:,:,1],label='input')
cs2 = ax[1,1].imshow(predicted[0,:,:,1],label='decoded')
fig.colorbar(cs1,ax=ax[1,0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[1,1],fraction=0.046, pad=0.04)
ax[1,0].set_title(r'True $q_2$')
ax[1,1].set_title(r'Reconstructed $q_2$')
cs1 = ax[2,0].imshow(true[0,:,:,2],label='input')
cs2 = ax[2,1].imshow(predicted[0,:,:,2],label='decoded')
fig.colorbar(cs1,ax=ax[2,0],fraction=0.046, pad=0.04)
fig.colorbar(cs2,ax=ax[2,1],fraction=0.046, pad=0.04)
ax[2,0].set_title(r'True $q_3$')
ax[2,1].set_title(r'Reconstructed $q_3$')
for i in range(2):
for j in range(2):
ax[i,j].set_xlabel('x')
ax[i,j].set_ylabel('y')
plt.subplots_adjust(wspace=0.5,hspace=-0.3)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
print('CAE Model defined in this module') | [
"tensorflow.keras.backend.epsilon",
"tensorflow.GradientTape",
"CAE_Layers.Encoder_Block",
"tensorflow.keras.backend.mean",
"CAE_Layers.Decoder_Block",
"numpy.random.seed",
"tensorflow.keras.backend.square",
"tensorflow.Variable",
"tensorflow.keras.backend.set_floatx",
"CAE_Layers.Latent_Block",
... | [((19, 37), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (33, 37), True, 'import numpy as np\n'), ((62, 84), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(10)'], {}), '(10)\n', (80, 84), True, 'import tensorflow as tf\n'), ((85, 123), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (112, 123), True, 'import tensorflow as tf\n'), ((695, 720), 'tensorflow.keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (703, 720), True, 'from tensorflow.keras import backend as K\n'), ((1880, 1903), 'numpy.copy', 'np.copy', (['self.swe_train'], {}), '(self.swe_train)\n', (1887, 1903), True, 'import numpy as np\n'), ((1938, 1961), 'numpy.copy', 'np.copy', (['self.swe_valid'], {}), '(self.swe_valid)\n', (1945, 1961), True, 'import numpy as np\n'), ((1971, 2013), 'numpy.random.shuffle', 'np.random.shuffle', (['self.swe_train_shuffled'], {}), '(self.swe_train_shuffled)\n', (1988, 2013), True, 'import numpy as np\n'), ((2022, 2064), 'numpy.random.shuffle', 'np.random.shuffle', (['self.swe_valid_shuffled'], {}), '(self.swe_valid_shuffled)\n', (2039, 2064), True, 'import numpy as np\n'), ((2114, 2129), 'CAE_Layers.Encoder_Block', 'Encoder_Block', ([], {}), '()\n', (2127, 2129), False, 'from CAE_Layers import Encoder_Block, Decoder_Block, Latent_Block\n'), ((2148, 2177), 'CAE_Layers.Latent_Block', 'Latent_Block', (['self.num_latent'], {}), '(self.num_latent)\n', (2160, 2177), False, 'from CAE_Layers import Encoder_Block, Decoder_Block, Latent_Block\n'), ((2196, 2211), 'CAE_Layers.Decoder_Block', 'Decoder_Block', ([], {}), '()\n', (2209, 2211), False, 'from CAE_Layers import Encoder_Block, Decoder_Block, Latent_Block\n'), ((2236, 2281), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2260, 2281), True, 'import tensorflow as tf\n'), ((8134, 8180), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'figsize': '(6, 8)'}), '(nrows=3, ncols=2, figsize=(6, 8))\n', (8146, 8180), True, 'import matplotlib.pyplot as plt\n'), ((9373, 9417), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)', 'hspace': '(-0.3)'}), '(wspace=0.5, hspace=-0.3)\n', (9392, 9417), True, 'import matplotlib.pyplot as plt\n'), ((9425, 9443), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9441, 9443), True, 'import matplotlib.pyplot as plt\n'), ((9452, 9462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9460, 9462), True, 'import matplotlib.pyplot as plt\n'), ((11067, 11077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11075, 11077), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3413), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3411, 3413), True, 'import tensorflow as tf\n'), ((9636, 9682), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'figsize': '(6, 8)'}), '(nrows=3, ncols=2, figsize=(6, 8))\n', (9648, 9682), True, 'import matplotlib.pyplot as plt\n'), ((10975, 11019), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)', 'hspace': '(-0.3)'}), '(wspace=0.5, hspace=-0.3)\n', (10994, 11019), True, 'import matplotlib.pyplot as plt\n'), ((11031, 11049), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11047, 11049), True, 'import matplotlib.pyplot as plt\n'), ((761, 775), 'tensorflow.keras.backend.mean', 'K.mean', (['y_true'], {}), '(y_true)\n', (767, 775), True, 'from tensorflow.keras import backend as K\n'), ((814, 825), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (823, 825), True, 'from tensorflow.keras import backend as K\n'), ((3029, 3059), 'tensorflow.math.square', 'tf.math.square', (['(op_list[0] - Y)'], {}), '(op_list[0] - Y)\n', (3043, 3059), True, 'import tensorflow as tf\n'), ((3302, 3324), 
'tensorflow.math.square', 'tf.math.square', (['(op - Y)'], {}), '(op - Y)\n', (3316, 3324), True, 'import tensorflow as tf\n'), ((1234, 1250), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1248, 1250), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2648, 2668), 'tensorflow.Variable', 'tf.Variable', (['h2_temp'], {}), '(h2_temp)\n', (2659, 2668), True, 'import tensorflow as tf\n'), ((3159, 3189), 'tensorflow.math.square', 'tf.math.square', (['(op_list[i] - Y)'], {}), '(op_list[i] - Y)\n', (3173, 3189), True, 'import tensorflow as tf\n')] |
from py_wake import IEA37SimpleBastankhahGaussian
from py_wake.deflection_models import JimenezWakeDeflection
from py_wake.examples.data.iea37._iea37 import IEA37Site
import numpy as np
import matplotlib.pyplot as plt
import pytest
from py_wake.flow_map import XYGrid
from py_wake.deflection_models.fuga_deflection import FugaDeflection
from py_wake.tests import npt
from py_wake.examples.data.hornsrev1 import V80
from py_wake.deflection_models.deflection_model import DeflectionModel
from py_wake.utils.model_utils import get_models
from py_wake.tests.test_files import tfp
@pytest.mark.parametrize('deflectionModel,dy10d', [
(JimenezWakeDeflection, 0.5672964),
((lambda: FugaDeflection(tfp + 'fuga/2MW/Z0=0.00001000Zi=00400Zeta0=0.00E+00/')), 0.4625591892703828),
((lambda: FugaDeflection(tfp + 'fuga/2MW/Z0=0.00408599Zi=00400Zeta0=0.00E+00/')), 0.37719329354768527),
((lambda: FugaDeflection(tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+00/')), 0.32787746772608933),
])
def test_deflection_model(deflectionModel, dy10d):
site = IEA37Site(16)
x, y = [0], [0]
windTurbines = V80()
D = windTurbines.diameter()
wfm = IEA37SimpleBastankhahGaussian(site, windTurbines, deflectionModel=deflectionModel())
yaw_ilk = np.reshape([-30], (1, 1, 1))
sim_res = wfm(x, y, yaw=yaw_ilk, wd=270, ws=10)
fm = sim_res.flow_map(XYGrid(x=np.arange(-D, 10 * D + 10, 10)))
min_WS_line = fm.min_WS_eff()
if 0:
plt.figure(figsize=(14, 3))
fm.plot_wake_map()
min_WS_line.plot()
plt.plot(10 * D, dy10d * D, '.', label="Ref, 10D")
plt.legend()
plt.show()
npt.assert_almost_equal(min_WS_line.interp(x=10 * D).item() / D, dy10d)
@pytest.mark.parametrize('deflectionModel', [m for m in get_models(DeflectionModel) if m is not None])
def test_plot_deflection_grid(deflectionModel):
site = IEA37Site(16)
x, y = [0], [0]
windTurbines = V80()
D = windTurbines.diameter()
wfm = IEA37SimpleBastankhahGaussian(site, windTurbines, deflectionModel=deflectionModel())
yaw_ilk = np.reshape([-30], (1, 1, 1))
sim_res = wfm(x, y, yaw=yaw_ilk, wd=270, ws=10)
fm = sim_res.flow_map(XYGrid(x=np.arange(-D, 10 * D + 10, 10)))
plt.figure(figsize=(14, 3))
fm.plot_wake_map()
fm.plot_deflection_grid()
min_WS_line = fm.min_WS_eff()
min_WS_line.plot()
plt.legend()
plt.title(wfm.deflectionModel)
if 0:
plt.show()
plt.close('all')
| [
"numpy.reshape",
"numpy.arange",
"py_wake.examples.data.iea37._iea37.IEA37Site",
"matplotlib.pyplot.plot",
"py_wake.utils.model_utils.get_models",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"py_wake.examples.data.hornsrev1.V80",
"matplotlib.pyplot.title",
"py_wake.deflection_models.fug... | [((1058, 1071), 'py_wake.examples.data.iea37._iea37.IEA37Site', 'IEA37Site', (['(16)'], {}), '(16)\n', (1067, 1071), False, 'from py_wake.examples.data.iea37._iea37 import IEA37Site\n'), ((1111, 1116), 'py_wake.examples.data.hornsrev1.V80', 'V80', ([], {}), '()\n', (1114, 1116), False, 'from py_wake.examples.data.hornsrev1 import V80\n'), ((1259, 1287), 'numpy.reshape', 'np.reshape', (['[-30]', '(1, 1, 1)'], {}), '([-30], (1, 1, 1))\n', (1269, 1287), True, 'import numpy as np\n'), ((1883, 1896), 'py_wake.examples.data.iea37._iea37.IEA37Site', 'IEA37Site', (['(16)'], {}), '(16)\n', (1892, 1896), False, 'from py_wake.examples.data.iea37._iea37 import IEA37Site\n'), ((1936, 1941), 'py_wake.examples.data.hornsrev1.V80', 'V80', ([], {}), '()\n', (1939, 1941), False, 'from py_wake.examples.data.hornsrev1 import V80\n'), ((2084, 2112), 'numpy.reshape', 'np.reshape', (['[-30]', '(1, 1, 1)'], {}), '([-30], (1, 1, 1))\n', (2094, 2112), True, 'import numpy as np\n'), ((2239, 2266), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 3)'}), '(figsize=(14, 3))\n', (2249, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2393), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2391, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2398, 2428), 'matplotlib.pyplot.title', 'plt.title', (['wfm.deflectionModel'], {}), '(wfm.deflectionModel)\n', (2407, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2478), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2471, 2478), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1488), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 3)'}), '(figsize=(14, 3))\n', (1471, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1601), 'matplotlib.pyplot.plot', 'plt.plot', (['(10 * D)', '(dy10d * D)', '"""."""'], {'label': '"""Ref, 10D"""'}), "(10 * D, dy10d * D, '.', label='Ref, 10D')\n", (1559, 1601), True, 'import 
matplotlib.pyplot as plt\n'), ((1610, 1622), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1620, 1622), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1641), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1639, 1641), True, 'import matplotlib.pyplot as plt\n'), ((2447, 2457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2455, 2457), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1804), 'py_wake.utils.model_utils.get_models', 'get_models', (['DeflectionModel'], {}), '(DeflectionModel)\n', (1787, 1804), False, 'from py_wake.utils.model_utils import get_models\n'), ((1376, 1406), 'numpy.arange', 'np.arange', (['(-D)', '(10 * D + 10)', '(10)'], {}), '(-D, 10 * D + 10, 10)\n', (1385, 1406), True, 'import numpy as np\n'), ((684, 753), 'py_wake.deflection_models.fuga_deflection.FugaDeflection', 'FugaDeflection', (["(tfp + 'fuga/2MW/Z0=0.00001000Zi=00400Zeta0=0.00E+00/')"], {}), "(tfp + 'fuga/2MW/Z0=0.00001000Zi=00400Zeta0=0.00E+00/')\n", (698, 753), False, 'from py_wake.deflection_models.fuga_deflection import FugaDeflection\n'), ((791, 860), 'py_wake.deflection_models.fuga_deflection.FugaDeflection', 'FugaDeflection', (["(tfp + 'fuga/2MW/Z0=0.00408599Zi=00400Zeta0=0.00E+00/')"], {}), "(tfp + 'fuga/2MW/Z0=0.00408599Zi=00400Zeta0=0.00E+00/')\n", (805, 860), False, 'from py_wake.deflection_models.fuga_deflection import FugaDeflection\n'), ((899, 968), 'py_wake.deflection_models.fuga_deflection.FugaDeflection', 'FugaDeflection', (["(tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+00/')"], {}), "(tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+00/')\n", (913, 968), False, 'from py_wake.deflection_models.fuga_deflection import FugaDeflection\n'), ((2201, 2231), 'numpy.arange', 'np.arange', (['(-D)', '(10 * D + 10)', '(10)'], {}), '(-D, 10 * D + 10, 10)\n', (2210, 2231), True, 'import numpy as np\n')] |
## imports
import os, time
import numpy as np
from colour import Color
import matplotlib.pyplot as plt
import skimage.io as io
import utilities
def localizationErrors( coco_analyze, imgs_info, saveDir ):
    """Analyze and visualize the breakdown of keypoint localization errors.

    Runs the keypoint localization analysis on ``coco_analyze``, classifies
    every matched keypoint as good / jitter / inversion / swap / miss, then:
      * writes a textual summary to ``<loc_dir>/std_out.txt``;
      * saves an overall error pie chart and a per-keypoint breakdown figure;
      * saves one pie chart per error type grouped by body part;
      * downloads and plots the top-7 detections for each error type.

    Args:
        coco_analyze: analysis object exposing ``params``, ``cocoEval``,
            ``cocoGt``, ``analyze()``, ``corrected_dts`` and
            ``localization_matches`` (presumably a coco-analyze API object --
            confirm against the caller).
        imgs_info: dict keyed by image id; each entry must contain the
            ``'coco_url'`` used to fetch the image for plotting.
        saveDir: root output directory; all results are written under
            ``<saveDir>/localization_errors/keypoints_breakdown``.

    Returns:
        dict: mapping of short plot identifiers to the saved PDF paths.
    """
    loc_dir = saveDir + '/localization_errors/keypoints_breakdown'
    if not os.path.exists(loc_dir):
        os.makedirs(loc_dir)
    f = open('%s/std_out.txt'%loc_dir, 'w')
    f.write("Running Analysis: [Localization Errors Breakdown]\n\n")
    tic = time.time()
    paths = {}

    # set parameters for keypoint localization analysis
    coco_analyze.params.areaRng = [[32 ** 2, 1e5 ** 2]]
    coco_analyze.params.areaRngLbl = ['all']
    coco_analyze.cocoEval.params.useGtIgnore = 0
    coco_analyze.cocoEval.params.gtIgnoreIds = []
    coco_analyze.analyze(check_kpts=True, check_scores=False, check_bckgd=False)

    corrected_dts = coco_analyze.corrected_dts['all']
    dt_gt_matches = coco_analyze.localization_matches['all',str(coco_analyze.params.oksLocThrs),'dts']
    # only detections that went through the error classification carry 'good'
    matched_dts = [cdt for cdt in corrected_dts if 'good' in cdt]
    f.write("Number detections: [%d]\n"%len(corrected_dts))
    f.write("Number matches: [%d]\n\n"%len(matched_dts))

    # accumulate error counts overall and per keypoint (17 COCO keypoints)
    good = 0; jitter = 0; inversion = 0; swap = 0; miss = 0; tot = 0.
    good_keypoints = np.zeros(17)
    jitt_keypoints = np.zeros(17); inv_keypoints = np.zeros(17)
    swap_keypoints = np.zeros(17); miss_keypoints = np.zeros(17)
    for dtm in matched_dts:
        match = dt_gt_matches[dtm['id']][0]
        gtm = coco_analyze.cocoGt.loadAnns(match['gtId'])[0]

        good += sum(dtm['good'])
        jitter += sum(dtm['jitter']); inversion += sum(dtm['inversion'])
        swap += sum(dtm['swap']); miss += sum(dtm['miss'])

        good_keypoints += np.array(dtm['good'])
        jitt_keypoints += np.array(dtm['jitter'])
        inv_keypoints += np.array(dtm['inversion'])
        swap_keypoints += np.array(dtm['swap'])
        miss_keypoints += np.array(dtm['miss'])

        # sanity check: the five categories must partition the gt keypoints
        assert(sum(dtm['good'])+sum(dtm['jitter'])+
               sum(dtm['inversion'])+sum(dtm['swap'])+
               sum(dtm['miss'])==gtm['num_keypoints'])
        tot += gtm['num_keypoints']

    f.write("Total Num. keypoints: [%d]\n"%int(tot))
    f.write("{:30} [{}]-[{}]\n".format(" - Good, [tot]-[perc]:", int(good), 100*(good/tot)))
    f.write("{:30} [{}]-[{}]\n".format(" - Jitter, [tot]-[perc]:", int(jitter), 100*(jitter/tot)))
    f.write("{:30} [{}]-[{}]\n".format(" - Inversion, [tot]-[perc]:", int(inversion), 100*(inversion/tot)))
    f.write("{:30} [{}]-[{}]\n".format(" - Swap, [tot]-[perc]:", int(swap), 100*(swap/tot)))
    f.write("{:30} [{}]-[{}]\n\n".format(" - Miss, [tot]-[perc]:", int(miss), 100*(miss/tot)))

    # plot the pie charts with number of errors
    COLORS = [ '#1ED88B','#8C4646','#D96459','#F2E394','#F2AE72']
    LABELS = ['Good','Jit.','Inv.','Miss','Swap']
    ERRORS = [(good/tot),(jitter/tot),(inversion/tot),(miss/tot),(swap/tot)]
    TOT_LABELS = []
    for lind, l in enumerate(LABELS):
        label_str = '{:5s}: {:2.1f}'.format(l,ERRORS[lind]*100)
        TOT_LABELS.append(label_str)

    fig = plt.figure(figsize=(5,5))
    rect = 0,0,0.9,0.9
    ax1 = fig.add_axes(rect)
    explode = (0.0,0.0,0.0,0.0,0.0)
    patches, autotexts = ax1.pie( ERRORS, explode=explode, colors=COLORS)
    lgd=fig.legend(patches, TOT_LABELS, loc="upper left",ncol=1,fancybox=True, shadow=True,fontsize=20)
    paths['overall_kpts_errors'] = "%s/overall_keypoint_errors.pdf"%loc_dir
    plt.savefig(paths['overall_kpts_errors'], bbox_inches='tight')
    plt.close()

    # per-keypoint pies drawn on top of a manikin image; rects_d holds the
    # (x, y, w, h) axes rectangle (figure fraction) for each keypoint's pie
    fig = plt.figure(figsize=(15,15)); plt.axis('off')
    I = io.imread('./latex/manikin.jpg')
    plt.imshow(I); ax = plt.gca(); ax.set_autoscale_on(False)
    rects_d = {}
    rects_d['nose'] = .47,.75,.07,.07
    rects_d['left_eye'] = .5, .83,.07,.07; rects_d['right_eye'] = .44,.83,.07,.07
    rects_d['left_ear'] = .54,.77,.07,.07; rects_d['right_ear'] = .4, .77,.07,.07
    rects_d['left_shoulder'] = .58,.68,.1, .1; rects_d['right_shoulder'] = .32,.65,.1, .1
    rects_d['left_elbow'] = .67,.6, .1, .1; rects_d['right_elbow'] = .27,.52,.1, .1
    rects_d['left_wrist'] = .59,.49,.1, .1; rects_d['right_wrist'] = .34,.42,.1, .1
    rects_d['left_hip'] = .48,.5, .1, .1; rects_d['right_hip'] = .39,.5, .1, .1
    rects_d['left_knee'] = .55,.32,.1, .1; rects_d['right_knee'] = .4, .32,.1, .1
    rects_d['left_ankle'] = .55,.15,.1, .1; rects_d['right_ankle'] = .4, .15,.1, .1
    # COCO keypoint ordering: index i in the error vectors is order[i]
    order = ['nose','left_eye','right_eye','left_ear','right_ear',
             'left_shoulder','right_shoulder','left_elbow','right_elbow',
             'left_wrist','right_wrist','left_hip','right_hip',
             'left_knee','right_knee','left_ankle','right_ankle']
    COLORS = ['#8C4646','#D96459','#F2AE72','#F2E394']

    f.write("Per Keypoint breakdown: [jitter, inversion, swap, miss]\n")
    for oi, ok in enumerate(order):
        rect = rects_d[ok]
        ax1 = fig.add_axes(rect)
        explode = (0.0,0.0,0.0,0.0)
        ERRORS = [jitt_keypoints[oi],inv_keypoints[oi],swap_keypoints[oi],miss_keypoints[oi]]
        # numpy scalar on the rhs makes this broadcast: ERRORS becomes a
        # normalized ndarray (fractions summing to 1)
        ERRORS /= sum(ERRORS)
        f.write(" - %s: %s\n"%(ok,ERRORS))
        patches, autotexts = ax1.pie( ERRORS, explode=explode, colors=COLORS[::-1])
    lgd=fig.legend(patches, ['Jitter','Inversion','Swap','Miss'][::-1],
                   loc="upper center",ncol=len(patches),fancybox=True, shadow=True,fontsize=20)
    paths['kpt_errors_breakdown'] = "%s/keypoint_breakdown.pdf"%loc_dir
    plt.savefig(paths['kpt_errors_breakdown'], bbox_inches='tight')
    plt.close()

    f.write("\nPer Error breakdown: %s\n"%order)
    f.write(" - Good: %s\n"%good_keypoints)
    f.write(" - Jitter: %s\n"%jitt_keypoints)
    f.write(" - Inversion: %s\n"%inv_keypoints)
    f.write(" - Swap: %s\n"%swap_keypoints)
    f.write(" - Miss: %s\n"%miss_keypoints)

    # body-part grouping: KEYPOINTS_I[g] lists the keypoint indices summed
    # into the group labeled KEYPOINTS_L[g]
    KEYPOINTS_L = ['Nose','Eyes','Ears','Should.','Elbows','Wrists','Hips','Knees','Ankles']
    KEYPOINTS_I = [[0],[1,2],[3,4],[5,6],[7,8],[9,10],[11,12],[13,14],[15,16]]
    ####################################

    # one pie chart per error type, broken down by body part
    err_vecs = [jitt_keypoints,inv_keypoints,swap_keypoints,miss_keypoints]
    for j, err_type in enumerate(['Jitter', 'Inversion', 'Swap', 'Miss']):
        TOT_LABELS = []
        ERRORS = []
        for i in KEYPOINTS_I:
            tot_errs = 0
            for l in i:
                tot_errs += err_vecs[j][l]
            ERRORS.append(tot_errs/float(sum(err_vecs[j])))
        for lind, l in enumerate(KEYPOINTS_L):
            label_str = '{:7s}: {:2.1f}'.format(l,100*ERRORS[lind])
            TOT_LABELS.append(label_str)

        fig = plt.figure(figsize=(10,5))
        rect = -.03,0,0.45,0.9
        ax1 = fig.add_axes(rect)
        # color gradient from white to this error type's base color
        colors = [c.rgb for c in list(Color("white").range_to(Color(COLORS[j]),len(KEYPOINTS_L)))]
        patches, autotexts = ax1.pie( ERRORS, colors=colors)
        lgd=fig.legend(patches, TOT_LABELS, bbox_to_anchor=(.45, .9),
                       loc="upper left",ncol=2,fancybox=True, shadow=True,fontsize=20)
        plt.title(err_type,fontsize=20)
        path = '%s_kpt_breakdown'%err_type
        paths[path] = "%s/%s.pdf"%(loc_dir,path)
        plt.savefig(paths[path], bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.close()

    ############################################################################
    ## PLOT THE TOP DETECTIONS WITH ERRORS OF EACH TYPE
    USE_VISIBILITY_FOR_PLOTS = False
    for err in ['miss','swap','inversion','jitter']:
        f.write("\nTop errors of type [%s]:\n"%(err))
        err_dts = [d for d in coco_analyze.corrected_dts['all'] if err in d]
        # sort by score first, then (stable sort) by number of errors, so ties
        # in error count are broken by detection score
        top_err_dts = sorted(err_dts, key=lambda k: -k['score'])
        top_err_dts = sorted(top_err_dts, key=lambda k: -sum(k[err]))

        for tind, t in enumerate(top_err_dts[0:7]):
            # skeleton indices are 1-based in utilities.skeleton
            sks = np.array(utilities.skeleton)-1
            kp = np.array(t['keypoints'])
            # keypoints are stored as flat [x0, y0, v0, x1, y1, v1, ...]
            x = kp[0::3]; y = kp[1::3]; v = kp[2::3]

            # show the image (downloaded from its coco_url)
            I = io.imread(imgs_info[t['image_id']]['coco_url'])
            plt.figure(figsize=(10,10)); plt.axis('off')
            plt.imshow(I)
            ax = plt.gca()
            ax.set_autoscale_on(False)

            # get the bounding box only based on the visible keypoints
            if USE_VISIBILITY_FOR_PLOTS:
                xs = x[v!=0]
                ys = y[v!=0]
                x_min = min(xs); x_max = max(xs)
                y_min = min(ys); y_max = max(ys)
                bbox = [x_min, y_min, x_max - x_min, y_max - y_min]
            else:
                bbox = t['bbox']

            # plot the bounding box
            rect = plt.Rectangle((bbox[0],bbox[1]),bbox[2],bbox[3],fill=False,edgecolor=[1, .6, 0],linewidth=3)
            ax.add_patch(rect)

            if USE_VISIBILITY_FOR_PLOTS:
                err_str = "Visibilty flags can only be 0, 1, 2."
                c_0 = len([iii for iii in v if iii==0])
                c_1 = len([iii for iii in v if iii==1])
                c_2 = len([iii for iii in v if iii==2])
                assert c_0 + c_1 + c_2 == 17, err_str

            for sk in sks:
                if USE_VISIBILITY_FOR_PLOTS and v[sk[0]] * v[sk[1]] == 0:
                    # don't plot the skeleton link if either of the two connecting
                    # keypoints has Visibilty flag == 0 and USE_VISIBILITY_FOR_PLOTS == True
                    pass
                else:
                    plt.plot(x[sk],y[sk], linewidth=3, color=utilities.colors[sk[0],sk[1]])

            # BUGFIX: was `xrange(17)` (Python 2 only); `range` works on both
            for kk in range(17):
                if kk in [1,3,5,7,9,11,13,15]:
                    if USE_VISIBILITY_FOR_PLOTS and v[kk] == 0:
                        # don't plot the keypoints if it has Visibilty flag == 0
                        # and USE_VISIBILITY_FOR_PLOTS == True
                        pass
                    else:
                        # these are the indices of the left keypoints (in red)
                        plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='r',
                                 markeredgecolor='r', markeredgewidth=3)
                elif kk in [2,4,6,8,10,12,14,16]:
                    if USE_VISIBILITY_FOR_PLOTS and v[kk] == 0:
                        # don't plot the keypoints if it has Visibilty flag == 0
                        # and USE_VISIBILITY_FOR_PLOTS == True
                        pass
                    else:
                        # these are the indices of the right keypoints (in green)
                        plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='g',
                                 markeredgecolor='g', markeredgewidth=3)
                else:
                    if USE_VISIBILITY_FOR_PLOTS and v[kk] == 0:
                        # don't plot the keypoints if it has Visibilty flag == 0
                        # and USE_VISIBILITY_FOR_PLOTS == True
                        pass
                    else:
                        # these are the indices of the remaining keypoints (in blue)
                        plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='b',
                                 markeredgecolor='b', markeredgewidth=3)

            title = "[%d][%d][%.3f][%d]"%(t['image_id'],t['id'],t['score'],sum(t[err]))
            f.write("%s\n"%title)
            plt.title(title,fontsize=20)
            path = '%s_%d'%(err,tind)
            paths[path] = "%s/%s.pdf"%(loc_dir,path)
            plt.savefig(paths[path], bbox_inches='tight',dpi=50)
            plt.close()

    f.write("\nDone, (t=%.2fs)."%(time.time()-tic))
    f.close()

    return paths
| [
"matplotlib.pyplot.imshow",
"os.path.exists",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"os.makedirs",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"skimage.io.imread",
"matplotlib.pyplot... | [((460, 471), 'time.time', 'time.time', ([], {}), '()\n', (469, 471), False, 'import os, time\n'), ((1266, 1278), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (1274, 1278), True, 'import numpy as np\n'), ((1300, 1312), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (1308, 1312), True, 'import numpy as np\n'), ((1331, 1343), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (1339, 1343), True, 'import numpy as np\n'), ((1365, 1377), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (1373, 1377), True, 'import numpy as np\n'), ((1396, 1408), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (1404, 1408), True, 'import numpy as np\n'), ((3150, 3176), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (3160, 3176), True, 'import matplotlib.pyplot as plt\n'), ((3522, 3584), 'matplotlib.pyplot.savefig', 'plt.savefig', (["paths['overall_kpts_errors']"], {'bbox_inches': '"""tight"""'}), "(paths['overall_kpts_errors'], bbox_inches='tight')\n", (3533, 3584), True, 'import matplotlib.pyplot as plt\n'), ((3589, 3600), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3598, 3600), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (3622, 3640), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3656), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3649, 3656), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3697), 'skimage.io.imread', 'io.imread', (['"""./latex/manikin.jpg"""'], {}), "('./latex/manikin.jpg')\n", (3674, 3697), True, 'import skimage.io as io\n'), ((3702, 3715), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I'], {}), '(I)\n', (3712, 3715), True, 'import matplotlib.pyplot as plt\n'), ((3722, 3731), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3729, 3731), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5635), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["paths['kpt_errors_breakdown']"], {'bbox_inches': '"""tight"""'}), "(paths['kpt_errors_breakdown'], bbox_inches='tight')\n", (5583, 5635), True, 'import matplotlib.pyplot as plt\n'), ((5640, 5651), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5649, 5651), True, 'import matplotlib.pyplot as plt\n'), ((283, 306), 'os.path.exists', 'os.path.exists', (['loc_dir'], {}), '(loc_dir)\n', (297, 306), False, 'import os, time\n'), ((316, 336), 'os.makedirs', 'os.makedirs', (['loc_dir'], {}), '(loc_dir)\n', (327, 336), False, 'import os, time\n'), ((1758, 1779), 'numpy.array', 'np.array', (["dtm['good']"], {}), "(dtm['good'])\n", (1766, 1779), True, 'import numpy as np\n'), ((1806, 1829), 'numpy.array', 'np.array', (["dtm['jitter']"], {}), "(dtm['jitter'])\n", (1814, 1829), True, 'import numpy as np\n'), ((1856, 1882), 'numpy.array', 'np.array', (["dtm['inversion']"], {}), "(dtm['inversion'])\n", (1864, 1882), True, 'import numpy as np\n'), ((1909, 1930), 'numpy.array', 'np.array', (["dtm['swap']"], {}), "(dtm['swap'])\n", (1917, 1930), True, 'import numpy as np\n'), ((1957, 1978), 'numpy.array', 'np.array', (["dtm['miss']"], {}), "(dtm['miss'])\n", (1965, 1978), True, 'import numpy as np\n'), ((6710, 6737), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (6720, 6737), True, 'import matplotlib.pyplot as plt\n'), ((7115, 7147), 'matplotlib.pyplot.title', 'plt.title', (['err_type'], {'fontsize': '(20)'}), '(err_type, fontsize=20)\n', (7124, 7147), True, 'import matplotlib.pyplot as plt\n'), ((7247, 7319), 'matplotlib.pyplot.savefig', 'plt.savefig', (['paths[path]'], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "(paths[path], bbox_extra_artists=(lgd,), bbox_inches='tight')\n", (7258, 7319), True, 'import matplotlib.pyplot as plt\n'), ((7328, 7339), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7337, 7339), True, 'import matplotlib.pyplot as 
plt\n'), ((7953, 7977), 'numpy.array', 'np.array', (["t['keypoints']"], {}), "(t['keypoints'])\n", (7961, 7977), True, 'import numpy as np\n'), ((8077, 8124), 'skimage.io.imread', 'io.imread', (["imgs_info[t['image_id']]['coco_url']"], {}), "(imgs_info[t['image_id']]['coco_url'])\n", (8086, 8124), True, 'import skimage.io as io\n'), ((8137, 8165), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (8147, 8165), True, 'import matplotlib.pyplot as plt\n'), ((8166, 8181), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8174, 8181), True, 'import matplotlib.pyplot as plt\n'), ((8194, 8207), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I'], {}), '(I)\n', (8204, 8207), True, 'import matplotlib.pyplot as plt\n'), ((8225, 8234), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8232, 8234), True, 'import matplotlib.pyplot as plt\n'), ((8717, 8821), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(bbox[0], bbox[1])', 'bbox[2]', 'bbox[3]'], {'fill': '(False)', 'edgecolor': '[1, 0.6, 0]', 'linewidth': '(3)'}), '((bbox[0], bbox[1]), bbox[2], bbox[3], fill=False, edgecolor=[\n 1, 0.6, 0], linewidth=3)\n', (8730, 8821), True, 'import matplotlib.pyplot as plt\n'), ((11449, 11478), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (11458, 11478), True, 'import matplotlib.pyplot as plt\n'), ((11581, 11634), 'matplotlib.pyplot.savefig', 'plt.savefig', (['paths[path]'], {'bbox_inches': '"""tight"""', 'dpi': '(50)'}), "(paths[path], bbox_inches='tight', dpi=50)\n", (11592, 11634), True, 'import matplotlib.pyplot as plt\n'), ((11646, 11657), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11655, 11657), True, 'import matplotlib.pyplot as plt\n'), ((7905, 7933), 'numpy.array', 'np.array', (['utilities.skeleton'], {}), '(utilities.skeleton)\n', (7913, 7933), True, 'import numpy as np\n'), ((11693, 11704), 'time.time', 'time.time', ([], {}), '()\n', 
(11702, 11704), False, 'import os, time\n'), ((9515, 9588), 'matplotlib.pyplot.plot', 'plt.plot', (['x[sk]', 'y[sk]'], {'linewidth': '(3)', 'color': 'utilities.colors[sk[0], sk[1]]'}), '(x[sk], y[sk], linewidth=3, color=utilities.colors[sk[0], sk[1]])\n', (9523, 9588), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6879), 'colour.Color', 'Color', (['COLORS[j]'], {}), '(COLORS[j])\n', (6868, 6879), False, 'from colour import Color\n'), ((10035, 10141), 'matplotlib.pyplot.plot', 'plt.plot', (['x[kk]', 'y[kk]', '"""o"""'], {'markersize': '(5)', 'markerfacecolor': '"""r"""', 'markeredgecolor': '"""r"""', 'markeredgewidth': '(3)'}), "(x[kk], y[kk], 'o', markersize=5, markerfacecolor='r',\n markeredgecolor='r', markeredgewidth=3)\n", (10043, 10141), True, 'import matplotlib.pyplot as plt\n'), ((6839, 6853), 'colour.Color', 'Color', (['"""white"""'], {}), "('white')\n", (6844, 6853), False, 'from colour import Color\n'), ((10610, 10716), 'matplotlib.pyplot.plot', 'plt.plot', (['x[kk]', 'y[kk]', '"""o"""'], {'markersize': '(5)', 'markerfacecolor': '"""g"""', 'markeredgecolor': '"""g"""', 'markeredgewidth': '(3)'}), "(x[kk], y[kk], 'o', markersize=5, markerfacecolor='g',\n markeredgecolor='g', markeredgewidth=3)\n", (10618, 10716), True, 'import matplotlib.pyplot as plt\n'), ((11159, 11265), 'matplotlib.pyplot.plot', 'plt.plot', (['x[kk]', 'y[kk]', '"""o"""'], {'markersize': '(5)', 'markerfacecolor': '"""b"""', 'markeredgecolor': '"""b"""', 'markeredgewidth': '(3)'}), "(x[kk], y[kk], 'o', markersize=5, markerfacecolor='b',\n markeredgecolor='b', markeredgewidth=3)\n", (11167, 11265), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import numpy as np
import datetime
import helper_functions.helper_functions as hf
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.svm import LinearSVC, SVC
import sec5b_ml_model_binaryclass_pipeline as ml_pipeline
# Script-level toggles.  'done_switch' controls whether the audible/visual
# completion alert (hf.done_alert) fires after main() finishes.
local_control_panel = {
    'done_switch': True,
}
# Main function
######################################################################
def _run_pipeline_grid(save_switch, nk, use_featured, feature_set,
                       target_label_list, ml_algo_param_dict,
                       eval_score_list, cv_repeat):
    """Sweep one data configuration over labels x algorithms x scores x repeats.

    For every combination of target label, (classifier key, hyper-parameter
    grid), evaluation score and cross-validation repeat index, build the
    control panel and launch one binary-class pipeline run.  This helper
    replaces four near-identical copies of the same invocation block that
    differed only in 'use_subsampled_df_nk', 'use_featured_df_switch' and
    'use_feature_set'.

    Args:
        save_switch (bool): forwarded as 'save_result_switch'
            (WARNING: True will overwrite existing saved results).
        nk: forwarded as 'use_subsampled_df_nk' (subsample size key; the
            original code passed 'none' or [] for full-data runs).
        use_featured (bool): forwarded as 'use_featured_df_switch'.
        feature_set: forwarded as 'use_feature_set' ([] for unfeatured runs).
        target_label_list (list): binary target labels to model.
        ml_algo_param_dict (dict): {name: {'clf': estimator, 'param': grid}}.
        eval_score_list (list): scoring metrics for model selection.
        cv_repeat (int): number of cross-validation repeats.
    """
    for target_label in target_label_list:
        for algo_key, algo_val in ml_algo_param_dict.items():
            for eval_score in eval_score_list:
                for i in range(1, cv_repeat + 1):
                    print('>> Current time:', datetime.datetime.now())
                    obj = ml_pipeline.MachineLearningNameEthnicityProjectBinaryClass(control_panel={
                        'save_result_switch': save_switch,   # WARNING: Will overwrite existing
                        'use_subsampled_df_switch': False,   # WARNING: Switch to False in production
                        'use_subsampled_df_nk': nk,
                        'use_featured_df_switch': use_featured,
                        'use_feature_set': feature_set,
                        'feature_selection_switch': False,
                        'cross_validation_switch': True,
                        'cross_validation_repeat': i,
                        'ml_process_on_test_data_switch': False,
                        'ml_process_on_training_data_switch': False,
                        'ml_process_on_ext_data_switch': False,
                        'ml_algo': None,
                        'ml_algo_param_grid': [algo_key, algo_val],
                        'binary_target_label': target_label,
                        'eval_score': eval_score,
                        'random_state': 888,
                    })
                    obj.machine_learning_steps()


def main(on_switch=False):
    """Drive the binary-class ML experiments across all configurations.

    Enumerates the data configurations (subsampled/full x unfeatured/featured)
    selected by the run_* switches and delegates each sweep to
    ``_run_pipeline_grid``.  Behavior, iteration order and run counts match
    the original duplicated implementation.

    Args:
        on_switch (bool): master switch; when False no experiments run (the
            completion alert governed by local_control_panel still fires).
    """
    if on_switch:
        save_switch = False
        run_on_subsampled_data = True
        run_on_full_data = False
        run_on_unfeatured_data = False
        run_on_featured_data = True
        # Eg, full list >> [1, 5, 10, 50, 100, 500]
        nk_list = [5]
        cv_repeat = 1
        # Eg, Full list >> ['dummy', 'sex_only', 'name_basic_only', 'name_substring_only', 'name_numeric_only',
        #                   'name_metaphone_only', 'name_all', 'loc_basic_only', 'loc_sep_entity_only',
        #                   'loc_substring_only', 'loc_all', 'name_all_loc_all', 'name_all_loc_all_reduced']
        feature_set_list = ['name_all']
        eval_score_list = ['macro f1 score']
        # Eg, Full list >> ['ab', 'fn', 'metis', 'inuit', 'ch', 'ja', 'en', 'fr', 'ir', 'it', 'rus', 'sc', 'others']
        target_label_list = ['fn']
        # Classifier suite with hyper-parameter grids for model selection.
        ml_algo_param_dict = {
            'LR_V1': {'clf': LogisticRegression(),
                      'param': {
                          'logisticregression__solver': ['liblinear'],
                          'logisticregression__penalty': ['l1', 'l2'],
                          'logisticregression__C': np.logspace(-4, 4, 20),
                          'logisticregression__tol': np.logspace(-5, 5, 20),
                          'logisticregression__class_weight': [None, 'balanced'],
                          'logisticregression__max_iter': [50, 1000, 4000, 20000],
                      }},
            'LR_V2': {'clf': LogisticRegression(),
                      'param': {
                          'logisticregression__solver': ['newton-cg', 'lbfgs', 'sag', 'saga'],
                          'logisticregression__penalty': ['none', 'l2'],
                          'logisticregression__C': np.logspace(-4, 4, 20),
                          'logisticregression__tol': np.logspace(-5, 5, 20),
                          'logisticregression__class_weight': [None, 'balanced'],
                          'logisticregression__max_iter': [50, 1000, 4000, 20000],
                      }},
            'SVC_LINEAR': {'clf': LinearSVC(),
                           'param': {
                               'linearsvc__penalty': ['l2'],
                               'linearsvc__loss': ['hinge', 'squared_hinge'],
                               'linearsvc__C': np.logspace(-4, 4, 20),
                               'linearsvc__tol': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
                               'linearsvc__class_weight': [None, 'balanced'],
                               'linearsvc__max_iter': [50, 1000, 4000, 20000],
                           }},
            'SVC_NONLINEAR': {'clf': SVC(),
                              'param': {
                                  'svc__kernel': ['poly', 'rbf', 'sigmoid'],
                                  'svc__C': np.logspace(-4, 4, 20),
                                  'svc__tol': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
                                  'svc__class_weight': [None, 'balanced'],
                                  'svc__decision_function_shape': ['ovo', 'ovr'],
                                  'svc__max_iter': [50, 1000, 4000, 20000],
                              }},
            'NB': {'clf': BernoulliNB(),
                   'param': {
                       'bernoullinb__alpha': np.logspace(-4, 4, 20),
                       'bernoullinb__binarize': [None, 0, .2, .4, .6, .8, 1],
                       'bernoullinb__fit_prior': [True, False],
                   }},
        }
        if run_on_subsampled_data:
            # Loop through subsampling n set with unfeatured data
            if run_on_unfeatured_data:
                for nk in nk_list:
                    _run_pipeline_grid(save_switch, nk, False, [],
                                       target_label_list, ml_algo_param_dict,
                                       eval_score_list, cv_repeat)
            # Loop through feature set and subsampling n set
            if run_on_featured_data:
                for feature_set in feature_set_list:
                    for nk in nk_list:
                        _run_pipeline_grid(save_switch, nk, True, feature_set,
                                           target_label_list, ml_algo_param_dict,
                                           eval_score_list, cv_repeat)
        if run_on_full_data:
            # Run once per feature set using the unfeatured, full dataset.
            # NOTE: the original code looped over feature_set_list here even
            # though the feature set is unused; iteration count is preserved.
            if run_on_unfeatured_data:
                for _ in feature_set_list:
                    _run_pipeline_grid(save_switch, 'none', False, [],
                                       target_label_list, ml_algo_param_dict,
                                       eval_score_list, cv_repeat)
            # Run once per feature set using the featured, full dataset
            if run_on_featured_data:
                for feature_set in feature_set_list:
                    _run_pipeline_grid(save_switch, [], True, feature_set,
                                       target_label_list, ml_algo_param_dict,
                                       eval_score_list, cv_repeat)
    if local_control_panel['done_switch']:
        hf.done_alert()
if __name__=='__main__':
main(on_switch=True) | [
"sec5b_ml_model_binaryclass_pipeline.MachineLearningNameEthnicityProjectBinaryClass",
"sklearn.svm.LinearSVC",
"sklearn.linear_model.LogisticRegression",
"datetime.datetime.now",
"sklearn.naive_bayes.BernoulliNB",
"numpy.logspace",
"helper_functions.helper_functions.done_alert",
"sklearn.svm.SVC"
] | [((8883, 8898), 'helper_functions.helper_functions.done_alert', 'hf.done_alert', ([], {}), '()\n', (8896, 8898), True, 'import helper_functions.helper_functions as hf\n'), ((1395, 1415), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1413, 1415), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1847, 1867), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1865, 1867), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2330, 2341), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (2339, 2341), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((2859, 2864), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (2862, 2864), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((3359, 3372), 'sklearn.naive_bayes.BernoulliNB', 'BernoulliNB', ([], {}), '()\n', (3370, 3372), False, 'from sklearn.naive_bayes import BernoulliNB\n'), ((1586, 1608), 'numpy.logspace', 'np.logspace', (['(-4)', '(4)', '(20)'], {}), '(-4, 4, 20)\n', (1597, 1608), True, 'import numpy as np\n'), ((1648, 1670), 'numpy.logspace', 'np.logspace', (['(-5)', '(5)', '(20)'], {}), '(-5, 5, 20)\n', (1659, 1670), True, 'import numpy as np\n'), ((2064, 2086), 'numpy.logspace', 'np.logspace', (['(-4)', '(4)', '(20)'], {}), '(-4, 4, 20)\n', (2075, 2086), True, 'import numpy as np\n'), ((2126, 2148), 'numpy.logspace', 'np.logspace', (['(-5)', '(5)', '(20)'], {}), '(-5, 5, 20)\n', (2137, 2148), True, 'import numpy as np\n'), ((2533, 2555), 'numpy.logspace', 'np.logspace', (['(-4)', '(4)', '(20)'], {}), '(-4, 4, 20)\n', (2544, 2555), True, 'import numpy as np\n'), ((2995, 3017), 'numpy.logspace', 'np.logspace', (['(-4)', '(4)', '(20)'], {}), '(-4, 4, 20)\n', (3006, 3017), True, 'import numpy as np\n'), ((3426, 3448), 'numpy.logspace', 'np.logspace', (['(-4)', '(4)', '(20)'], {}), '(-4, 4, 20)\n', (3437, 3448), True, 'import numpy as np\n'), ((3988, 4622), 
'sec5b_ml_model_binaryclass_pipeline.MachineLearningNameEthnicityProjectBinaryClass', 'ml_pipeline.MachineLearningNameEthnicityProjectBinaryClass', ([], {'control_panel': "{'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': nk, 'use_featured_df_switch': False,\n 'use_feature_set': [], 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888}"}), "(control_panel={\n 'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': nk, 'use_featured_df_switch': False,\n 'use_feature_set': [], 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888})\n", (4046, 4622), True, 'import sec5b_ml_model_binaryclass_pipeline as ml_pipeline\n'), ((6640, 7278), 'sec5b_ml_model_binaryclass_pipeline.MachineLearningNameEthnicityProjectBinaryClass', 'ml_pipeline.MachineLearningNameEthnicityProjectBinaryClass', ([], {'control_panel': "{'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': 'none', 'use_featured_df_switch': False,\n 'use_feature_set': [], 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, 
algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888}"}), "(control_panel={\n 'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': 'none', 'use_featured_df_switch': False,\n 'use_feature_set': [], 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888})\n", (6698, 7278), True, 'import sec5b_ml_model_binaryclass_pipeline as ml_pipeline\n'), ((7924, 8566), 'sec5b_ml_model_binaryclass_pipeline.MachineLearningNameEthnicityProjectBinaryClass', 'ml_pipeline.MachineLearningNameEthnicityProjectBinaryClass', ([], {'control_panel': "{'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': [], 'use_featured_df_switch': True,\n 'use_feature_set': feature_set, 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888}"}), "(control_panel={\n 'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': [], 'use_featured_df_switch': True,\n 'use_feature_set': feature_set, 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n 
target_label, 'eval_score': eval_score, 'random_state': 888})\n", (7982, 8566), True, 'import sec5b_ml_model_binaryclass_pipeline as ml_pipeline\n'), ((3948, 3971), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3969, 3971), False, 'import datetime\n'), ((5306, 5948), 'sec5b_ml_model_binaryclass_pipeline.MachineLearningNameEthnicityProjectBinaryClass', 'ml_pipeline.MachineLearningNameEthnicityProjectBinaryClass', ([], {'control_panel': "{'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': nk, 'use_featured_df_switch': True,\n 'use_feature_set': feature_set, 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888}"}), "(control_panel={\n 'save_result_switch': save_switch, 'use_subsampled_df_switch': False,\n 'use_subsampled_df_nk': nk, 'use_featured_df_switch': True,\n 'use_feature_set': feature_set, 'feature_selection_switch': False,\n 'cross_validation_switch': True, 'cross_validation_repeat': i,\n 'ml_process_on_test_data_switch': False,\n 'ml_process_on_training_data_switch': False,\n 'ml_process_on_ext_data_switch': False, 'ml_algo': None,\n 'ml_algo_param_grid': [algo_key, algo_val], 'binary_target_label':\n target_label, 'eval_score': eval_score, 'random_state': 888})\n", (5364, 5948), True, 'import sec5b_ml_model_binaryclass_pipeline as ml_pipeline\n'), ((6600, 6623), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6621, 6623), False, 'import datetime\n'), ((7884, 7907), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7905, 7907), False, 'import datetime\n'), ((5265, 5288), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), 
'()\n', (5286, 5288), False, 'import datetime\n')] |
import os, sys, logging
import json
import numpy as np
import random
from collections import defaultdict, Counter
import cPickle as pickle
import cProfile, pstats
import threading
import time
import multiprocessing
import math
from sklearn import metrics
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from src.datasets import data_utils
from src.datasets.data_utils import timed, TextTooShortException, DataSampler, WordVectorBuilder
from src.datasets.imdb import IMDB
from src.datasets.sentiment140 import Sentiment140
from src.datasets.amazon_reviews import AmazonReviews
from src.datasets.open_weiboscope import OpenWeibo
from src.datasets.arabic_twitter import ArabicTwitter
from src.datasets.word_vector_embedder import WordVectorEmbedder
# Train/test split fractions used below with data_utils.split_data
# (train + test = 1.0, no dev split).
data_fraction_test = 0.20
data_fraction_train = 0.80
# Worker count and a shared lock for any threaded sections of this script.
num_threads = multiprocessing.cpu_count()
threadLock = threading.Lock()
# setup logging
logger = data_utils.syslogger(__name__)
# set output directory; __file__ is undefined in interactive sessions,
# hence the NameError fallback to a flat results directory under dir_data
dir_data = "/data"
try:
    dir_results = os.path.join(dir_data, os.path.dirname(os.path.realpath(__file__)), 'results')
except NameError:
    dir_results = os.path.join(dir_data, 'results')
# data inputs: one dict per corpus, mapping dataset name -> loader class,
# on-disk path, and per-dataset args (load/normalize/embed options plus the
# word-vector models to evaluate).  Entries other than 'arabic_twitter' are
# kept commented out as ready-made alternative experiment configurations.
datasets = [
    # { 'sentiment140': {
    #         'class': Sentiment140,
    #         'path': os.path.join(dir_data, 'sentiment140.csv'),
    #         'args': { 'load': { 'rng_seed': 13337 },
    #                   'embed': { 'type': 'averaged' },
    #                   'normalize': { 'min_length': 70,
    #                                  'max_length': 150,
    #                                  'reverse': False,
    #                                  'pad_out': False
    #                                },
    #                   'shuffle_after_load': False,
    #                   'models': [
    #                       'glove',
    #                       'word2vec'
    #                   ]
    #                 }
    #     }
    # },
    # { 'imdb': {
    #         'class': IMDB,
    #         'path': os.path.join(dir_data, 'imdb'),
    #         'args': { 'load': { 'rng_seed': 13337 },
    #                   'embed': { 'type': 'averaged' },
    #                   'normalize': { 'encoding': None,
    #                                  'reverse': False,
    #                                  'pad_out': False,
    #                                  'min_length': 0,
    #                                  'max_length': 9999999
    #                                },
    #                   'shuffle_after_load': False,
    #                   'models': [
    #                       'glove',
    #                       'word2vec'
    #                   ]
    #                 }
    #     }
    # },
    # { 'amazon': {
    #         'class': AmazonReviews,
    #         'path': os.path.join(dir_data, 'amazonreviews.gz'),
    #         'args': { 'load': { 'rng_seed': 13337 },
    #                   'embed': { 'type': 'averaged' },
    #                   'normalize': { 'encoding': None,
    #                                  'reverse': False,
    #                                  'min_length': 0,
    #                                  'max_length': 9999999,
    #                                  'pad_out': False
    #                                },
    #                   'shuffle_after_load': True,
    #                   'models': [
    #                       'glove',
    #                       'word2vec',
    #                       {
    #                           'word2vec': { 'model': '/data/amazon/amazon_800000.bin' }
    #                       }
    #                   ]
    #                 }
    #     }
    # },
    # { 'openweibo': {
    #         'class': OpenWeibo,
    #         'path': os.path.join(dir_data, 'openweibo'),
    #         'args': { 'load': { 'rng_seed': 13337 },
    #                   'embed': { 'type': 'averaged' },
    #                   'shuffle_after_load': True,
    #                   'models': [
    #                       'glove',
    #                       'word2vec',
    #                       {
    #                           'word2vec': { 'model': '/data/openweibo/openweibo_800000.bin' }
    #                       }
    #                   ]
    #                 }
    #     }
    # },
    # { 'openweibo': {
    #         'class': OpenWeibo,
    #         'path': os.path.join(dir_data, 'openweibocensored'),
    #         'args': { 'load': { 'form': 'hanzi',
    #                             'rng_seed': 13337,
    #                             'label_type': 'denied'
    #                           },
    #                   'embed': { 'type': 'averaged' },
    #                   'shuffle_after_load': True,
    #                   'models': [
    #                       'glove',
    #                       'word2vec',
    #                       {
    #                           'word2vec': { 'model': '/data/openweibo/openweibo_fullset_hanzi_CLEAN_vocab31357747.bin' }
    #                       }
    #                   ]
    #                 }
    #     }
    # },
    # { 'openweibo': {
    #         'class': OpenWeibo,
    #         'path': os.path.join(dir_data, 'openweibo'),
    #         'args': { 'load': { 'form': 'hanzi',
    #                             'rng_seed': 13337
    #                           },
    #                   'embed': { 'type': 'averaged' },
    #                   'shuffle_after_load': True,
    #                   'models': [
    #                       'glove',
    #                       'word2vec',
    #                       {
    #                           'word2vec': { 'model': '/data/openweibo/openweibo_fullset_hanzi_CLEAN_vocab31357747.bin' }
    #                       }
    #                   ]
    #                 }
    #     }
    # },
    # { 'openweibo': {
    #         'class': OpenWeibo,
    #         'path': os.path.join(dir_data, 'openweibo'),
    #         'args': { 'load': { 'form': 'hanzi',
    #                             'rng_seed': 13337
    #                           },
    #                   'embed': { 'type': 'averaged' },
    #                   'shuffle_after_load': True,
    #                   'models': [
    #                       {
    #                           'word2vec': { 'model': '/data/openweibo/openweibo_fullset_min10_hanzi_vocab2548911_binary_CLEAN.bin',
    #                                         'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
    #                                         'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin',
    #                                         'args': { 'binary': 'True' }
    #                                       }
    #                       },
    #                       {
    #                           'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz',
    #                                         'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
    #                                         'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin'
    #                                       }
    #                       },
    #                       {
    #                           'glove': {
    #                                      'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
    #                                      'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin'
    #                                    }
    #                       },
    #                       {
    #                           'word2vec': {
    #                                         'model': '/data/sentiment140_800000.bin',
    #                                         'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin',
    #                                         'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin'
    #                                       }
    #                       }
    #                   ]
    #                 }
    #     }
    # },
    # { 'openweibo': {
    #         'class': OpenWeibo,
    #         'path': os.path.join(dir_data, 'openweibo'),
    #         'args': { 'load': { 'form': 'hanzi',
    #                             'rng_seed': 13337,
    #                             'label_type': 'denied'
    #                           },
    #                   'embed': { 'type': 'averaged' },
    #                   'shuffle_after_load': True,
    #                   'models': [
    #                       {
    #                           'word2vec': { 'model': '/data/openweibo/openweibo_fullset_min10_hanzi_vocab2548911_binary_CLEAN.bin',
    #                                         'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
    #                                         'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
    #                                         'args': { 'binary': 'True' }
    #                                       }
    #                       },
    #                       {
    #                           'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz',
    #                                         'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
    #                                         'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
    #                                         'args': { 'binary': 'True' }
    #                                       }
    #                       },
    #                       {
    #                           'glove': {
    #                                      'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
    #                                      'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
    #                                    }
    #                       },
    #                       {
    #                           'word2vec': {
    #                                         'model': '/data/sentiment140_800000.bin',
    #                                         'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin',
    #                                         'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin',
    #                                       }
    #                       }
    #                   ]
    #                 }
    #     }
    # },
    # Active configuration: Arabic Twitter emoji-labeled sentiment, embedded
    # with four alternative word-vector models (pre-split train/test pickles).
    { 'arabic_twitter': {
            'class': ArabicTwitter,
            'path': os.path.join(dir_data, 'arabic_twitter'),
            'args': { 'load': { 'form': 'arabic',
                                'rng_seed': 13337
                              },
                      'embed': { 'type': 'averaged' },
                      'shuffle_after_load': True,
                      'models': [
                          # {
                          #     'word2vec': { 'model': '/data/arabic_tweets/arabic_tweets_min10vocab_vocab1520226.bin',
                          #                   'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
                          #                   'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
                          #                   'args': { 'binary': 'True' }
                          #                 }
                          # },
                          {
                              'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz',
                                            'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
                                            'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
                                            'args': { 'binary': 'True' }
                                          }
                          },
                          {
                              'glove': {
                                         'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
                                         'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
                              }
                          },
                          {
                              'word2vec': {
                                            'model': '/data/sentiment140_800000.bin',
                                            'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
                                            'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
                              }
                          },
                          {
                              'word2vec': { 'model': '/data/arabic_tweets/arabic_tweets_NLTK_min10vocab_vocab981429.bin',
                                            'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin',
                                            'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin',
                                            'args': { 'binary': 'True' }
                                          }
                          }
                      ]
                    }
        }
    }
]
def classifiers():
    """
    Build the benchmark suite of (display_name, estimator) pairs.

    Each call constructs fresh, unfitted scikit-learn estimators so that
    repeated training runs never share state.
    """
    logistic = LogisticRegression(C=1.0,
                                  class_weight=None,
                                  dual=False,
                                  fit_intercept=True,
                                  intercept_scaling=1,
                                  penalty='l2',
                                  random_state=None,
                                  tol=0.0001)
    forest = RandomForestClassifier(n_jobs=-1,
                                    n_estimators=15,
                                    max_features='sqrt')
    bayes = GaussianNB()
    suite = [
        ("LogisticRegression", logistic),
        ("RandomForests", forest),
        ("Gaussian NaiveBayes", bayes),
    ]
    # ("LinearSVM", svm.LinearSVC()) intentionally left out of the suite
    return suite
# profiled methods
@timed
def timed_training(classifier, values, labels):
    """Fit *classifier* on (values, labels); wall time is captured by @timed."""
    fitted_model = classifier.fit(values, labels)
    return fitted_model
@timed
def timed_testing(classifier, values):
    """Predict labels for *values* with a fitted classifier; profiled by @timed."""
    predictions = classifier.predict(values)
    return predictions
@timed
def timed_dataload(loader, data, args, embedder, values, labels):
    """Normalize, tokenize and embed each (text, sentiment) pair in *data*.

    Appends one embedding vector to *values* and its sentiment label to
    *labels* for every sample that survives normalization; samples raising
    TextTooShortException are silently skipped (deliberate best-effort).
    Wall time is captured by the @timed decorator.

    Args:
        loader: dataset loader; used for Arabic-specific stripping/tokenizing.
        data: iterable of (text, sentiment) pairs.
        args (dict): dataset args; optional 'normalize' kwargs, 'load.form'
            tokenizer selector and 'embed.type' ('averaged'/'concatenated').
        embedder: WordVectorEmbedder used to turn tokens into a vector.
        values (list): output list of embedding vectors (mutated in place).
        labels (list): output list of sentiment labels (mutated in place).
    """
    # separate counter so progress reflects only successfully loaded samples
    counter = 0
    for text, sentiment in data:
        try:
            if (counter % 10000 == 0):
                print("Loading at {}".format(counter))
            # normalize if requested ('has_key' replaced with the 'in' operator)
            if 'normalize' in args:
                text_normalized = data_utils.normalize(text, **args['normalize'])
            else:
                text_normalized = text
            # tokenize according to the dataset's script/form
            form = args.get('load', {}).get('form', None)
            if form == 'hanzi':
                tokens = data_utils.tokenize_hanzi(text_normalized)
            elif form == 'arabic':
                text_stripped = loader.twitter_strip(text_normalized)
                tokens = loader.tokenize_arabic(text_stripped)
            else:
                tokens = data_utils.tokenize(text_normalized)
            # choose embedding type
            vector = None
            if args['embed']['type'] == 'concatenated':
                # BUG FIX: original read ``self.args['embed']`` here, but this
                # is a plain function with no ``self`` -> NameError whenever
                # the 'concatenated' embedding type was selected.
                vector = embedder.embed_words_into_vectors_concatenated(tokens, **args['embed'])
            elif args['embed']['type'] == 'averaged':
                vector = embedder.embed_words_into_vectors_averaged(tokens)
            else:
                pass
            # record only samples that produced a vector
            # (NOTE(review): no lock is actually taken here despite the
            # original "thread-safe with lock" comment)
            if vector is not None:
                values.append(vector)
                labels.append(sentiment)
                counter += 1
        except TextTooShortException:
            # too-short texts are skipped by design
            pass
# iterate all datasources
for dataset in datasets:
for data_source, data_params in dataset.iteritems():
# prepare data loader
klass = data_params['class']
loader = klass(data_params['path'])
data_args = data_params['args']
load_args = data_args.get('load', {})
data = loader.load_data(**load_args)
# test all vector models
for embedder_model in data_args['models']:
# identify prebuilt model if exists
if isinstance(embedder_model, dict):
# initialize word vector embedder
embedder_model, prebuilt_model_params = embedder_model.items().pop()
prebuilt_path_model = prebuilt_model_params.get('model', None)
model_args = prebuilt_model_params.get('args', {})
embedder = WordVectorEmbedder(embedder_model, model_fullpath=prebuilt_path_model, model_args=model_args)
# update embedder parameters
if prebuilt_path_model:
model_path_dir, model_path_filename, model_path_filext = WordVectorBuilder.filename_components(prebuilt_path_model)
embedder.model_subset = model_path_filename
# training data (custom or default)
if prebuilt_model_params.get('train', None):
prebuilt_path_train = prebuilt_model_params.get('train')
else:
prebuilt_path_train = WordVectorBuilder.filename_train(prebuilt_path_model)
with open(prebuilt_path_train, 'rb') as f:
data_train = pickle.load(f)
# testing data (custom or default)
if prebuilt_model_params.get('test', None):
prebuilt_path_test = prebuilt_model_params.get('test')
else:
prebuilt_path_test = WordVectorBuilder.filename_test(prebuilt_path_model)
with open(prebuilt_path_test, 'rb') as f:
data_test = pickle.load(f)
# initialize lists (will be converted later into numpy arrays)
values_train = []
labels_train = []
values_test = []
labels_test = []
# initialize timer
seconds_loading = 0
logger.info("processing {} samples from {}...".format(len(data_train)+len(data_test), prebuilt_path_model))
# load training dataset
profile_results = timed_dataload(loader, data_train, data_args, embedder, values_train, labels_train)
seconds_loading += profile_results.timer.total_tt
# load training dataset
profile_results = timed_dataload(loader, data_test, data_args, embedder, values_test, labels_test)
seconds_loading += profile_results.timer.total_tt
# shuffle if necessary
if data_args['shuffle_after_load']:
# store new lists
values_train_shuffled = []
labels_train_shuffled = []
values_test_shuffled = []
labels_test_shuffled = []
# generate subsample of random indices out of total available
random.seed(data_args.get('load', {}).get('rng_seed', None))
indices_train = range(len(values_train))
indices_test = range(len(values_test))
random.shuffle(indices_train)
random.shuffle(indices_test)
# keep entries at those random indices
for i in indices_train:
values_train_shuffled.append(values_train[i])
labels_train_shuffled.append(labels_train[i])
for i in indices_test:
values_test_shuffled.append(values_test[i])
labels_test_shuffled.append(labels_test[i])
# keep shuffled lists
values_train = values_train_shuffled
labels_train = labels_train_shuffled
values_test = values_test_shuffled
labels_test = labels_test_shuffled
# create numpy arrays for classifier input
values_train = np.array(values_train, dtype='float32')
labels_train = np.array(labels_train, dtype='float32')
values_test = np.array(values_test, dtype='float32')
labels_test = np.array(labels_test, dtype='float32')
else:
# initialize word vector embedder
embedder = WordVectorEmbedder(embedder_model)
# initialize lists (will be converted later into numpy arrays)
values = []
labels = []
# get equal-sized subsets of each class
data_sampler = DataSampler(klass, file_path=data_params['path'], num_classes=2)
data = data_sampler.sample_balanced(min_samples=data_args.get('min_samples', None), rng_seed=data_args.get('load', {}).get('rng_seed', None))
# load dataset
logger.info("processing {} samples from {}...".format(len(data), data_params['path']))
profile_results = timed_dataload(loader, data, data_args, embedder, values, labels)
# store loading time
seconds_loading = profile_results.timer.total_tt
# shuffle if necessary
if data_args['shuffle_after_load']:
# store new lists
values_shuffled = []
labels_shuffled = []
# generate subsample of random indices out of total available
random.seed(data_args.get('load', {}).get('rng_seed', None))
indices = range(len(values))
random.shuffle(indices)
# keep entries at those random indices
for i in indices:
values_shuffled.append(values[i])
labels_shuffled.append(labels[i])
# keep shuffled lists
values = values_shuffled
labels = labels_shuffled
# convert into nparray for sklearn
values = np.nan_to_num(np.array(values, dtype="float32"))
labels = np.nan_to_num(np.array(labels, dtype="float32"))
logger.info("Loaded {} samples...".format(len(values)))
# split into training and test data
logger.info("splitting dataset into training and testing sets...")
labels_train, labels_dev, labels_test = data_utils.split_data(labels, train=data_fraction_train, dev=0, test=data_fraction_test)
values_train, values_dev, values_test = data_utils.split_data(values, train=data_fraction_train, dev=0, test=data_fraction_test)
# calculate distribution
dist = Counter()
dist.update(labels_test)
# setup classifier
logger.info("Training on {}, Testing on {}...".format(len(values_train), len(values_test)))
for classifier_name,classifier in classifiers():
# profiled training
logger.info("Training %s classifier..." % classifier.__class__.__name__)
profile_results = timed_training(classifier, values_train, labels_train)
seconds_training = profile_results.timer.total_tt
# profiled testing
logger.info("Testing %s classifier..." % classifier.__class__.__name__)
profile_results = timed_testing(classifier, values_test)
predictions = profile_results.results
seconds_testing = profile_results.timer.total_tt
# calculate metrics
data_size = len(labels_test)
data_positive = np.sum(labels_test)
data_negative = data_size - data_positive
confusion_matrix = metrics.confusion_matrix(labels_test, predictions)
TN = confusion_matrix[0][0]
FP = confusion_matrix[0][1]
FN = confusion_matrix[1][0]
TP = confusion_matrix[1][1]
accuracy = metrics.accuracy_score(labels_test, predictions)
precision = metrics.precision_score(labels_test, predictions)
recall = metrics.recall_score(labels_test, predictions)
f1 = metrics.f1_score(labels_test, predictions)
# build results object
results = { 'classifier': str(classifier.__class__.__name__),
'data': { 'source': str(data_source),
'testsize': str(data_size),
'positive': str(data_positive),
'negative': str(data_negative),
'time_in_seconds_loading': str(seconds_loading)
},
'embedding': { 'model': str(embedder_model),
'subset': str(embedder.model_subset)
},
'data_args': data_args,
'metrics': { 'TP': str(TP),
'FP': str(FP),
'TN': str(TN),
'FN': str(FN),
'accuracy': str(accuracy),
'precision': str(precision),
'recall': str(recall),
'f1': str(f1),
'time_in_seconds_training': str(seconds_training),
'time_in_seconds_testing': str(seconds_testing)
}
}
# ensure output directory exists
if not os.path.isdir(dir_results):
data_utils.mkdir_p(dir_results)
# save json file
filename_results = "{}_{}_{}.json".format(data_source, embedder_model, classifier.__class__.__name__)
logger.info("Saving results to {}...".format(filename_results))
with open(os.path.join(dir_results,filename_results), 'a') as outfile:
json.dump(results, outfile, sort_keys=True, indent=4, separators=(',', ': '))
outfile.write('\n')
| [
"src.datasets.data_utils.tokenize_hanzi",
"multiprocessing.cpu_count",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"src.datasets.data_utils.WordVectorBuilder.filename_test",
"src.datasets.data_utils.syslogger",
"threading.Lock",
"os.path.isdir",
"src.datasets.... | [((941, 968), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (966, 968), False, 'import multiprocessing\n'), ((982, 998), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (996, 998), False, 'import threading\n'), ((1025, 1055), 'src.datasets.data_utils.syslogger', 'data_utils.syslogger', (['__name__'], {}), '(__name__)\n', (1045, 1055), False, 'from src.datasets import data_utils\n'), ((1237, 1270), 'os.path.join', 'os.path.join', (['dir_data', '"""results"""'], {}), "(dir_data, 'results')\n", (1249, 1270), False, 'import os, sys, logging\n'), ((1161, 1187), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1177, 1187), False, 'import os, sys, logging\n'), ((16388, 16428), 'os.path.join', 'os.path.join', (['dir_data', '"""arabic_twitter"""'], {}), "(dir_data, 'arabic_twitter')\n", (16400, 16428), False, 'import os, sys, logging\n'), ((20782, 20928), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1.0)', 'class_weight': 'None', 'dual': '(False)', 'fit_intercept': '(True)', 'intercept_scaling': '(1)', 'penalty': '"""l2"""', 'random_state': 'None', 'tol': '(0.0001)'}), "(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, penalty='l2', random_state=None, tol=0.0001)\n", (20800, 20928), False, 'from sklearn.linear_model import LogisticRegression\n'), ((21335, 21406), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)', 'n_estimators': '(15)', 'max_features': '"""sqrt"""'}), "(n_jobs=-1, n_estimators=15, max_features='sqrt')\n", (21357, 21406), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((21552, 21564), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (21562, 21564), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((30585, 30594), 'collections.Counter', 'Counter', ([], {}), '()\n', (30592, 30594), False, 'from collections import defaultdict, 
Counter\n'), ((22248, 22295), 'src.datasets.data_utils.normalize', 'data_utils.normalize', (['text'], {}), "(text, **args['normalize'])\n", (22268, 22295), False, 'from src.datasets import data_utils\n'), ((22468, 22510), 'src.datasets.data_utils.tokenize_hanzi', 'data_utils.tokenize_hanzi', (['text_normalized'], {}), '(text_normalized)\n', (22493, 22510), False, 'from src.datasets import data_utils\n'), ((24303, 24400), 'src.datasets.word_vector_embedder.WordVectorEmbedder', 'WordVectorEmbedder', (['embedder_model'], {'model_fullpath': 'prebuilt_path_model', 'model_args': 'model_args'}), '(embedder_model, model_fullpath=prebuilt_path_model,\n model_args=model_args)\n', (24321, 24400), False, 'from src.datasets.word_vector_embedder import WordVectorEmbedder\n'), ((27846, 27885), 'numpy.array', 'np.array', (['values_train'], {'dtype': '"""float32"""'}), "(values_train, dtype='float32')\n", (27854, 27885), True, 'import numpy as np\n'), ((27917, 27956), 'numpy.array', 'np.array', (['labels_train'], {'dtype': '"""float32"""'}), "(labels_train, dtype='float32')\n", (27925, 27956), True, 'import numpy as np\n'), ((27987, 28025), 'numpy.array', 'np.array', (['values_test'], {'dtype': '"""float32"""'}), "(values_test, dtype='float32')\n", (27995, 28025), True, 'import numpy as np\n'), ((28056, 28094), 'numpy.array', 'np.array', (['labels_test'], {'dtype': '"""float32"""'}), "(labels_test, dtype='float32')\n", (28064, 28094), True, 'import numpy as np\n'), ((28192, 28226), 'src.datasets.word_vector_embedder.WordVectorEmbedder', 'WordVectorEmbedder', (['embedder_model'], {}), '(embedder_model)\n', (28210, 28226), False, 'from src.datasets.word_vector_embedder import WordVectorEmbedder\n'), ((28451, 28515), 'src.datasets.data_utils.DataSampler', 'DataSampler', (['klass'], {'file_path': "data_params['path']", 'num_classes': '(2)'}), "(klass, file_path=data_params['path'], num_classes=2)\n", (28462, 28515), False, 'from src.datasets.data_utils import timed, 
TextTooShortException, DataSampler, WordVectorBuilder\n'), ((30293, 30386), 'src.datasets.data_utils.split_data', 'data_utils.split_data', (['labels'], {'train': 'data_fraction_train', 'dev': '(0)', 'test': 'data_fraction_test'}), '(labels, train=data_fraction_train, dev=0, test=\n data_fraction_test)\n', (30314, 30386), False, 'from src.datasets import data_utils\n'), ((30438, 30531), 'src.datasets.data_utils.split_data', 'data_utils.split_data', (['values'], {'train': 'data_fraction_train', 'dev': '(0)', 'test': 'data_fraction_test'}), '(values, train=data_fraction_train, dev=0, test=\n data_fraction_test)\n', (30459, 30531), False, 'from src.datasets import data_utils\n'), ((31555, 31574), 'numpy.sum', 'np.sum', (['labels_test'], {}), '(labels_test)\n', (31561, 31574), True, 'import numpy as np\n'), ((31677, 31727), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (31701, 31727), False, 'from sklearn import metrics\n'), ((32010, 32058), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (32032, 32058), False, 'from sklearn import metrics\n'), ((32097, 32146), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (32120, 32146), False, 'from sklearn import metrics\n'), ((32185, 32231), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (32205, 32231), False, 'from sklearn import metrics\n'), ((32270, 32312), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (32286, 32312), False, 'from sklearn import metrics\n'), ((22756, 22792), 'src.datasets.data_utils.tokenize', 'data_utils.tokenize', (['text_normalized'], {}), '(text_normalized)\n', (22775, 22792), False, 'from src.datasets import 
data_utils\n'), ((24560, 24618), 'src.datasets.data_utils.WordVectorBuilder.filename_components', 'WordVectorBuilder.filename_components', (['prebuilt_path_model'], {}), '(prebuilt_path_model)\n', (24597, 24618), False, 'from src.datasets.data_utils import timed, TextTooShortException, DataSampler, WordVectorBuilder\n'), ((24938, 24991), 'src.datasets.data_utils.WordVectorBuilder.filename_train', 'WordVectorBuilder.filename_train', (['prebuilt_path_model'], {}), '(prebuilt_path_model)\n', (24970, 24991), False, 'from src.datasets.data_utils import timed, TextTooShortException, DataSampler, WordVectorBuilder\n'), ((25084, 25098), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (25095, 25098), True, 'import cPickle as pickle\n'), ((25349, 25401), 'src.datasets.data_utils.WordVectorBuilder.filename_test', 'WordVectorBuilder.filename_test', (['prebuilt_path_model'], {}), '(prebuilt_path_model)\n', (25380, 25401), False, 'from src.datasets.data_utils import timed, TextTooShortException, DataSampler, WordVectorBuilder\n'), ((25492, 25506), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (25503, 25506), True, 'import cPickle as pickle\n'), ((26985, 27014), 'random.shuffle', 'random.shuffle', (['indices_train'], {}), '(indices_train)\n', (26999, 27014), False, 'import random\n'), ((27035, 27063), 'random.shuffle', 'random.shuffle', (['indices_test'], {}), '(indices_test)\n', (27049, 27063), False, 'import random\n'), ((29458, 29481), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (29472, 29481), False, 'import random\n'), ((29920, 29953), 'numpy.array', 'np.array', (['values'], {'dtype': '"""float32"""'}), "(values, dtype='float32')\n", (29928, 29953), True, 'import numpy as np\n'), ((29994, 30027), 'numpy.array', 'np.array', (['labels'], {'dtype': '"""float32"""'}), "(labels, dtype='float32')\n", (30002, 30027), True, 'import numpy as np\n'), ((34215, 34241), 'os.path.isdir', 'os.path.isdir', (['dir_results'], {}), '(dir_results)\n', 
(34228, 34241), False, 'import os, sys, logging\n'), ((34263, 34294), 'src.datasets.data_utils.mkdir_p', 'data_utils.mkdir_p', (['dir_results'], {}), '(dir_results)\n', (34281, 34294), False, 'from src.datasets import data_utils\n'), ((34633, 34710), 'json.dump', 'json.dump', (['results', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(results, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n", (34642, 34710), False, 'import json\n'), ((34552, 34595), 'os.path.join', 'os.path.join', (['dir_results', 'filename_results'], {}), '(dir_results, filename_results)\n', (34564, 34595), False, 'import os, sys, logging\n')] |
import pcl
import numpy as np
def run_icp(data):
    """Run Generalized ICP between two point clouds after an initial guess.

    *data* is a single tuple so the function can be dispatched through a
    multiprocessing map: (delta_theta_z, delta_x, delta_y, pc_in, pc_out,
    iter_t, iter_x, iter_y).  The first three entries are grids of candidate
    z-rotations and x/y translations; the iter_* indices select one candidate
    from each grid as the initial alignment.

    Returns (transf, fitness): transf is the 4x4 homogeneous transform mapping
    pc_in onto pc_out (initial guess composed with the GICP refinement), and
    fitness is the GICP score, forced to 0 when the solver did not converge.
    """
    theta_grid, dx_grid, dy_grid, src_pts, dst_pts, ti, xi, yi = data

    # Build the initial guess: rotation about z plus an x/y translation.
    theta = theta_grid[ti]
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    init_tf = np.eye(4)
    init_tf[0, 0] = cos_t
    init_tf[0, 1] = -sin_t
    init_tf[1, 0] = sin_t
    init_tf[1, 1] = cos_t
    init_tf[0, 3] = dx_grid[xi]
    init_tf[1, 3] = dy_grid[yi]

    # Apply the guess to the source cloud (points are stored as row vectors).
    rot_ini = init_tf[0:3, 0:3]
    trans_ini = init_tf[0:3, 3]
    moved = np.matmul(rot_ini, src_pts.transpose()) + trans_ini[:, np.newaxis]
    moved = moved.transpose()

    cloud_in = pcl.PointCloud()
    cloud_out = pcl.PointCloud()
    cloud_in.from_array(moved.astype(np.float32))
    cloud_out.from_array(dst_pts.astype(np.float32))

    # tried open3d but found the pcl version more robust
    gicp = cloud_in.make_GeneralizedIterativeClosestPoint()
    converged, refine_tf, estimate, fitness = gicp.gicp(
        cloud_in, cloud_out, max_iter=1000
    )
    if not converged:
        fitness = 0

    # Compose refinement with the initial guess: T = T_refine @ T_init.
    transf = np.eye(4)
    transf[0:3, 0:3] = np.matmul(refine_tf[0:3, 0:3], rot_ini)
    transf[0:3, 3] = np.matmul(refine_tf[0:3, 0:3], trans_ini) + refine_tf[0:3, 3]
    return transf, fitness
| [
"numpy.eye",
"numpy.matmul",
"numpy.cos",
"numpy.sin",
"pcl.PointCloud"
] | [((182, 191), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (188, 191), True, 'import numpy as np\n'), ((215, 244), 'numpy.cos', 'np.cos', (['delta_theta_z[iter_t]'], {}), '(delta_theta_z[iter_t])\n', (221, 244), True, 'import numpy as np\n'), ((322, 351), 'numpy.sin', 'np.sin', (['delta_theta_z[iter_t]'], {}), '(delta_theta_z[iter_t])\n', (328, 351), True, 'import numpy as np\n'), ((375, 404), 'numpy.cos', 'np.cos', (['delta_theta_z[iter_t]'], {}), '(delta_theta_z[iter_t])\n', (381, 404), True, 'import numpy as np\n'), ((665, 681), 'pcl.PointCloud', 'pcl.PointCloud', ([], {}), '()\n', (679, 681), False, 'import pcl\n'), ((698, 714), 'pcl.PointCloud', 'pcl.PointCloud', ([], {}), '()\n', (712, 714), False, 'import pcl\n'), ((1105, 1114), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1111, 1114), True, 'import numpy as np\n'), ((1138, 1192), 'numpy.matmul', 'np.matmul', (['transf_iter[0:3, 0:3]', 'transf_ini[0:3, 0:3]'], {}), '(transf_iter[0:3, 0:3], transf_ini[0:3, 0:3])\n', (1147, 1192), True, 'import numpy as np\n'), ((269, 298), 'numpy.sin', 'np.sin', (['delta_theta_z[iter_t]'], {}), '(delta_theta_z[iter_t])\n', (275, 298), True, 'import numpy as np\n'), ((1224, 1276), 'numpy.matmul', 'np.matmul', (['transf_iter[0:3, 0:3]', 'transf_ini[0:3, 3]'], {}), '(transf_iter[0:3, 0:3], transf_ini[0:3, 3])\n', (1233, 1276), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 14:33:17 2018
@author: lzw
check the box
"""
import cv2
import numpy as np
# Read the annotation list once; the with-statement guarantees the file is
# closed even if a later line raises (the original handle was never closed).
with open("list.txt") as fo:
    lines = fo.readlines()

for line in lines:
    fields = line.split()
    if not fields:
        # Skip blank lines so a trailing newline in list.txt does not crash.
        continue
    img_path = fields[0]
    bbox = [float(v) for v in fields[1:]]
    boxes = np.array(bbox, dtype=np.float32).reshape(-1, 4)
    img = cv2.imread(img_path)
    for box in boxes:
        # cv2.rectangle requires integer pixel coordinates; the floats read
        # from the list file must be rounded explicitly.
        x1, y1, x2, y2 = (int(round(float(c))) for c in box)
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 1)
    cv2.imshow("test", img)
    # Bug fix: the original `cv2.waitKey(0) & 0xFF == ord('q')` parses as
    # `waitKey(0) & (0xFF == ord('q'))` because `==` binds tighter than `&`,
    # which is always 0 -- the quit branch could never fire.  Parenthesize
    # and actually stop iterating on 'q'.
    if (cv2.waitKey(0) & 0xFF) == ord('q'):
        break
cv2.destroyAllWindows()
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread"
] | [((371, 391), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (381, 391), False, 'import cv2\n'), ((497, 520), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'img'], {}), "('test', img)\n", (507, 520), False, 'import cv2\n'), ((422, 492), 'cv2.rectangle', 'cv2.rectangle', (['img', '(box[0], box[1])', '(box[2], box[3])', '(255, 0, 0)', '(1)'], {}), '(img, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), 1)\n', (435, 492), False, 'import cv2\n'), ((571, 594), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (592, 594), False, 'import cv2\n'), ((313, 345), 'numpy.array', 'np.array', (['bbox'], {'dtype': 'np.float32'}), '(bbox, dtype=np.float32)\n', (321, 345), True, 'import numpy as np\n'), ((528, 542), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (539, 542), False, 'import cv2\n')] |
# © 2019 University of Illinois Board of Trustees. All rights reserved
"""
Prepare a VCF file from a set of features in the current directory
"""
import glob
import pickle
from vcfFromContigs import createVcfRecord
from PySamFastaWrapper import PySamFastaWrapper as ReferenceCache
from multiprocessing import Pool
import argparse
import os
import subprocess
import math
import shutil
import numpy as np
import sys
def checkLogs(path):
    """Return True when every shard file under *path* has a finished log.

    Each input "shardN.txt" is expected to have a sibling "shardN.txt.log"
    containing the termination string written by a completed run; the first
    log missing that string aborts the scan with False.
    """
    marker = "Completed running the script"
    shards = glob.glob("%s/shard[0-9]*.txt" % path)
    print("Found %d input shard files" % len(shards))
    for shard in shards:
        logName = shard + ".log"
        with open(logName, 'r') as fhandle:
            finished = marker in fhandle.read()
        if not finished:
            print("File %s doesn't have termination string" % logName)
            return False
    return True
def callAlleles(likelihoodDict, chromosome, start, length, ref):
    """
    Given a likelihood dictionary, call the genotype at one site and format
    it as a single VCF record line

    :param likelihoodDict: dict
        Maps an allele pairing (an iterable of allele strings, two per
        diploid call) to its likelihood
    :param chromosome: str
        Chromosome name
    :param start: int
        Position in the reference sequence
    :param length: int
        Length of variant in reference sequence
    :param ref: ReferenceCache
        Reference cache object (assumed already positioned on `chromosome`
        by the caller -- confirm at call sites)
    :return: str
        Variant record, or None when the best call is homozygous reference
        and no alternate allele exists anywhere at the site
    """
    refAllele = ''.join(ref[start: start + length]);
    # Pick the allele pairing with the highest likelihood.
    topAlleleCombination = sorted([(v, k) for k, v in likelihoodDict.items()], reverse=True)[0];
    likelihood, topAlleles = topAlleleCombination;
    likelihood = min(float(likelihood), 1 - 1e-8); # Quality score restricted to value 80
    # Phred-scaled quality: -10 * log10(P(error)).
    quality = -10 * math.log10(1 - likelihood);
    altAlleles = list(set(topAlleles).difference({refAllele}));
    # Collect every allele mentioned by any pairing at this site.
    allelesAtSite = set();
    for key in likelihoodDict:
        for k in key:
            allelesAtSite.add(k);
    allelesAtSite = list(allelesAtSite);
    if len(altAlleles) == 0:
        # Homozygous-reference best call: report genotype 0/0 against the
        # alternate alleles observed anywhere at the site; when the site has
        # no alternate allele at all there is nothing to emit.
        genotypes = [0, 0];
        allAlleles = allelesAtSite;
        altAlleles = list(set(allAlleles).difference({refAllele}));
        if len(altAlleles) == 0:
            return None;
    else:
        # Genotype indices follow VCF convention: 0 for REF, i+1 for the
        # i-th ALT allele.
        genotypes = [];
        for allele in topAlleles:
            if allele == refAllele:
                genotypes.append(0);
            else:
                try:
                    altIndex = altAlleles.index(allele);
                except ValueError:
                    # Rethrow with message (should probably put in a finally block)
                    raise ValueError("Cannot find allele %s in altAllele list %s" % (allele, str(altAlleles)));
                genotypes.append(altIndex + 1);
    record = createVcfRecord(
        chromosome,
        start,
        ref,
        [0],
        [refAllele],
        [altAlleles],
        [genotypes],
        string="MixtureOfExpertPrediction",
        qual=quality
    )[0];
    return record;
def vcfWrapper(args):
    """Adapter for Pool.imap_unordered: unpack one work item and run it.

    :param args: a (data, ref, tmpdir) tuple as assembled in the main block
    """
    data, ref, tmpdir = args
    return vcfRecords(data, ref, tmpdir)
def vcfRecords(data, ref, tmpdir):
    """
    Creates vcf records for one shard of site predictions

    :param data: str
        Filename containing site predictions from each expert and meta-expert predictions
    :param ref: str
        Reference cache database location
    :param tmpdir: str
        Temporary directory path
    :return: set
        Chromosome names encountered in this shard
    """
    # Close the pickle file promptly instead of leaking the handle
    # (the original open() had no matching close()).
    with open(data, 'rb') as pickled:
        items = pickle.load(pickled)
    prefix = os.path.join(tmpdir, os.path.split(data)[1])
    ref = ReferenceCache(database=ref)
    chromosomes = set()
    # One output file per expert, plus best-of / mean-of combinations and a
    # BED file recording which expert the meta-expert chose at each site.
    # The with-statement guarantees all six handles are flushed and closed
    # even if record generation raises part-way through.
    with open(prefix + ".expert0.vcf", 'w') as e0handle, \
         open(prefix + ".expert1.vcf", 'w') as e1handle, \
         open(prefix + ".expert2.vcf", 'w') as e2handle, \
         open(prefix + ".best.vcf", 'w') as bhandle, \
         open(prefix + ".mean.vcf", 'w') as mhandle, \
         open(prefix + ".choices.bed", 'w') as chandle:
        for siteDict in items:
            # Re-point the reference cache only when the chromosome changes.
            if ref.chrom != siteDict['chromosome']:
                ref.chrom = siteDict['chromosome']
            # One VCF record per expert at this site.
            # NOTE(review): callAlleles can return None (pure homozygous
            # reference with no alternate allele), which would raise a
            # TypeError below exactly as in the original code; left unchanged
            # -- confirm upstream that shards never contain such sites.
            expertRecords = [
                callAlleles(likelihoodDict, siteDict['chromosome'], siteDict['position'], siteDict['length'], ref)
                for likelihoodDict in siteDict['expertPredictions']
            ]
            e0handle.write(expertRecords[0] + '\n')
            e1handle.write(expertRecords[1] + '\n')
            e2handle.write(expertRecords[2] + '\n')
            # "Best" = the record from the expert with the highest meta weight.
            bestRecord = expertRecords[np.argmax(siteDict['meta'])]
            bhandle.write(bestRecord + '\n')

            def average(allelePairing):
                # Meta-weighted mean of the three experts' likelihoods.
                return sum(
                    float(siteDict['expertPredictions'][i][allelePairing]) * float(siteDict['meta'][i])
                    for i in range(3)
                )

            meanLikelihoodDict = {
                allelePairing: average(allelePairing) for allelePairing in siteDict['expertPredictions'][0]
            }
            meanRecord = callAlleles(
                meanLikelihoodDict, siteDict['chromosome'], siteDict['position'], siteDict['length'], ref
            )
            mhandle.write(meanRecord + '\n')
            # BED row: chrom, start, end, index of the chosen expert.
            chandle.write('\t'.join([
                siteDict['chromosome'],
                str(siteDict['position']),
                str(siteDict['position'] + siteDict['length']),
                str(np.argmax(siteDict['meta']))
            ]) + '\n')
            chromosomes.add(siteDict['chromosome'])
    return chromosomes
def headerString(ref, chromosomes, info):
    """Build the VCF header: file format, one contig line per chromosome
    (lengths looked up in the reference cache), the caller-supplied INFO
    line, fixed FORMAT/FILTER lines, and the column header row.
    """
    cache = ReferenceCache(database=ref)
    pieces = ["##fileformat=VCFv4.1\n"]
    for chromosome in chromosomes:
        cache.chrom = chromosome
        pieces.append("##contig=<ID=%s,length=%d>\n" % (chromosome, len(cache)))
    pieces.append(info + '\n')
    pieces.append('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
    pieces.append('##FILTER=<ID=FAIL,Description="Failed call">\n')
    pieces.append("#" + '\t'.join("CHROM POS ID REF ALT QUAL FILTER INFO FORMAT SAMPLE1".split()) + '\n')
    return ''.join(pieces)
if __name__ == "__main__":
    # Command-line driver: convert sharded *.features prediction files into a
    # set of sorted VCF outputs plus a BED file of per-site expert choices.
    parser = argparse.ArgumentParser(
        description="Create VCF files from a set of feature files in a directory"
    );
    parser.add_argument(
        "--prefix",
        help="Prefix of sharded features",
        required=True,
    );
    parser.add_argument(
        "--ref",
        help="Reference cache location",
        required=True,
    );
    parser.add_argument(
        "--tmpdir",
        help="Temporary directory",
        default="/tmp/vcftemp"
    );
    parser.add_argument(
        "--numThreads",
        help="Number of CPU threads to use",
        default=4,
        type=int,
    );
    # NOTE(review): required=None is falsy, so --outputPrefix is effectively
    # optional even though later code dereferences args.outputPrefix
    # unconditionally -- probably meant required=True; confirm.
    parser.add_argument(
        "--outputPrefix",
        help="Prefix of output file",
        required=None,
    );
    parser.add_argument(
        "--checkRuns",
        help="Check runs before proceeding",
        default=False,
        action="store_true",
    );
    args = parser.parse_args();
    allResults = glob.glob("%s*.features" % args.prefix);
    # Optionally verify that every shard's log reports clean termination
    # before spending time building VCFs from possibly partial output.
    if args.checkRuns:
        path = os.path.split(args.prefix)[0];
        print("Checking logs in path %s" % path);
        if checkLogs(path):
            print("All runs have completed");
        else:
            print("Some runs weren't completed. Stopping ... ");
            sys.exit(-1);
    # Start from an empty scratch directory.
    if os.path.exists(args.tmpdir):
        shutil.rmtree(args.tmpdir);
    os.makedirs(args.tmpdir);
    # First create sub-vcf files, one set per input shard, in parallel.
    mapper = Pool(args.numThreads).imap_unordered;
    callArgs = [
        (result, args.ref, args.tmpdir)
        for result in allResults
    ];
    chromosomes = set();
    # Union the chromosomes seen by every worker; needed for the VCF header.
    for i, chr_ in enumerate(mapper(vcfWrapper, callArgs)):
        chromosomes = chromosomes.union(chr_);
        if (i + 1) % 100 == 0:
            print("Completed processing %d files" % (i + 1));
    def combineVcfs(suffix, info, label):
        # Concatenate all per-shard files matching *suffix* under a single
        # VCF header, then sort the result with the external `vcf-sort` tool.
        searchString = os.path.join(args.tmpdir, suffix);
        print("Search string %s" % searchString);
        allFiles = glob.glob(searchString);
        print("Found %d files" % len(allFiles));
        header = headerString(
            args.ref,
            chromosomes,
            info='##INFO=<ID=%s,Description="%s"' % (info[0], info[1])
        );
        tempvcf = args.outputPrefix + ".%s.tmp.vcf" % label;
        finalvcf = args.outputPrefix + ".%s.vcf" % label;
        # The unsorted concatenation goes into the temporary file first.
        with open(tempvcf, 'w') as fhandle:
            fhandle.write(header);
            for f in allFiles:
                contents = open(f, 'r').read().rstrip();
                if len(contents) > 0:
                    fhandle.write(contents + '\n');
        # vcf-sort writes sorted records to stdout; capture into finalvcf.
        with open(finalvcf, 'w') as fhandle:
            command = ["vcf-sort", tempvcf];
            print("Running command %s" % str(command));
            subprocess.call(
                command, stdout=fhandle
            );
        os.remove(tempvcf);
    # Create VCF headers and combine sub-vcf files
    combineVcfs(
        "*expert0.vcf",
        ("MixtureOfExpertPrediction", "Prediction from NGS expert"),
        label="ngs"
    );
    combineVcfs(
        "*expert1.vcf",
        ("MixtureOfExpertPrediction", "Prediction from TGS expert"),
        label="tgs"
    );
    combineVcfs(
        "*expert2.vcf",
        ("MixtureOfExpertPrediction", "Prediction from NGS_TGS expert"),
        label="ngs_tgs"
    );
    combineVcfs(
        "*best.vcf",
        ("MixtureOfExpertPrediction", "Prediction from best expert"),
        label="best"
    );
    combineVcfs(
        "*mean.vcf",
        ("MixtureOfExpertPrediction", "Mean predictions from experts"),
        label="mean"
    );
    # Combine all per-shard choice BED files into one, tallying how often
    # each expert (column 4) was selected by the meta-expert.
    allBed = glob.glob(os.path.join(args.tmpdir, "*.choices.bed"));
    choiceCounts = dict({0: 0, 1: 0, 2: 0});
    with open(args.outputPrefix + ".choices.bed", 'w') as fhandle:
        for f in allBed:
            with open(f, 'r') as rhandle:
                for line in rhandle.readlines():
                    line = line.rstrip();
                    if (len(line) > 0):
                        fhandle.write(line + '\n');
                        items = line.split();
                        choice = int(items[3]);
                        choiceCounts[choice] += 1;
    print("Choice histogram = %s" % (str(choiceCounts)));
| [
"os.path.exists",
"sys.exit",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"numpy.argmax",
"os.path.split",
"PySamFastaWrapper.PySamFastaWrapper",
"multiprocessing.Pool",
"subprocess.call",
"shutil.rmtree",
"math.log10",
"vcfFromContigs.createVcfRecord",
"glob.glob",
"os.rem... | [((455, 493), 'glob.glob', 'glob.glob', (["('%s/shard[0-9]*.txt' % path)"], {}), "('%s/shard[0-9]*.txt' % path)\n", (464, 493), False, 'import glob\n'), ((3458, 3486), 'PySamFastaWrapper.PySamFastaWrapper', 'ReferenceCache', ([], {'database': 'ref'}), '(database=ref)\n', (3472, 3486), True, 'from PySamFastaWrapper import PySamFastaWrapper as ReferenceCache\n'), ((5415, 5443), 'PySamFastaWrapper.PySamFastaWrapper', 'ReferenceCache', ([], {'database': 'ref'}), '(database=ref)\n', (5429, 5443), True, 'from PySamFastaWrapper import PySamFastaWrapper as ReferenceCache\n'), ((6117, 6220), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create VCF files from a set of feature files in a directory"""'}), "(description=\n 'Create VCF files from a set of feature files in a directory')\n", (6140, 6220), False, 'import argparse\n'), ((7047, 7086), 'glob.glob', 'glob.glob', (["('%s*.features' % args.prefix)"], {}), "('%s*.features' % args.prefix)\n", (7056, 7086), False, 'import glob\n'), ((7395, 7422), 'os.path.exists', 'os.path.exists', (['args.tmpdir'], {}), '(args.tmpdir)\n', (7409, 7422), False, 'import os\n'), ((7465, 7489), 'os.makedirs', 'os.makedirs', (['args.tmpdir'], {}), '(args.tmpdir)\n', (7476, 7489), False, 'import os\n'), ((1734, 1760), 'math.log10', 'math.log10', (['(1 - likelihood)'], {}), '(1 - likelihood)\n', (1744, 1760), False, 'import math\n'), ((2735, 2874), 'vcfFromContigs.createVcfRecord', 'createVcfRecord', (['chromosome', 'start', 'ref', '[0]', '[refAllele]', '[altAlleles]', '[genotypes]'], {'string': '"""MixtureOfExpertPrediction"""', 'qual': 'quality'}), "(chromosome, start, ref, [0], [refAllele], [altAlleles], [\n genotypes], string='MixtureOfExpertPrediction', qual=quality)\n", (2750, 2874), False, 'from vcfFromContigs import createVcfRecord\n'), ((7432, 7458), 'shutil.rmtree', 'shutil.rmtree', (['args.tmpdir'], {}), '(args.tmpdir)\n', (7445, 7458), False, 'import shutil\n'), ((7538, 7559), 
'multiprocessing.Pool', 'Pool', (['args.numThreads'], {}), '(args.numThreads)\n', (7542, 7559), False, 'from multiprocessing import Pool\n'), ((7967, 8000), 'os.path.join', 'os.path.join', (['args.tmpdir', 'suffix'], {}), '(args.tmpdir, suffix)\n', (7979, 8000), False, 'import os\n'), ((8071, 8094), 'glob.glob', 'glob.glob', (['searchString'], {}), '(searchString)\n', (8080, 8094), False, 'import glob\n'), ((8919, 8937), 'os.remove', 'os.remove', (['tempvcf'], {}), '(tempvcf)\n', (8928, 8937), False, 'import os\n'), ((9749, 9791), 'os.path.join', 'os.path.join', (['args.tmpdir', '"""*.choices.bed"""'], {}), "(args.tmpdir, '*.choices.bed')\n", (9761, 9791), False, 'import os\n'), ((3423, 3442), 'os.path.split', 'os.path.split', (['data'], {}), '(data)\n', (3436, 3442), False, 'import os\n'), ((4333, 4360), 'numpy.argmax', 'np.argmax', (["siteDict['meta']"], {}), "(siteDict['meta'])\n", (4342, 4360), True, 'import numpy as np\n'), ((7127, 7153), 'os.path.split', 'os.path.split', (['args.prefix'], {}), '(args.prefix)\n', (7140, 7153), False, 'import os\n'), ((7373, 7385), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (7381, 7385), False, 'import sys\n'), ((8839, 8879), 'subprocess.call', 'subprocess.call', (['command'], {'stdout': 'fhandle'}), '(command, stdout=fhandle)\n', (8854, 8879), False, 'import subprocess\n'), ((5145, 5172), 'numpy.argmax', 'np.argmax', (["siteDict['meta']"], {}), "(siteDict['meta'])\n", (5154, 5172), True, 'import numpy as np\n')] |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.distributed as dist
from monai.handlers import ROCAUC
def main():
    """Exercise the ROCAUC handler under 2-process distributed evaluation.

    Each rank feeds its own predictions/labels into the metric; compute()
    is then expected to yield the AUC over all ranks' samples (0.75).
    """
    # NCCL backend needs one CUDA device per rank; rendezvous settings
    # (MASTER_ADDR/PORT, RANK, WORLD_SIZE) come from the environment.
    dist.init_process_group(backend="nccl", init_method="env://")
    # Pin each process to the GPU matching its rank.
    torch.cuda.set_device(dist.get_rank())
    auc_metric = ROCAUC(to_onehot_y=True, softmax=True)
    # Rank 0 contributes two samples on cuda:0 ...
    if dist.get_rank() == 0:
        y_pred = torch.tensor([[0.1, 0.9], [0.3, 1.4]], device=torch.device("cuda:0"))
        y = torch.tensor([[0], [1]], device=torch.device("cuda:0"))
        auc_metric.update([y_pred, y])
    # ... and rank 1 contributes two different samples on cuda:1.
    if dist.get_rank() == 1:
        y_pred = torch.tensor([[0.2, 0.1], [0.1, 0.5]], device=torch.device("cuda:1"))
        y = torch.tensor([[0], [1]], device=torch.device("cuda:1"))
        auc_metric.update([y_pred, y])
    # NOTE(review): assumes ROCAUC.compute() gathers buffers across ranks
    # before reducing -- confirm against the handler's implementation.
    result = auc_metric.compute()
    np.testing.assert_allclose(0.75, result)
    dist.destroy_process_group()
# suppose to execute on 2 rank processes
# python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE
#        --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE
#        --master_addr="192.168.1.1" --master_port=1234
#        test_handler_rocauc_dist.py
if __name__ == "__main__":
    main()
| [
"monai.handlers.ROCAUC",
"torch.distributed.destroy_process_group",
"numpy.testing.assert_allclose",
"torch.distributed.get_rank",
"torch.distributed.init_process_group",
"torch.device"
] | [((694, 755), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (717, 755), True, 'import torch.distributed as dist\n'), ((817, 855), 'monai.handlers.ROCAUC', 'ROCAUC', ([], {'to_onehot_y': '(True)', 'softmax': '(True)'}), '(to_onehot_y=True, softmax=True)\n', (823, 855), False, 'from monai.handlers import ROCAUC\n'), ((1343, 1383), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(0.75)', 'result'], {}), '(0.75, result)\n', (1369, 1383), True, 'import numpy as np\n'), ((1389, 1417), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (1415, 1417), True, 'import torch.distributed as dist\n'), ((783, 798), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (796, 798), True, 'import torch.distributed as dist\n'), ((864, 879), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (877, 879), True, 'import torch.distributed as dist\n'), ((1088, 1103), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1101, 1103), True, 'import torch.distributed as dist\n'), ((949, 971), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (961, 971), False, 'import torch\n'), ((1017, 1039), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1029, 1039), False, 'import torch\n'), ((1173, 1195), 'torch.device', 'torch.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (1185, 1195), False, 'import torch\n'), ((1241, 1263), 'torch.device', 'torch.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (1253, 1263), False, 'import torch\n')] |
import hashlib
import logging
from pathlib import Path
from urllib import request
import numpy as np
import pytest
from jfibsem_dat.read import HEADER_LENGTH, parse_metadata
logger = logging.getLogger(__name__)
TEST_DIR = Path(__file__).resolve().parent
FIXTURE_DIR = TEST_DIR / "fixtures"
BLOCKSIZE = 2**20
EXAMPLE_STEM = "FIBdeSEMAna_21-12-26_005024_0-0-0"
EXAMPLE_HOST = "https://neurophyla.mrc-lmb.cam.ac.uk/share/fibsem_example/"
HEADER_PATHS = {
8: FIXTURE_DIR / "Merlin-6281_19-08-09_120426_0-0-0.header",
}
def md5sum(fpath):
md5 = hashlib.md5()
with open(fpath, "rb") as f:
while True:
data = f.read(BLOCKSIZE)
if not data:
break
md5.update(data)
return md5.hexdigest()
def fake_data(
header_path, out_path, blocksize=None, footer=None, trunc=None, seed=1991
):
with open(header_path, "rb") as f:
header_bytes = f.read(HEADER_LENGTH)
meta = parse_metadata(header_bytes)
shape = meta.data_shape()
to_write = np.product(shape)
if isinstance(footer, int):
to_write += footer
footer = None
if trunc is not None:
to_write = int(to_write * (1 - trunc))
dtype = meta.dtype().newbyteorder("=")
rng = np.random.default_rng(seed)
if blocksize is None:
blocksize = to_write
def rand(size=blocksize):
return rng.integers(
np.iinfo(dtype).min,
np.iinfo(dtype).max,
size=size,
dtype=dtype,
)
out_path.parent.mkdir(exist_ok=True, parents=True)
with open(out_path, "wb") as f:
f.write(header_bytes)
while to_write > blocksize:
f.write(rand().tobytes())
to_write = int(to_write - blocksize)
if to_write:
f.write(rand(to_write).tobytes())
if footer is not None:
f.write(footer)
class FakeDataFactory:
def __init__(self, tmp, seed=1991) -> None:
self.tmp = Path(tmp).resolve()
self.rng = np.random.default_rng(seed)
def _random_seed(self):
return self.rng.integers(np.iinfo("int16").max)
def fake(self, header_fpath: Path, trunc=None):
if trunc is None:
name = header_fpath.with_suffix(".dat").name
else:
name = header_fpath.with_suffix("").name + f"_trunc{trunc}.dat"
path = self.tmp / name
if path.exists():
return path
fake_data(header_fpath, path, 10_000_000, seed=self._random_seed(), trunc=trunc)
return path
@pytest.fixture(scope="session")
def faker(tmp_path_factory):
tmp = Path(tmp_path_factory.mktemp("fake_dats"))
return FakeDataFactory(tmp)
@pytest.fixture(scope="session")
def fake_path(tmp_path_factory):
header_path = FIXTURE_DIR / (EXAMPLE_STEM + ".header")
# 2 channels; order doesn't matter
shape = (2, 14_464, 18_214)
dtype = np.dtype("uint16")
rand = np.random.RandomState(1991)
data = rand.randint(0, np.iinfo(dtype).max, size=shape, dtype=dtype)
path = Path(tmp_path_factory.mktemp("fake_dats")) / "rand-2c-16b.dat"
with open(header_path, "rb") as f:
header_bytes = f.read(HEADER_LENGTH)
header_bytes = header_path.read_bytes()
path.write_bytes(header_bytes + data.tobytes())
return path
def name_append(path: Path, s: str):
return path.parent / (path.stem + s + path.suffix)
@pytest.fixture(scope="session")
def trunc_fake_path(fake_path):
path = name_append(fake_path, "_trunc")
dat_size = fake_path.stat().st_size
trunc_size = int(dat_size * 0.9)
with fake_path.open("rb") as src, path.open("wb") as tgt:
tgt.write(src.read(trunc_size))
return path
def fetch(url, fpath, blocksize=100_000_000):
logger.warning("Downloading FIBSEM example (first time only) at %s", url)
with request.urlopen(url) as req, open(fpath, "wb") as f:
while True:
b = req.read(blocksize)
f.write(b)
if len(b) != blocksize:
break
@pytest.fixture(scope="session")
def real_path():
dat_path = FIXTURE_DIR / (EXAMPLE_STEM + ".dat")
if not dat_path.exists():
fetch(f"{EXAMPLE_HOST}{EXAMPLE_STEM}.dat", dat_path)
# FIBdeSEMAna
dat_md5 = "753de6ea77acd4bd86166c459fe84006"
# Merlin
# dat_md5 = "ca5d342ef389ab212d523b134144199b"
if not md5sum(dat_path) == dat_md5:
pytest.skip("Reference .dat file does not match expected")
return dat_path
@pytest.fixture(scope="session")
def trunc_real_path(real_path):
dat_size = real_path.stat().st_size
trunc_size = int(dat_size * 0.9)
trunc_path = name_append(real_path, "_trunc")
if not trunc_path.exists() or trunc_path.stat().st_size != trunc_size:
with real_path.open("rb") as src, trunc_path.open("wb") as tgt:
tgt.write(src.read(trunc_size))
return trunc_path
| [
"logging.getLogger",
"numpy.product",
"pytest.skip",
"hashlib.md5",
"numpy.random.default_rng",
"pathlib.Path",
"jfibsem_dat.read.parse_metadata",
"numpy.iinfo",
"pytest.fixture",
"numpy.dtype",
"urllib.request.urlopen",
"numpy.random.RandomState"
] | [((186, 213), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (203, 213), False, 'import logging\n'), ((2562, 2593), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2576, 2593), False, 'import pytest\n'), ((2711, 2742), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2725, 2742), False, 'import pytest\n'), ((3416, 3447), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3430, 3447), False, 'import pytest\n'), ((4047, 4078), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4061, 4078), False, 'import pytest\n'), ((4503, 4534), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (4517, 4534), False, 'import pytest\n'), ((557, 570), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (568, 570), False, 'import hashlib\n'), ((958, 986), 'jfibsem_dat.read.parse_metadata', 'parse_metadata', (['header_bytes'], {}), '(header_bytes)\n', (972, 986), False, 'from jfibsem_dat.read import HEADER_LENGTH, parse_metadata\n'), ((1033, 1050), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (1043, 1050), True, 'import numpy as np\n'), ((1260, 1287), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (1281, 1287), True, 'import numpy as np\n'), ((2918, 2936), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (2926, 2936), True, 'import numpy as np\n'), ((2948, 2975), 'numpy.random.RandomState', 'np.random.RandomState', (['(1991)'], {}), '(1991)\n', (2969, 2975), True, 'import numpy as np\n'), ((2029, 2056), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (2050, 2056), True, 'import numpy as np\n'), ((3854, 3874), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (3869, 3874), False, 'from urllib import request\n'), ((4420, 
4478), 'pytest.skip', 'pytest.skip', (['"""Reference .dat file does not match expected"""'], {}), "('Reference .dat file does not match expected')\n", (4431, 4478), False, 'import pytest\n'), ((226, 240), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (230, 240), False, 'from pathlib import Path\n'), ((3003, 3018), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (3011, 3018), True, 'import numpy as np\n'), ((1415, 1430), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1423, 1430), True, 'import numpy as np\n'), ((1448, 1463), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1456, 1463), True, 'import numpy as np\n'), ((1990, 1999), 'pathlib.Path', 'Path', (['tmp'], {}), '(tmp)\n', (1994, 1999), False, 'from pathlib import Path\n'), ((2119, 2136), 'numpy.iinfo', 'np.iinfo', (['"""int16"""'], {}), "('int16')\n", (2127, 2136), True, 'import numpy as np\n')] |
import os
import numpy as np
from zero2ml.utils.data_transformations import train_test_split
from zero2ml.supervised_learning.knn import KNNClassifier
def main():
# Construct path to dataset
root_directory_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
data_path = os.path.join(root_directory_path, "tests", "test_data", "breast_cancer.csv")
# Read dataset
data = np.genfromtxt(data_path, delimiter=',', skip_header=1)
X = data[:,:-1]
y = data[:,-1].astype(int)
# Train test split
X_train, y_train, X_test, y_test = train_test_split(X, y, test_size=0.1, random_state=21)
# Instantiate model
model = KNNClassifier(k=5)
# Calculate test accuracy
test_results = model.score(X_train, y_train, X_test, y_test)
print("Finished training k-nearest neighbor classification model.\n")
print("Testing accuracy: {:0.6f}".format(test_results))
if __name__ == "__main__":
main()
| [
"zero2ml.utils.data_transformations.train_test_split",
"os.path.join",
"zero2ml.supervised_learning.knn.KNNClassifier",
"os.path.dirname",
"numpy.genfromtxt"
] | [((301, 377), 'os.path.join', 'os.path.join', (['root_directory_path', '"""tests"""', '"""test_data"""', '"""breast_cancer.csv"""'], {}), "(root_directory_path, 'tests', 'test_data', 'breast_cancer.csv')\n", (313, 377), False, 'import os\n'), ((409, 463), 'numpy.genfromtxt', 'np.genfromtxt', (['data_path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(data_path, delimiter=',', skip_header=1)\n", (422, 463), True, 'import numpy as np\n'), ((578, 632), 'zero2ml.utils.data_transformations.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': '(21)'}), '(X, y, test_size=0.1, random_state=21)\n', (594, 632), False, 'from zero2ml.utils.data_transformations import train_test_split\n'), ((670, 688), 'zero2ml.supervised_learning.knn.KNNClassifier', 'KNNClassifier', ([], {'k': '(5)'}), '(k=5)\n', (683, 688), False, 'from zero2ml.supervised_learning.knn import KNNClassifier\n'), ((257, 282), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (272, 282), False, 'import os\n')] |
""" Sunrise and Sunset Estimation Module
This module contains functions for estimating sunrise and sunset times from an
unlabel PV power dataset.
"""
import numpy as np
from solardatatools.signal_decompositions import tl1_l2d2p365
def rise_set_rough(bool_msk):
nvals = bool_msk.shape[0]
num_meas_per_hour = nvals / 24
hour_of_day = np.arange(0, 24, 1.0 / num_meas_per_hour)
sunrise_idxs = np.argmax(bool_msk, axis=0)
sunset_idxs = nvals - np.argmax(np.flip(bool_msk, axis=0), axis=0) - 1
sunrises = np.nan * np.ones_like(sunrise_idxs)
sunsets = np.nan * np.ones_like(sunset_idxs)
sunrises[sunrise_idxs != 0] = hour_of_day[sunrise_idxs][sunrise_idxs != 0]
sunsets[sunset_idxs != nvals - 1] = hour_of_day[sunset_idxs][
sunset_idxs != nvals - 1
]
# sunrises[np.isnan(sunrises)] = 1000
# sunrises[sunrises > 12] = np.nan
# sunsets[np.isnan(sunsets)] = -1000
# sunsets[sunsets < 12] = np.nan
return {"sunrises": sunrises, "sunsets": sunsets}
def rise_set_smoothed(rough_dict, sunrise_tau=0.05, sunset_tau=0.95, solver=None):
sunrises = rough_dict["sunrises"]
sunsets = rough_dict["sunsets"]
sr_smoothed = tl1_l2d2p365(
sunrises, ~np.isnan(sunrises), tau=sunrise_tau, solver=solver
)
ss_smoothed = tl1_l2d2p365(
sunsets, ~np.isnan(sunsets), tau=sunset_tau, solver=solver
)
return {"sunrises": sr_smoothed, "sunsets": ss_smoothed}
| [
"numpy.ones_like",
"numpy.flip",
"numpy.argmax",
"numpy.isnan",
"numpy.arange"
] | [((348, 389), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(1.0 / num_meas_per_hour)'], {}), '(0, 24, 1.0 / num_meas_per_hour)\n', (357, 389), True, 'import numpy as np\n'), ((409, 436), 'numpy.argmax', 'np.argmax', (['bool_msk'], {'axis': '(0)'}), '(bool_msk, axis=0)\n', (418, 436), True, 'import numpy as np\n'), ((536, 562), 'numpy.ones_like', 'np.ones_like', (['sunrise_idxs'], {}), '(sunrise_idxs)\n', (548, 562), True, 'import numpy as np\n'), ((586, 611), 'numpy.ones_like', 'np.ones_like', (['sunset_idxs'], {}), '(sunset_idxs)\n', (598, 611), True, 'import numpy as np\n'), ((1219, 1237), 'numpy.isnan', 'np.isnan', (['sunrises'], {}), '(sunrises)\n', (1227, 1237), True, 'import numpy as np\n'), ((1326, 1343), 'numpy.isnan', 'np.isnan', (['sunsets'], {}), '(sunsets)\n', (1334, 1343), True, 'import numpy as np\n'), ((473, 498), 'numpy.flip', 'np.flip', (['bool_msk'], {'axis': '(0)'}), '(bool_msk, axis=0)\n', (480, 498), True, 'import numpy as np\n')] |
import numpy as np
def most_common(lst):
return max(set(lst), key=lst.count)
def get_preds(y_pred):
y_pred = np.array(y_pred).T.tolist()
y_pred = [most_common(y) for y in y_pred]
return y_pred
def get_score(score, thr, pred):
if pred == 0:
return 1-score/thr
return (score-thr)/(1-thr)
def get_scores(y_scores, preds, thrs = (0,0,0)):
y_scores[0] = [get_score(score, thrs[0], preds[i]) for i, score in enumerate(y_scores[0])]
y_scores[1] = [get_score(score, thrs[1], preds[i]) for i, score in enumerate(y_scores[1])]
y_scores[2] = [get_score(score, thrs[2], preds[i]) for i, score in enumerate(y_scores[2])]
y_scores = np.array(y_scores).T.tolist()
y_scores = [np.mean(y) for y in y_scores]
return y_scores
def predict_df(df, clf_rf, clf_mlp, clf_knn):
thr_rf = 0.435
thr_mlp = 0.466
thr_knn = 0.4
X = df
y_rf = clf_rf.predict_proba(X)[:, 1]
y_mlp = clf_mlp.predict_proba(X.values)[:, 1]
y_knn = clf_knn.predict_proba(X)[:, 1]
y_scores = [y_rf, y_mlp, y_knn]
y_rf = [round(i - thr_rf + 0.5) for i in y_rf]
y_mlp = [round(i - thr_mlp + 0.5) for i in y_mlp]
y_knn = [round(i - thr_knn + 0.5) for i in y_knn]
y_pred = [y_rf, y_mlp, y_knn]
preds = get_preds(y_pred)
thresholds = (thr_rf, thr_mlp, thr_knn)
scores = get_scores(y_scores, preds, thresholds)
results = preds, scores
results = np.array(results).T.tolist()
return results
| [
"numpy.mean",
"numpy.array"
] | [((696, 706), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (703, 706), True, 'import numpy as np\n'), ((120, 136), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (128, 136), True, 'import numpy as np\n'), ((653, 671), 'numpy.array', 'np.array', (['y_scores'], {}), '(y_scores)\n', (661, 671), True, 'import numpy as np\n'), ((1350, 1367), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (1358, 1367), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from models.models import create_model
from models.mapping_model import Pix2PixHDModel_Mapping
import util.util as util
from PIL import Image
import torch
import torchvision.utils as vutils
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
def data_transforms(img, method=Image.BILINEAR, scale=False):
ow, oh = img.size
pw, ph = ow, oh
if scale == True:
if ow < oh:
ow = 256
oh = ph / pw * 256
else:
oh = 256
ow = pw / ph * 256
h = int(round(oh / 4) * 4)
w = int(round(ow / 4) * 4)
if (h == ph) and (w == pw):
return img
return img.resize((w, h), method)
def data_transforms_rgb_old(img):
w, h = img.size
A = img
if w < 256 or h < 256:
A = transforms.Scale(256, Image.BILINEAR)(img)
return transforms.CenterCrop(256)(A)
def irregular_hole_synthesize(img, mask):
img_np = np.array(img).astype("uint8")
mask_np = np.array(mask).astype("uint8")
mask_np = mask_np / 255
img_new = img_np * (1 - mask_np) + mask_np * 255
hole_img = Image.fromarray(img_new.astype("uint8")).convert("RGB")
return hole_img
def parameter_set(opt):
## Default parameters
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
opt.label_nc = 0
opt.n_downsample_global = 3
opt.mc = 64
opt.k_size = 4
opt.start_r = 1
opt.mapping_n_block = 6
opt.map_mc = 512
opt.no_instance = True
opt.checkpoints_dir = "./checkpoints/restoration"
##
if opt.Quality_restore:
opt.name = "mapping_quality"
opt.load_pretrainA = os.path.join(opt.checkpoints_dir, "VAE_A_quality")
opt.load_pretrainB = os.path.join(opt.checkpoints_dir, "VAE_B_quality")
if opt.Scratch_and_Quality_restore:
opt.NL_res = True
opt.use_SN = True
opt.correlation_renormalize = True
opt.NL_use_mask = True
opt.NL_fusion_method = "combine"
opt.non_local = "Setting_42"
opt.name = "mapping_scratch"
opt.load_pretrainA = os.path.join(opt.checkpoints_dir, "VAE_A_quality")
opt.load_pretrainB = os.path.join(opt.checkpoints_dir, "VAE_B_scratch")
if __name__ == "__main__":
opt = TestOptions().parse(save=False)
parameter_set(opt)
model = Pix2PixHDModel_Mapping()
model.initialize(opt)
model.eval()
if not os.path.exists(opt.outputs_dir + "/" + "input_image"):
os.makedirs(opt.outputs_dir + "/" + "input_image")
if not os.path.exists(opt.outputs_dir + "/" + "restored_image"):
os.makedirs(opt.outputs_dir + "/" + "restored_image")
if not os.path.exists(opt.outputs_dir + "/" + "origin"):
os.makedirs(opt.outputs_dir + "/" + "origin")
dataset_size = 0
input_loader = os.listdir(opt.test_input)
dataset_size = len(os.listdir(opt.test_input))
input_loader.sort()
if opt.test_mask != "":
mask_loader = os.listdir(opt.test_mask)
dataset_size = len(os.listdir(opt.test_mask))
mask_loader.sort()
img_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
mask_transform = transforms.ToTensor()
for i in range(dataset_size):
input_name = input_loader[i]
input = Image.open(os.path.join(opt.test_input, input_name)).convert("RGB")
print("Now you are processing %s" % (input_name))
if opt.NL_use_mask:
mask_name = mask_loader[i]
mask = Image.open(os.path.join(opt.test_mask, mask_name)).convert("RGB")
origin = input
input = irregular_hole_synthesize(input, mask)
mask = mask_transform(mask)
mask = mask[:1, :, :] ## Convert to single channel
mask = mask.unsqueeze(0)
input = img_transform(input)
input = input.unsqueeze(0)
else:
if opt.test_mode == "Scale":
input = data_transforms(input, scale=True)
if opt.test_mode == "Full":
input = data_transforms(input, scale=False)
if opt.test_mode == "Crop":
input = data_transforms_rgb_old(input)
origin = input
input = img_transform(input)
input = input.unsqueeze(0)
mask = torch.zeros_like(input)
### Necessary input
try:
print("Inference input is %s", input)
generated = model.inference(input, mask)
except Exception as e:
print("Skip %s" % (input_name))
print("Excetion that occured is %s",e)
continue
if input_name.endswith(".jpg"):
input_name = input_name[:-4] + ".png"
image_grid = vutils.save_image(
(input + 1.0) / 2.0,
opt.outputs_dir + "/input_image/" + input_name,
nrow=1,
padding=0,
normalize=True,
)
image_grid = vutils.save_image(
(generated.data.cpu() + 1.0) / 2.0,
opt.outputs_dir + "/restored_image/" + input_name,
nrow=1,
padding=0,
normalize=True,
)
origin.save(opt.outputs_dir + "/origin/" + input_name)
| [
"torchvision.transforms.CenterCrop",
"os.path.exists",
"os.listdir",
"os.makedirs",
"torchvision.transforms.Scale",
"os.path.join",
"torch.zeros_like",
"numpy.array",
"options.test_options.TestOptions",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"torchvision.utils.s... | [((2567, 2591), 'models.mapping_model.Pix2PixHDModel_Mapping', 'Pix2PixHDModel_Mapping', ([], {}), '()\n', (2589, 2591), False, 'from models.mapping_model import Pix2PixHDModel_Mapping\n'), ((3050, 3076), 'os.listdir', 'os.listdir', (['opt.test_input'], {}), '(opt.test_input)\n', (3060, 3076), False, 'import os\n'), ((3466, 3487), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3485, 3487), True, 'import torchvision.transforms as transforms\n'), ((1080, 1106), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(256)'], {}), '(256)\n', (1101, 1106), True, 'import torchvision.transforms as transforms\n'), ((1887, 1937), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', '"""VAE_A_quality"""'], {}), "(opt.checkpoints_dir, 'VAE_A_quality')\n", (1899, 1937), False, 'import os\n'), ((1967, 2017), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', '"""VAE_B_quality"""'], {}), "(opt.checkpoints_dir, 'VAE_B_quality')\n", (1979, 2017), False, 'import os\n'), ((2328, 2378), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', '"""VAE_A_quality"""'], {}), "(opt.checkpoints_dir, 'VAE_A_quality')\n", (2340, 2378), False, 'import os\n'), ((2408, 2458), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', '"""VAE_B_scratch"""'], {}), "(opt.checkpoints_dir, 'VAE_B_scratch')\n", (2420, 2458), False, 'import os\n'), ((2648, 2701), 'os.path.exists', 'os.path.exists', (["(opt.outputs_dir + '/' + 'input_image')"], {}), "(opt.outputs_dir + '/' + 'input_image')\n", (2662, 2701), False, 'import os\n'), ((2711, 2761), 'os.makedirs', 'os.makedirs', (["(opt.outputs_dir + '/' + 'input_image')"], {}), "(opt.outputs_dir + '/' + 'input_image')\n", (2722, 2761), False, 'import os\n'), ((2773, 2829), 'os.path.exists', 'os.path.exists', (["(opt.outputs_dir + '/' + 'restored_image')"], {}), "(opt.outputs_dir + '/' + 'restored_image')\n", (2787, 2829), False, 'import os\n'), ((2839, 2892), 'os.makedirs', 
'os.makedirs', (["(opt.outputs_dir + '/' + 'restored_image')"], {}), "(opt.outputs_dir + '/' + 'restored_image')\n", (2850, 2892), False, 'import os\n'), ((2904, 2952), 'os.path.exists', 'os.path.exists', (["(opt.outputs_dir + '/' + 'origin')"], {}), "(opt.outputs_dir + '/' + 'origin')\n", (2918, 2952), False, 'import os\n'), ((2962, 3007), 'os.makedirs', 'os.makedirs', (["(opt.outputs_dir + '/' + 'origin')"], {}), "(opt.outputs_dir + '/' + 'origin')\n", (2973, 3007), False, 'import os\n'), ((3100, 3126), 'os.listdir', 'os.listdir', (['opt.test_input'], {}), '(opt.test_input)\n', (3110, 3126), False, 'import os\n'), ((3203, 3228), 'os.listdir', 'os.listdir', (['opt.test_mask'], {}), '(opt.test_mask)\n', (3213, 3228), False, 'import os\n'), ((5028, 5153), 'torchvision.utils.save_image', 'vutils.save_image', (['((input + 1.0) / 2.0)', "(opt.outputs_dir + '/input_image/' + input_name)"], {'nrow': '(1)', 'padding': '(0)', 'normalize': '(True)'}), "((input + 1.0) / 2.0, opt.outputs_dir + '/input_image/' +\n input_name, nrow=1, padding=0, normalize=True)\n", (5045, 5153), True, 'import torchvision.utils as vutils\n'), ((1026, 1063), 'torchvision.transforms.Scale', 'transforms.Scale', (['(256)', 'Image.BILINEAR'], {}), '(256, Image.BILINEAR)\n', (1042, 1063), True, 'import torchvision.transforms as transforms\n'), ((1168, 1181), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1176, 1181), True, 'import numpy as np\n'), ((1212, 1226), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (1220, 1226), True, 'import numpy as np\n'), ((2499, 2512), 'options.test_options.TestOptions', 'TestOptions', ([], {}), '()\n', (2510, 2512), False, 'from options.test_options import TestOptions\n'), ((3256, 3281), 'os.listdir', 'os.listdir', (['opt.test_mask'], {}), '(opt.test_mask)\n', (3266, 3281), False, 'import os\n'), ((3360, 3381), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3379, 3381), True, 'import torchvision.transforms as transforms\n'), 
((3383, 3437), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (3403, 3437), True, 'import torchvision.transforms as transforms\n'), ((4599, 4622), 'torch.zeros_like', 'torch.zeros_like', (['input'], {}), '(input)\n', (4615, 4622), False, 'import torch\n'), ((3588, 3628), 'os.path.join', 'os.path.join', (['opt.test_input', 'input_name'], {}), '(opt.test_input, input_name)\n', (3600, 3628), False, 'import os\n'), ((3802, 3840), 'os.path.join', 'os.path.join', (['opt.test_mask', 'mask_name'], {}), '(opt.test_mask, mask_name)\n', (3814, 3840), False, 'import os\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests add_loss API correctness."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import Input
from keras.testing_infra import test_combinations
from keras import layers
from keras import losses
from keras import Model
from keras.optimizers import optimizer_v2
from keras import Sequential
from keras.testing_infra import test_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
MAE = losses.MeanAbsoluteError
mae = losses.mean_absolute_error
def get_ctl_train_step(model):
  """Build a custom-training-loop step that minimizes `model.losses`.

  Args:
    model: A Keras model whose `add_loss` losses drive the update.

  Returns:
    A `train_step(x, y, w=None)` callable that runs one SGD(0.05) update on
    the model's trainable weights and returns the scalar summed loss.
  """
  optimizer = optimizer_v2.gradient_descent.SGD(0.05)

  def train_step(x, y, w=None):
    # Sample weights are optional; the model input list grows accordingly.
    model_inputs = [x, y] if w is None else [x, y, w]
    with tf.GradientTape() as tape:
      model(model_inputs)
      loss = tf.reduce_sum(model.losses)
    grads = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    return loss

  return train_step
# TODO(psv): Add tests cases where a model is used in loss function but is
# not part of the training model.
class TestAddLossCorrectness(test_combinations.TestCase):
def setUp(self):
super(TestAddLossCorrectness, self).setUp()
self.x = np.array([[0.], [1.], [2.]], dtype='float32')
self.y = np.array([[0.5], [2.], [3.5]], dtype='float32')
self.w = np.array([[1.25], [0.5], [1.25]], dtype='float32')
@test_combinations.run_all_keras_modes
def test_loss_on_model_fit(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(MAE()(targets, outputs))
model.add_loss(tf.reduce_mean(mae(targets, outputs)))
model.compile(
optimizer_v2.gradient_descent.SGD(0.05),
run_eagerly=test_utils.should_run_eagerly())
history = model.fit([self.x, self.y], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@test_combinations.run_with_all_model_types(exclude_models=['sequential'])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_loss_callable_on_model_fit(self):
model = test_utils.get_model_from_layers([test_utils.Bias()],
input_shape=(1,))
def callable_loss():
return tf.reduce_sum(model.weights)
model.add_loss(callable_loss)
model.compile(
optimizer_v2.gradient_descent.SGD(0.1),
run_eagerly=test_utils.should_run_eagerly())
history = model.fit(self.x, batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4], 1e-3)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_loss_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(MAE()(targets, outputs))
model.add_loss(tf.reduce_mean(mae(targets, outputs)))
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
train_step = tf.function(get_model_and_train_step())
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_loss_callable_on_model_ctl(self):
def get_model_and_train_step():
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
def callable_loss():
return tf.reduce_sum(model.weights)
model.add_loss(callable_loss)
return get_ctl_train_step(model)
train_step = get_model_and_train_step()
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)
train_step = tf.function(get_model_and_train_step())
loss = [train_step(self.x, self.y) for _ in range(5)]
self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)
@test_combinations.run_all_keras_modes
def test_loss_with_sample_weight_on_model_fit(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
sw = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model([inputs, targets, sw], outputs)
model.add_loss(MAE()(targets, outputs, sw))
model.add_loss(3 * tf.reduce_mean(sw * mae(targets, outputs)))
model.compile(
optimizer_v2.gradient_descent.SGD(0.025),
run_eagerly=test_utils.should_run_eagerly())
history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)
  @test_combinations.run_all_keras_modes(always_skip_v1=True)
  def test_loss_with_sample_weight_on_model_ctl(self):
    """A custom training loop honors sample weights in `add_loss` losses."""
    def get_model_and_train_step():
      inputs = Input(shape=(1,))
      targets = Input(shape=(1,))
      sw = Input(shape=(1,))
      outputs = test_utils.Bias()(inputs)
      model = Model([inputs, targets, sw], outputs)
      # Weighted MAE registered twice: via the loss class and functionally.
      model.add_loss(MAE()(targets, outputs, sw))
      model.add_loss(tf.reduce_mean(sw * mae(targets, outputs)))
      return get_ctl_train_step(model)
    # Eager execution of the loop.
    train_step = get_model_and_train_step()
    loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
    self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
    # The same loop compiled with `tf.function` must produce identical losses.
    train_step = tf.function(get_model_and_train_step())
    loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
    self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
  @test_combinations.run_all_keras_modes
  def test_loss_with_sample_weight_in_model_call(self):
    """Sample-weighted losses added inside `Model.call` are tracked and minimized."""
    class MyModel(Model):
      def __init__(self):
        super(MyModel, self).__init__()
        self.bias = test_utils.Bias()
      def call(self, inputs):
        # `inputs` is the list [features, targets, sample_weights].
        outputs = self.bias(inputs[0])
        # The same weighted MAE is added twice (loss class + functional),
        # so `model.losses` must contain exactly two entries.
        self.add_loss(MAE()(inputs[1], outputs, inputs[2]))
        self.add_loss(tf.reduce_mean(inputs[2] * mae(inputs[1], outputs)))
        return outputs
    model = MyModel()
    # Build the model once before compiling/training.
    model.predict([self.x, self.y, self.w])
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.05),
        run_eagerly=test_utils.should_run_eagerly())
    history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
    self.assertEqual(len(model.losses), 2)
    self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
    # After training, evaluation loss should have converged to ~1.0.
    eval_out = model.evaluate([self.x, self.y, self.w])
    self.assertAlmostEqual(eval_out, 1.0, 3)
  @test_combinations.run_all_keras_modes
  def test_loss_with_sample_weight_in_layer_call(self):
    """Sample-weighted losses added inside `Layer.call` are tracked and minimized."""
    class MyLayer(layers.Layer):
      def __init__(self):
        super(MyLayer, self).__init__()
        self.bias = test_utils.Bias()
      def call(self, inputs):
        # `inputs` is the list [features, targets, sample_weights].
        out = self.bias(inputs[0])
        # Weighted MAE added twice: via the loss class and functionally.
        self.add_loss(MAE()(inputs[1], out, inputs[2]))
        self.add_loss(tf.reduce_mean(inputs[2] * mae(inputs[1], out)))
        return out
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    sw = Input(shape=(1,))
    outputs = MyLayer()([inputs, targets, sw])
    model = Model([inputs, targets, sw], outputs)
    # Build the model once before compiling/training.
    model.predict([self.x, self.y, self.w])
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.05),
        run_eagerly=test_utils.should_run_eagerly())
    history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)
    # Both evaluate() and test_on_batch() must report the converged loss.
    output = model.evaluate([self.x, self.y, self.w])
    self.assertAlmostEqual(output, 1.0, 3)
    output = model.test_on_batch([self.x, self.y, self.w])
    self.assertAlmostEqual(output, 1.0, 3)
@test_combinations.run_all_keras_modes
def test_loss_on_layer(self):
class MyLayer(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs))
return inputs
inputs = Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = Model(inputs, outputs)
self.assertEqual(len(model.losses), 1)
model.compile(
'sgd',
'mse',
run_eagerly=test_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
  @test_combinations.run_all_keras_modes
  @test_combinations.run_with_all_model_types
  def test_activity_regularizer(self):
    """Adding an l2 activity regularizer strictly increases the final loss."""
    loss = {}
    # Train the same architecture twice — without and with the
    # regularizer — and compare the evaluation losses.
    for reg in [None, 'l2']:
      model_layers = [
          layers.Dense(
              10,
              activation='relu',
              activity_regularizer=reg,
              kernel_initializer='ones',
              use_bias=False),
          layers.Dense(
              1,
              activation='sigmoid',
              kernel_initializer='ones',
              use_bias=False),
      ]

      model = test_utils.get_model_from_layers(
          model_layers, input_shape=(10,))

      x = np.ones((10, 10), 'float32')
      y = np.zeros((10, 1), 'float32')

      optimizer = RMSPropOptimizer(learning_rate=0.001)
      model.compile(
          optimizer,
          'binary_crossentropy',
          run_eagerly=test_utils.should_run_eagerly())
      model.fit(x, y, batch_size=2, epochs=5)
      loss[reg] = model.evaluate(x, y)
    # The regularized run pays the extra activity penalty.
    self.assertLess(loss[None], loss['l2'])
@test_combinations.run_all_keras_modes
@test_combinations.run_with_all_model_types
def test_activity_regularizer_loss_value(self):
layer = layers.Dense(
1,
kernel_initializer='zeros',
bias_initializer='ones',
activity_regularizer='l2')
model = test_utils.get_model_from_layers([layer], input_shape=(10,))
x = np.ones((10, 10), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
run_eagerly=test_utils.should_run_eagerly())
loss = model.test_on_batch(x)
self.assertAlmostEqual(0.01, loss, places=4)
@test_combinations.run_all_keras_modes
def test_activity_regularizer_batch_independent(self):
inputs = layers.Input(shape=(10,))
x = layers.Dense(10, activation='relu', activity_regularizer='l2')(inputs)
outputs = layers.Dense(1, activation='sigmoid')(x)
model = Model(inputs, outputs)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
run_eagerly=test_utils.should_run_eagerly())
loss_small_batch = model.test_on_batch(np.ones((10, 10), 'float32'))
loss_big_batch = model.test_on_batch(np.ones((20, 10), 'float32'))
self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
@test_combinations.run_all_keras_modes
def test_with_shared_layer(self):
class LayerWithLoss(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs), inputs=inputs)
return inputs * 2
shared_layer = LayerWithLoss()
m = Sequential([shared_layer])
m2 = Sequential([shared_layer, m])
m2(tf.constant([1, 2, 3]))
self.assertEqual(len(m2.losses), 2)
self.assertAllClose(m2.losses, [6, 12])
@test_combinations.run_all_keras_modes
def test_with_shared_nested_layer(self):
class LayerWithLoss(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs), inputs=inputs)
return inputs * 2
class LayerWithNestedLayerWithLoss(layers.Layer):
def __init__(self):
super(LayerWithNestedLayerWithLoss, self).__init__()
self.loss_layer = LayerWithLoss()
def call(self, inputs):
return self.loss_layer(inputs)
shared_layer = LayerWithNestedLayerWithLoss()
m = Sequential([shared_layer])
m2 = Sequential([shared_layer, m])
m2(tf.constant([1, 2, 3]))
self.assertEqual(len(m2.losses), 2)
self.assertAllClose(m2.losses, [6, 12])
  @test_combinations.run_all_keras_modes
  def test_clear_losses(self):
    """Losses accumulate per symbolic input in graph mode and are cleared on
    every `__call__` in eager mode."""

    class LayerWithSharedNestedLossLayer(layers.Layer):

      def __init__(self):
        super(LayerWithSharedNestedLossLayer, self).__init__()
        self.loss_layer = layers.ActivityRegularization(l2=0.001)
        # Adds one unconditional (input-independent) weight-regularization loss.
        self.add_weight(shape=(1,), regularizer='l2')

      def call(self, x):
        # The same activity-regularizing layer is applied twice, so each call
        # contributes two input-conditional activity losses.
        x = self.loss_layer(x)
        return self.loss_layer(x)

    inputs = Input(shape=(1,))
    l = LayerWithSharedNestedLossLayer()  # Weight loss + 2 activity losses.
    x1 = tf.ones((1, 1))
    _ = l(x1)
    if not tf.executing_eagerly():
      # Graph mode: 2 activity losses conditional on x1, 1 unconditional loss.
      self.assertEqual(len(l.get_losses_for(x1)), 2)
      self.assertEqual(len(l.get_losses_for(None)), 1)
    x2 = tf.ones((1, 1))
    _ = l(x2)
    if not tf.executing_eagerly():
      # A second call adds 2 more losses conditional on x2; earlier ones stay.
      self.assertEqual(len(l.get_losses_for(x1)), 2)
      self.assertEqual(len(l.get_losses_for(x2)), 2)
      self.assertEqual(len(l.get_losses_for(None)), 1)
    outputs = l(inputs)
    model = Model(inputs, outputs)
    if not tf.executing_eagerly():
      # 3 calls x 2 activity losses + 1 weight loss = 7 tracked losses.
      self.assertEqual(len(model.losses), 7)
      self.assertEqual(len(l.get_losses_for(x1)), 2)
      self.assertEqual(len(l.get_losses_for(x2)), 2)
      self.assertEqual(len(l.get_losses_for(None)), 1)
    x3 = tf.ones((1, 1))
    model(x3)
    x4 = tf.ones((1, 1))
    model(x4)
    if tf.executing_eagerly():
      # Eager losses are cleared every `__call__`.
      self.assertEqual(len(model.losses), 3)
    else:
      # Graph mode keeps accumulating: 7 + 2 (x3) + 2 (x4) = 11.
      self.assertEqual(len(model.losses), 11)
      self.assertEqual(len(model.get_losses_for(x3)), 2)
      self.assertEqual(len(model.get_losses_for(x4)), 2)
      self.assertEqual(len(model.get_losses_for(None)), 1)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_invalid_constant_input(self):
inputs = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model(inputs, outputs)
with self.assertRaisesRegex(
ValueError,
'Expected a symbolic Tensors or a callable for the loss value'):
model.add_loss(1.)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_invalid_variable_input(self):
inputs = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model(inputs, outputs)
with self.assertRaisesRegex(
ValueError,
'Expected a symbolic Tensors or a callable for the loss value'):
model.add_loss(model.weights[0])
@test_combinations.run_all_keras_modes
def test_add_entropy_loss_on_functional_model(self):
inputs = Input(shape=(1,))
targets = Input(shape=(1,))
outputs = test_utils.Bias()(inputs)
model = Model([inputs, targets], outputs)
model.add_loss(losses.binary_crossentropy(targets, outputs))
model.compile('sgd', run_eagerly=test_utils.should_run_eagerly())
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
model.fit([self.x, self.y], batch_size=3, epochs=5)
self.assertNotIn('Gradients do not exist for variables',
str(mock_log.call_args))
# Run the tests in this file when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| [
"keras.losses.binary_crossentropy",
"keras.testing_infra.test_utils.should_run_eagerly",
"keras.testing_infra.test_utils.get_model_from_layers",
"numpy.array",
"keras.layers.Dense",
"keras.Sequential",
"tensorflow.compat.v2.executing_eagerly",
"keras.Model",
"keras.testing_infra.test_combinations.ru... | [((1281, 1320), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'optimizer_v2.gradient_descent.SGD', (['(0.05)'], {}), '(0.05)\n', (1314, 1320), False, 'from keras.optimizers import optimizer_v2\n'), ((2708, 2781), 'keras.testing_infra.test_combinations.run_with_all_model_types', 'test_combinations.run_with_all_model_types', ([], {'exclude_models': "['sequential']"}), "(exclude_models=['sequential'])\n", (2750, 2781), False, 'from keras.testing_infra import test_combinations\n'), ((2785, 2843), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), '(always_skip_v1=True)\n', (2822, 2843), False, 'from keras.testing_infra import test_combinations\n'), ((3383, 3441), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), '(always_skip_v1=True)\n', (3420, 3441), False, 'from keras.testing_infra import test_combinations\n'), ((4163, 4221), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), '(always_skip_v1=True)\n', (4200, 4221), False, 'from keras.testing_infra import test_combinations\n'), ((5637, 5695), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), '(always_skip_v1=True)\n', (5674, 5695), False, 'from keras.testing_infra import test_combinations\n'), ((14369, 14427), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), '(always_skip_v1=True)\n', (14406, 14427), False, 'from keras.testing_infra import test_combinations\n'), ((14730, 14788), 'keras.testing_infra.test_combinations.run_all_keras_modes', 'test_combinations.run_all_keras_modes', ([], {'always_skip_v1': '(True)'}), 
'(always_skip_v1=True)\n', (14767, 14788), False, 'from keras.testing_infra import test_combinations\n'), ((15760, 15774), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (15772, 15774), True, 'import tensorflow.compat.v2 as tf\n'), ((1935, 1983), 'numpy.array', 'np.array', (['[[0.0], [1.0], [2.0]]'], {'dtype': '"""float32"""'}), "([[0.0], [1.0], [2.0]], dtype='float32')\n", (1943, 1983), True, 'import numpy as np\n'), ((1994, 2042), 'numpy.array', 'np.array', (['[[0.5], [2.0], [3.5]]'], {'dtype': '"""float32"""'}), "([[0.5], [2.0], [3.5]], dtype='float32')\n", (2002, 2042), True, 'import numpy as np\n'), ((2055, 2105), 'numpy.array', 'np.array', (['[[1.25], [0.5], [1.25]]'], {'dtype': '"""float32"""'}), "([[1.25], [0.5], [1.25]], dtype='float32')\n", (2063, 2105), True, 'import numpy as np\n'), ((2197, 2214), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (2202, 2214), False, 'from keras import Input\n'), ((2229, 2246), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (2234, 2246), False, 'from keras import Input\n'), ((2299, 2332), 'keras.Model', 'Model', (['[inputs, targets]', 'outputs'], {}), '([inputs, targets], outputs)\n', (2304, 2332), False, 'from keras import Model\n'), ((5073, 5090), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5078, 5090), False, 'from keras import Input\n'), ((5105, 5122), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5110, 5122), False, 'from keras import Input\n'), ((5132, 5149), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5137, 5149), False, 'from keras import Input\n'), ((5202, 5239), 'keras.Model', 'Model', (['[inputs, targets, sw]', 'outputs'], {}), '([inputs, targets, sw], outputs)\n', (5207, 5239), False, 'from keras import Model\n'), ((7900, 7917), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (7905, 7917), False, 'from keras import Input\n'), ((7932, 7949), 'keras.Input', 'Input', 
([], {'shape': '(1,)'}), '(shape=(1,))\n', (7937, 7949), False, 'from keras import Input\n'), ((7959, 7976), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (7964, 7976), False, 'from keras import Input\n'), ((8037, 8074), 'keras.Model', 'Model', (['[inputs, targets, sw]', 'outputs'], {}), '([inputs, targets, sw], outputs)\n', (8042, 8074), False, 'from keras import Model\n'), ((8817, 8828), 'keras.Input', 'Input', (['(3,)'], {}), '((3,))\n', (8822, 8828), False, 'from keras import Input\n'), ((8891, 8913), 'keras.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (8896, 8913), False, 'from keras import Model\n'), ((10333, 10432), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""ones"""', 'activity_regularizer': '"""l2"""'}), "(1, kernel_initializer='zeros', bias_initializer='ones',\n activity_regularizer='l2')\n", (10345, 10432), False, 'from keras import layers\n'), ((10475, 10535), 'keras.testing_infra.test_utils.get_model_from_layers', 'test_utils.get_model_from_layers', (['[layer]'], {'input_shape': '(10,)'}), '([layer], input_shape=(10,))\n', (10507, 10535), False, 'from keras.testing_infra import test_utils\n'), ((10545, 10573), 'numpy.ones', 'np.ones', (['(10, 10)', '"""float32"""'], {}), "((10, 10), 'float32')\n", (10552, 10573), True, 'import numpy as np\n'), ((10590, 10627), 'tensorflow.python.training.rmsprop.RMSPropOptimizer', 'RMSPropOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (10606, 10627), False, 'from tensorflow.python.training.rmsprop import RMSPropOptimizer\n'), ((10914, 10939), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(10,)'}), '(shape=(10,))\n', (10926, 10939), False, 'from keras import layers\n'), ((11086, 11108), 'keras.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (11091, 11108), False, 'from keras import Model\n'), ((11126, 11163), 
'tensorflow.python.training.rmsprop.RMSPropOptimizer', 'RMSPropOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (11142, 11163), False, 'from tensorflow.python.training.rmsprop import RMSPropOptimizer\n'), ((11751, 11777), 'keras.Sequential', 'Sequential', (['[shared_layer]'], {}), '([shared_layer])\n', (11761, 11777), False, 'from keras import Sequential\n'), ((11787, 11816), 'keras.Sequential', 'Sequential', (['[shared_layer, m]'], {}), '([shared_layer, m])\n', (11797, 11816), False, 'from keras import Sequential\n'), ((12489, 12515), 'keras.Sequential', 'Sequential', (['[shared_layer]'], {}), '([shared_layer])\n', (12499, 12515), False, 'from keras import Sequential\n'), ((12525, 12554), 'keras.Sequential', 'Sequential', (['[shared_layer, m]'], {}), '([shared_layer, m])\n', (12535, 12554), False, 'from keras import Sequential\n'), ((13115, 13132), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (13120, 13132), False, 'from keras import Input\n'), ((13220, 13235), 'tensorflow.compat.v2.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (13227, 13235), True, 'import tensorflow.compat.v2 as tf\n'), ((13403, 13418), 'tensorflow.compat.v2.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (13410, 13418), True, 'import tensorflow.compat.v2 as tf\n'), ((13666, 13688), 'keras.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (13671, 13688), False, 'from keras import Model\n'), ((13940, 13955), 'tensorflow.compat.v2.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (13947, 13955), True, 'import tensorflow.compat.v2 as tf\n'), ((13979, 13994), 'tensorflow.compat.v2.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (13986, 13994), True, 'import tensorflow.compat.v2 as tf\n'), ((14016, 14038), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (14036, 14038), True, 'import tensorflow.compat.v2 as tf\n'), ((14482, 14499), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), 
'(shape=(1,))\n', (14487, 14499), False, 'from keras import Input\n'), ((14552, 14574), 'keras.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (14557, 14574), False, 'from keras import Model\n'), ((14843, 14860), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (14848, 14860), False, 'from keras import Input\n'), ((14913, 14935), 'keras.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (14918, 14935), False, 'from keras import Model\n'), ((15211, 15228), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (15216, 15228), False, 'from keras import Input\n'), ((15243, 15260), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (15248, 15260), False, 'from keras import Input\n'), ((15313, 15346), 'keras.Model', 'Model', (['[inputs, targets]', 'outputs'], {}), '([inputs, targets], outputs)\n', (15318, 15346), False, 'from keras import Model\n'), ((1363, 1380), 'tensorflow.compat.v2.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1378, 1380), True, 'import tensorflow.compat.v2 as tf\n'), ((1486, 1513), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['model.losses'], {}), '(model.losses)\n', (1499, 1513), True, 'import tensorflow.compat.v2 as tf\n'), ((2261, 2278), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (2276, 2278), False, 'from keras.testing_infra import test_utils\n'), ((2462, 2501), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'optimizer_v2.gradient_descent.SGD', (['(0.05)'], {}), '(0.05)\n', (2495, 2501), False, 'from keras.optimizers import optimizer_v2\n'), ((3057, 3085), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['model.weights'], {}), '(model.weights)\n', (3070, 3085), True, 'import tensorflow.compat.v2 as tf\n'), ((3148, 3186), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'optimizer_v2.gradient_descent.SGD', (['(0.1)'], {}), '(0.1)\n', (3181, 3186), False, 'from keras.optimizers import 
optimizer_v2\n'), ((3529, 3546), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (3534, 3546), False, 'from keras import Input\n'), ((3563, 3580), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (3568, 3580), False, 'from keras import Input\n'), ((3637, 3670), 'keras.Model', 'Model', (['[inputs, targets]', 'outputs'], {}), '([inputs, targets], outputs)\n', (3642, 3670), False, 'from keras import Model\n'), ((4318, 4335), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (4323, 4335), False, 'from keras import Input\n'), ((4352, 4369), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (4357, 4369), False, 'from keras import Input\n'), ((4426, 4459), 'keras.Model', 'Model', (['[inputs, targets]', 'outputs'], {}), '([inputs, targets], outputs)\n', (4431, 4459), False, 'from keras import Model\n'), ((5164, 5181), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (5179, 5181), False, 'from keras.testing_infra import test_utils\n'), ((5382, 5422), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'optimizer_v2.gradient_descent.SGD', (['(0.025)'], {}), '(0.025)\n', (5415, 5422), False, 'from keras.optimizers import optimizer_v2\n'), ((5802, 5819), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5807, 5819), False, 'from keras import Input\n'), ((5836, 5853), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5841, 5853), False, 'from keras import Input\n'), ((5865, 5882), 'keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5870, 5882), False, 'from keras import Input\n'), ((5939, 5976), 'keras.Model', 'Model', (['[inputs, targets, sw]', 'outputs'], {}), '([inputs, targets, sw], outputs)\n', (5944, 5976), False, 'from keras import Model\n'), ((7042, 7081), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'optimizer_v2.gradient_descent.SGD', (['(0.05)'], {}), '(0.05)\n', (7075, 7081), False, 'from keras.optimizers 
import optimizer_v2\n'), ((8146, 8185), 'keras.optimizers.optimizer_v2.gradient_descent.SGD', 'optimizer_v2.gradient_descent.SGD', (['(0.05)'], {}), '(0.05)\n', (8179, 8185), False, 'from keras.optimizers import optimizer_v2\n'), ((9091, 9106), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (9098, 9106), True, 'import numpy as np\n'), ((9108, 9123), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (9115, 9123), True, 'import numpy as np\n'), ((9711, 9776), 'keras.testing_infra.test_utils.get_model_from_layers', 'test_utils.get_model_from_layers', (['model_layers'], {'input_shape': '(10,)'}), '(model_layers, input_shape=(10,))\n', (9743, 9776), False, 'from keras.testing_infra import test_utils\n'), ((9799, 9827), 'numpy.ones', 'np.ones', (['(10, 10)', '"""float32"""'], {}), "((10, 10), 'float32')\n", (9806, 9827), True, 'import numpy as np\n'), ((9838, 9866), 'numpy.zeros', 'np.zeros', (['(10, 1)', '"""float32"""'], {}), "((10, 1), 'float32')\n", (9846, 9866), True, 'import numpy as np\n'), ((9886, 9923), 'tensorflow.python.training.rmsprop.RMSPropOptimizer', 'RMSPropOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (9902, 9923), False, 'from tensorflow.python.training.rmsprop import RMSPropOptimizer\n'), ((10948, 11010), 'keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""relu"""', 'activity_regularizer': '"""l2"""'}), "(10, activation='relu', activity_regularizer='l2')\n", (10960, 11010), False, 'from keras import layers\n'), ((11033, 11070), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (11045, 11070), False, 'from keras import layers\n'), ((11299, 11327), 'numpy.ones', 'np.ones', (['(10, 10)', '"""float32"""'], {}), "((10, 10), 'float32')\n", (11306, 11327), True, 'import numpy as np\n'), ((11370, 11398), 'numpy.ones', 'np.ones', (['(20, 10)', '"""float32"""'], {}), "((20, 10), 'float32')\n", (11377, 11398), True, 'import numpy as 
np\n'), ((11824, 11846), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (11835, 11846), True, 'import tensorflow.compat.v2 as tf\n'), ((12562, 12584), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (12573, 12584), True, 'import tensorflow.compat.v2 as tf\n'), ((13261, 13283), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (13281, 13283), True, 'import tensorflow.compat.v2 as tf\n'), ((13444, 13466), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (13464, 13466), True, 'import tensorflow.compat.v2 as tf\n'), ((13700, 13722), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (13720, 13722), True, 'import tensorflow.compat.v2 as tf\n'), ((14514, 14531), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (14529, 14531), False, 'from keras.testing_infra import test_utils\n'), ((14875, 14892), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (14890, 14892), False, 'from keras.testing_infra import test_utils\n'), ((15275, 15292), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (15290, 15292), False, 'from keras.testing_infra import test_utils\n'), ((15366, 15410), 'keras.losses.binary_crossentropy', 'losses.binary_crossentropy', (['targets', 'outputs'], {}), '(targets, outputs)\n', (15392, 15410), False, 'from keras import losses\n'), ((15491, 15546), 'tensorflow.compat.v2.compat.v1.test.mock.patch.object', 'tf.compat.v1.test.mock.patch.object', (['logging', '"""warning"""'], {}), "(logging, 'warning')\n", (15526, 15546), True, 'import tensorflow.compat.v2 as tf\n'), ((2523, 2554), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (2552, 2554), False, 'from keras.testing_infra import test_utils\n'), ((2935, 2952), 'keras.testing_infra.test_utils.Bias', 
'test_utils.Bias', ([], {}), '()\n', (2950, 2952), False, 'from keras.testing_infra import test_utils\n'), ((3208, 3239), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (3237, 3239), False, 'from keras.testing_infra import test_utils\n'), ((3597, 3614), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (3612, 3614), False, 'from keras.testing_infra import test_utils\n'), ((4386, 4403), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (4401, 4403), False, 'from keras.testing_infra import test_utils\n'), ((4503, 4531), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['model.weights'], {}), '(model.weights)\n', (4516, 4531), True, 'import tensorflow.compat.v2 as tf\n'), ((5444, 5475), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (5473, 5475), False, 'from keras.testing_infra import test_utils\n'), ((5899, 5916), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (5914, 5916), False, 'from keras.testing_infra import test_utils\n'), ((6702, 6719), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (6717, 6719), False, 'from keras.testing_infra import test_utils\n'), ((7103, 7134), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (7132, 7134), False, 'from keras.testing_infra import test_utils\n'), ((7656, 7673), 'keras.testing_infra.test_utils.Bias', 'test_utils.Bias', ([], {}), '()\n', (7671, 7673), False, 'from keras.testing_infra import test_utils\n'), ((8207, 8238), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (8236, 8238), False, 'from keras.testing_infra import test_utils\n'), ((9026, 9057), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (9055, 9057), False, 'from 
keras.testing_infra import test_utils\n'), ((9362, 9470), 'keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""relu"""', 'activity_regularizer': 'reg', 'kernel_initializer': '"""ones"""', 'use_bias': '(False)'}), "(10, activation='relu', activity_regularizer=reg,\n kernel_initializer='ones', use_bias=False)\n", (9374, 9470), False, 'from keras import layers\n'), ((9549, 9634), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""ones"""', 'use_bias': '(False)'}), "(1, activation='sigmoid', kernel_initializer='ones', use_bias=False\n )\n", (9561, 9634), False, 'from keras import layers\n'), ((10686, 10717), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (10715, 10717), False, 'from keras.testing_infra import test_utils\n'), ((11222, 11253), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (11251, 11253), False, 'from keras.testing_infra import test_utils\n'), ((12916, 12955), 'keras.layers.ActivityRegularization', 'layers.ActivityRegularization', ([], {'l2': '(0.001)'}), '(l2=0.001)\n', (12945, 12955), False, 'from keras import layers\n'), ((15449, 15480), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (15478, 15480), False, 'from keras.testing_infra import test_utils\n'), ((8758, 8779), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['inputs'], {}), '(inputs)\n', (8771, 8779), True, 'import tensorflow.compat.v2 as tf\n'), ((10021, 10052), 'keras.testing_infra.test_utils.should_run_eagerly', 'test_utils.should_run_eagerly', ([], {}), '()\n', (10050, 10052), False, 'from keras.testing_infra import test_utils\n'), ((11642, 11663), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (['inputs'], {}), '(inputs)\n', (11655, 11663), True, 'import tensorflow.compat.v2 as tf\n'), ((12110, 12131), 'tensorflow.compat.v2.reduce_sum', 
'tf.reduce_sum', (['inputs'], {}), '(inputs)\n', (12123, 12131), True, 'import tensorflow.compat.v2 as tf\n')] |
#!/usr/bin/env python3
import matplotlib
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np
# Collect aligned-fragment sizes per library and plot their cumulative
# distribution (CDF) on a log-scaled x axis.
lengths = defaultdict(list)
lib_count = defaultdict(int)

# Hard-coded sample data: fragment sizes observed for each library.
# ("good" was originally appended in two batches of ten values each.)
SAMPLE_SIZES = {
    "good": [
        20, 30, 100, 330, 10, 40, 45, 60, 22, 10,
        21, 33, 109, 320, 14, 32, 33, 51, 72, 60,
    ],
    "bad": [12, 13, 24, 300, 7, 35, 25, 45, 19, 8],
}
for lib, sizes in SAMPLE_SIZES.items():
    lengths[lib].extend(sizes)
    lib_count[lib] += len(sizes)

libs = sorted(lengths.keys())
print(libs)
for lib in libs:
    lengths[lib] = np.array(lengths[lib])
    # Ignore zero-length entries; they carry no size information.
    lens = lengths[lib][lengths[lib] != 0]
    # Empirical CDF: sorted sizes mapped onto evenly spaced fractions in [0, 1].
    length_cdf = np.linspace(0, 1, len(lens))
    plt.plot(sorted(lens), length_cdf, "-")
plt.xscale("log")
plt.ylabel("Fraction of aligned fragments")
plt.xlabel("Pair size")
# Plain string literal: the original used an f-string with no placeholders.
plt.title("Cumulative distribution (cis-chromosomal pairs only)")
plt.grid(linewidth=0.25)
plt.show()
plt.close()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.array",
"collections.defaultdict",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show"
] | [((141, 158), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (152, 158), False, 'from collections import defaultdict\n'), ((171, 187), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (182, 187), False, 'from collections import defaultdict\n'), ((1254, 1271), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1264, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1315), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction of aligned fragments"""'], {}), "('Fraction of aligned fragments')\n", (1282, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1339), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pair size"""'], {}), "('Pair size')\n", (1326, 1339), True, 'import matplotlib.pyplot as plt\n'), ((1340, 1406), 'matplotlib.pyplot.title', 'plt.title', (['f"""Cumulative distribution (cis-chromosomal pairs only)"""'], {}), "(f'Cumulative distribution (cis-chromosomal pairs only)')\n", (1349, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1431), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linewidth': '(0.25)'}), '(linewidth=0.25)\n', (1415, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1442), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1440, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1525, 1536), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1534, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1084, 1106), 'numpy.array', 'np.array', (['lengths[lib]'], {}), '(lengths[lib])\n', (1092, 1106), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from covsirphy.ode.mbase import ModelBase
class SIRF(ModelBase):
"""
SIR-F model.
Args:
population (int): total population
theta (float)
kappa (float)
rho (float)
sigma (float)
"""
# Model name
NAME = "SIR-F"
# names of parameters
PARAMETERS = ["theta", "kappa", "rho", "sigma"]
DAY_PARAMETERS = [
"alpha1 [-]", "1/alpha2 [day]", "1/beta [day]", "1/gamma [day]"
]
# Variable names in (non-dim, dimensional) ODEs
VAR_DICT = {
"x": ModelBase.S,
"y": ModelBase.CI,
"z": ModelBase.R,
"w": ModelBase.F
}
VARIABLES = list(VAR_DICT.values())
# Weights of variables in parameter estimation error function
WEIGHTS = np.array([0, 1, 1, 1])
# Variables that increases monotonically
VARS_INCLEASE = [ModelBase.R, ModelBase.F]
# Example set of parameters and initial values
EXAMPLE = {
ModelBase.STEP_N: 180,
ModelBase.N.lower(): 1_000_000,
ModelBase.PARAM_DICT: {
"theta": 0.002, "kappa": 0.005, "rho": 0.2, "sigma": 0.075,
},
ModelBase.Y0_DICT: {
ModelBase.S: 999_000, ModelBase.CI: 1000, ModelBase.R: 0, ModelBase.F: 0,
},
}
def __init__(self, population, theta, kappa, rho, sigma):
# Total population
self.population = self._ensure_natural_int(
population, name="population"
)
# Non-dim parameters
self.theta = theta
self.kappa = kappa
self.rho = rho
self.sigma = sigma
self.non_param_dict = {
"theta": theta, "kappa": kappa, "rho": rho, "sigma": sigma}
def __call__(self, t, X):
"""
Return the list of dS/dt (tau-free) etc.
Args:
t (int): time steps
X (numpy.array): values of th model variables
Returns:
(np.array)
"""
n = self.population
s, i, *_ = X
dsdt = 0 - self.rho * s * i / n
drdt = self.sigma * i
dfdt = self.kappa * i + (0 - dsdt) * self.theta
didt = 0 - dsdt - drdt - dfdt
return np.array([dsdt, didt, drdt, dfdt])
@classmethod
def param_range(cls, taufree_df, population, quantiles=(0.1, 0.9)):
"""
Define the value range of ODE parameters using (X, dX/dt) points.
In SIR model, X is S, I, R, F here.
Args:
taufree_df (pandas.DataFrame):
Index
reset index
Columns
- t (int): time steps (tau-free)
- columns with dimensional variables
population (int): total population
quantiles (tuple(int, int)): quantiles to cut, like confidence interval
Returns:
dict(str, tuple(float, float)): minimum/maximum values
"""
df = cls._ensure_dataframe(taufree_df, name="taufree_df", columns=[cls.TS, *cls.VARIABLES])
df = df.loc[(df[cls.S] > 0) & (df[cls.CI] > 0)]
n, t = population, df[cls.TS]
s, i, r, f = df[cls.S], df[cls.CI], df[cls.R], df[cls.F]
# kappa = (dF/dt) / I when theta -> 0
kappa_series = f.diff() / t.diff() / i
# rho = - n * (dS/dt) / S / I
rho_series = 0 - n * s.diff() / t.diff() / s / i
# sigma = (dR/dt) / I
sigma_series = r.diff() / t.diff() / i
# Calculate quantile
_dict = {
k: tuple(v.quantile(quantiles).clip(0, 1)) for (k, v)
in zip(["kappa", "rho", "sigma"], [kappa_series, rho_series, sigma_series])
}
_dict["theta"] = (0, 1)
return _dict
@classmethod
def specialize(cls, data_df, population):
"""
Specialize the dataset for this model.
Args:
data_df (pandas.DataFrame):
Index
reset index
Columns
- Confirmed (int): the number of confirmed cases
- Infected (int): the number of currently infected cases
- Fatal (int): the number of fatal cases
- Recovered (int): the number of recovered cases
- any columns
population (int): total population in the place
Returns:
(pandas.DataFrame)
Index
reset index
Columns
- any columns @data_df has
- Susceptible (int): the number of susceptible cases
"""
df = cls._ensure_dataframe(
data_df, name="data_df", columns=cls.VALUE_COLUMNS)
# Calculate dimensional variables
df[cls.S] = population - df[cls.C]
return df
    @classmethod
    def restore(cls, specialized_df):
        """
        Restore Confirmed/Infected/Recovered/Fatal.
        using a dataframe with the variables of the model.
        Args:
            specialized_df (pandas.DataFrame): dataframe with the variables
                Index
                    (object)
                Columns
                    - Susceptible (int): the number of susceptible cases
                    - Infected (int): the number of currently infected cases
                    - Recovered (int): the number of recovered cases
                    - Fatal (int): the number of fatal cases
                    - any columns
        Returns:
            (pandas.DataFrame)
                Index
                    (object): as-is
                Columns
                    - Confirmed (int): the number of confirmed cases
                    - Infected (int): the number of currently infected cases
                    - Fatal (int): the number of fatal cases
                    - Recovered (int): the number of recovered cases
                    - the other columns @specialized_df has
        """
        df = specialized_df.copy()
        # Preserve any caller-supplied columns beyond the model variables.
        other_cols = list(set(df.columns) - set(cls.VALUE_COLUMNS))
        # Confirmed is cumulative: currently infected + recovered + fatal.
        df[cls.C] = df[cls.CI] + df[cls.R] + df[cls.F]
        return df.loc[:, [*cls.VALUE_COLUMNS, *other_cols]]
    def calc_r0(self):
        """
        Calculate (basic) reproduction number.
        Returns:
            float: rho * (1 - theta) / (sigma + kappa), rounded to 2 decimals,
            or None when sigma + kappa == 0 (no removal, Rt undefined)
        """
        try:
            rt = self.rho * (1 - self.theta) / (self.sigma + self.kappa)
        except ZeroDivisionError:
            # sigma + kappa == 0: the denominator vanishes.
            return None
        return round(rt, 2)
    def calc_days_dict(self, tau):
        """
        Calculate 1/beta [day] etc.
        Args:
            param tau (int): tau value [min]
        Returns:
            dict[str, int]: characteristic times in days (alpha1 is the
            dimensionless theta), or None values when a rate is zero
        """
        try:
            # 1/param is a count of tau-length steps; tau [min] / 24 / 60
            # converts that step count into days.
            return {
                "alpha1 [-]": round(self.theta, 3),
                "1/alpha2 [day]": int(tau / 24 / 60 / self.kappa),
                "1/beta [day]": int(tau / 24 / 60 / self.rho),
                "1/gamma [day]": int(tau / 24 / 60 / self.sigma)
            }
        except ZeroDivisionError:
            # Any zero rate makes the corresponding time infinite.
            return {p: None for p in self.DAY_PARAMETERS}
| [
"numpy.array",
"covsirphy.ode.mbase.ModelBase.N.lower"
] | [((820, 842), 'numpy.array', 'np.array', (['[0, 1, 1, 1]'], {}), '([0, 1, 1, 1])\n', (828, 842), True, 'import numpy as np\n'), ((1041, 1060), 'covsirphy.ode.mbase.ModelBase.N.lower', 'ModelBase.N.lower', ([], {}), '()\n', (1058, 1060), False, 'from covsirphy.ode.mbase import ModelBase\n'), ((2229, 2263), 'numpy.array', 'np.array', (['[dsdt, didt, drdt, dfdt]'], {}), '([dsdt, didt, drdt, dfdt])\n', (2237, 2263), True, 'import numpy as np\n')] |
import numpy as np
import sys
import logging
import pickle
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.optimize import minimize_scalar
from itertools import product
import ray
import pandas as pd
import click
from neslab.find import distributions as dists
from neslab.find import Model
logger = logging.getLogger("model")
def f_obj(scale, t_chr, n_nodes):
    """Objective: discovery latency of a Geometric-distribution FIND model
    built with the given scale, charging time and number of nodes."""
    m = Model(scale, "Geometric", t_chr, n_nodes, n_jobs=1)
    return m.disco_latency()
@ray.remote
def job(t_chr, n_nodes):
    """Ray task: find the scale minimising discovery latency for one config.

    Runs a bounded scalar minimisation of ``f_obj`` over the valid scale
    range for the charging time, and returns one result row as a flat dict.
    """
    scale_range = dists.Geometric.get_scale_range(t_chr)
    res = minimize_scalar(
        f_obj,
        bounds=scale_range,
        method="bounded",
        args=(t_chr, n_nodes),
    )
    # res.x is the optimal scale, res.fun the latency achieved there.
    log_entry = {
        "t_chr": t_chr,
        "n_nodes": n_nodes,
        "scale": res.x,
        "disco_latency": res.fun,
    }
    return log_entry
@click.command()
@click.option("--redis-password", "-p", type=str, default="<PASSWORD>")
@click.option("--head-address", "-a", type=str, default="auto")
@click.option(
    "--outfile",
    "-o",
    type=click.Path(dir_okay=False),
    help="Output file",
    default="results_scale.csv",
)
@click.option("-v", "--verbose", count=True, default=1)
def main(
    redis_password: str,
    head_address: str,
    outfile: click.Path,
    verbose,
):
    # Sweep charging times (2 nodes) and node counts (t_chr = 25), optimise
    # the scale for each configuration on a ray cluster, and dump a CSV.
    # (Intentionally no docstring: click would surface it as --help text.)
    hnd = logging.StreamHandler()
    logger.addHandler(hnd)
    # Repeatable -v flags select the log level (0=ERROR ... 3+=DEBUG).
    if verbose == 0:
        logger.setLevel(logging.ERROR)
    elif verbose == 1:
        logger.setLevel(logging.WARNING)
    elif verbose == 2:
        logger.setLevel(logging.INFO)
    elif verbose > 2:
        logger.setLevel(logging.DEBUG)
    ray.init(address=head_address, _redis_password=redis_password)
    # Configs for 2 nodes and different charging times
    args_tchrs = list(product(np.arange(5, 2500, 5), [2]))
    # Configs for charging time 25 and different numbers of nodes
    args_nnodes = list(product([25], np.arange(3, 110, 5)))
    futures = list()
    for arg in args_tchrs + args_nnodes:
        futures.append(job.remote(arg[0], arg[1]))
    logger.info(f"Running {len(futures)} jobs")
    # ray.get blocks until every remote job has finished.
    results = ray.get(futures)
    df = pd.DataFrame(results)
    df.to_csv(outfile, index=False)
# Entry point: click parses the command-line options when main() is invoked.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"neslab.find.Model",
"logging.StreamHandler",
"ray.init",
"ray.get",
"click.option",
"neslab.find.distributions.Geometric.get_scale_range",
"click.Path",
"scipy.optimize.minimize_scalar",
"pandas.DataFrame",
"click.command",
"numpy.arange"
] | [((322, 348), 'logging.getLogger', 'logging.getLogger', (['"""model"""'], {}), "('model')\n", (339, 348), False, 'import logging\n'), ((861, 876), 'click.command', 'click.command', ([], {}), '()\n', (874, 876), False, 'import click\n'), ((878, 948), 'click.option', 'click.option', (['"""--redis-password"""', '"""-p"""'], {'type': 'str', 'default': '"""<PASSWORD>"""'}), "('--redis-password', '-p', type=str, default='<PASSWORD>')\n", (890, 948), False, 'import click\n'), ((950, 1012), 'click.option', 'click.option', (['"""--head-address"""', '"""-a"""'], {'type': 'str', 'default': '"""auto"""'}), "('--head-address', '-a', type=str, default='auto')\n", (962, 1012), False, 'import click\n'), ((1152, 1206), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'count': '(True)', 'default': '(1)'}), "('-v', '--verbose', count=True, default=1)\n", (1164, 1206), False, 'import click\n'), ((393, 444), 'neslab.find.Model', 'Model', (['scale', '"""Geometric"""', 't_chr', 'n_nodes'], {'n_jobs': '(1)'}), "(scale, 'Geometric', t_chr, n_nodes, n_jobs=1)\n", (398, 444), False, 'from neslab.find import Model\n'), ((531, 569), 'neslab.find.distributions.Geometric.get_scale_range', 'dists.Geometric.get_scale_range', (['t_chr'], {}), '(t_chr)\n', (562, 569), True, 'from neslab.find import distributions as dists\n'), ((580, 667), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['f_obj'], {'bounds': 'scale_range', 'method': '"""bounded"""', 'args': '(t_chr, n_nodes)'}), "(f_obj, bounds=scale_range, method='bounded', args=(t_chr,\n n_nodes))\n", (595, 667), False, 'from scipy.optimize import minimize_scalar\n'), ((1317, 1340), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1338, 1340), False, 'import logging\n'), ((1620, 1682), 'ray.init', 'ray.init', ([], {'address': 'head_address', '_redis_password': 'redis_password'}), '(address=head_address, _redis_password=redis_password)\n', (1628, 1682), False, 'import ray\n'), ((2101, 2117), 'ray.get', 
'ray.get', (['futures'], {}), '(futures)\n', (2108, 2117), False, 'import ray\n'), ((2128, 2149), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (2140, 2149), True, 'import pandas as pd\n'), ((1064, 1090), 'click.Path', 'click.Path', ([], {'dir_okay': '(False)'}), '(dir_okay=False)\n', (1074, 1090), False, 'import click\n'), ((1769, 1790), 'numpy.arange', 'np.arange', (['(5)', '(2500)', '(5)'], {}), '(5, 2500, 5)\n', (1778, 1790), True, 'import numpy as np\n'), ((1901, 1921), 'numpy.arange', 'np.arange', (['(3)', '(110)', '(5)'], {}), '(3, 110, 5)\n', (1910, 1921), True, 'import numpy as np\n')] |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.

    Rows with missing values, duplicate rows and rows with a non-positive
    price are removed.  A ``yr_renovated`` sentinel of 0 (never renovated)
    is replaced with ``yr_built``, so the column always holds the year of
    the most recent construction work.

    Parameters
    ----------
    filename: str
        Path to house prices dataset (CSV file)

    Returns
    -------
    Design matrix and response vector (prices) as a
    Tuple[DataFrame, Series]
    """
    full_data = pd.read_csv(filename).dropna().drop_duplicates()
    # A price below 1 is a data error, not a real sale.
    full_data.drop(full_data[full_data.price < 1].index, inplace=True)
    labels = full_data["price"]
    # .copy() detaches the feature matrix from full_data so the assignment
    # below writes to an independent frame (avoids SettingWithCopyWarning
    # and guarantees the write sticks under pandas copy-on-write).
    features = full_data[["bedrooms",
                          "bathrooms",
                          "sqft_living",
                          "floors",
                          "yr_built",
                          "yr_renovated"]].copy()
    # yr_renovated <= 0 means "never renovated"; fall back to yr_built.
    features["yr_renovated"] = features["yr_renovated"].mask(
        features["yr_renovated"] <= 0, features["yr_built"], axis=0)
    return features, labels
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name
    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem
    y : array-like of shape (n_samples, )
        Response vector to evaluate against
    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    # Pearson correlation of every feature column against the response,
    # computed column-wise (axis 0) over the design matrix.
    pearson_correlation_values = np.apply_along_axis(pearson_correlation, 0, X, np.array(y))
    # One chained plotly call per feature: build figure -> add scatter ->
    # set the title (feature name + correlation) -> save as PNG.
    for index, feature in enumerate(X.keys()):
        go.Figure().add_trace(
            go.Scatter(x=X[feature], y=y,
                       mode="markers")).update_layout(title={
            "text": f"{feature} with Pearson Correlation "
                    f"{pearson_correlation_values[index]}"}).write_image(f"{output_path}/{feature}.png")
def pearson_correlation(X: np.array, y: np.array) -> float:
    """Return the Pearson correlation coefficient between vectors X and y.

    Uses ``np.corrcoef`` so the covariance and the standard deviations are
    normalised consistently.  The previous hand-rolled formula divided
    ``np.cov`` (sample covariance, ddof=1) by ``np.std`` values (population
    std, ddof=0), which inflates the result and can yield values outside
    the valid range [-1, 1].
    """
    return np.corrcoef(X, y)[0][1]
if __name__ == '__main__':
    # Fixed seed so any sampling below is reproducible.
    np.random.seed(0)
    # Question 1 - Load and preprocessing of housing prices dataset
    house_features, house_prices = load_data(
        "../datasets/house_prices.csv")
    # Question 2 - Feature evaluation with respect to response
    # feature_evaluation(house_features, house_prices)
    regressor = LinearRegression()
    regressor.fit(house_features, house_prices)
    # Question 3 - Split samples into training- and testing sets.
    # NOTE(review): the unpacking order assumes split_train_test returns
    # (train_X, train_y, test_X, test_y) — confirm against its signature.
    x_train, y_train, x_test, y_test = split_train_test(house_features, house_prices)
    # Question 4 - Fit model over increasing percentages of the overall training data
    # For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
    #   1) Sample p% of the overall training data
    #   2) Fit linear model (including intercept) over sampled set
    #   3) Test fitted model over test set
    #   4) Store average and variance of loss over test set
    # Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
    regressor = LinearRegression()
    #
    # for i in range(10, 101):
    #     regressor.fit(x_train.sample(frac=))
| [
"pandas.read_csv",
"IMLearn.learners.regressors.LinearRegression",
"numpy.array",
"IMLearn.utils.split_train_test",
"plotly.graph_objects.Scatter",
"plotly.graph_objects.Figure",
"numpy.random.seed",
"numpy.std",
"numpy.cov"
] | [((3265, 3282), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3279, 3282), True, 'import numpy as np\n'), ((3573, 3591), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3589, 3591), False, 'from IMLearn.learners.regressors import LinearRegression\n'), ((3747, 3793), 'IMLearn.utils.split_train_test', 'split_train_test', (['house_features', 'house_prices'], {}), '(house_features, house_prices)\n', (3763, 3793), False, 'from IMLearn.utils import split_train_test\n'), ((4312, 4330), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4328, 4330), False, 'from IMLearn.learners.regressors import LinearRegression\n'), ((2752, 2763), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2760, 2763), True, 'import numpy as np\n'), ((3186, 3198), 'numpy.cov', 'np.cov', (['X', 'y'], {}), '(X, y)\n', (3192, 3198), True, 'import numpy as np\n'), ((632, 653), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (643, 653), True, 'import pandas as pd\n'), ((3202, 3211), 'numpy.std', 'np.std', (['X'], {}), '(X)\n', (3208, 3211), True, 'import numpy as np\n'), ((3214, 3223), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (3220, 3223), True, 'import numpy as np\n'), ((2856, 2901), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'X[feature]', 'y': 'y', 'mode': '"""markers"""'}), "(x=X[feature], y=y, mode='markers')\n", (2866, 2901), True, 'import plotly.graph_objects as go\n'), ((2821, 2832), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2830, 2832), True, 'import plotly.graph_objects as go\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Medal counts per sport; order matches the `sports` label list below.
y = np.array([3, 22, 50, 351, 159, 40])
sports = ['Biathlon', 'Bobsleigh', 'Curling', 'Ice hockey', 'Skating', 'Skiing']

plt.pie(y, labels=sports)
plt.xlabel("Canada")
# `pad` is a distance in points and must be numeric; the original passed the
# string "20", which only worked via matplotlib's implicit float() cast.
plt.title("Which Sport had Most Medal", pad=20)
plt.show()
| [
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pie",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((56, 91), 'numpy.array', 'np.array', (['[3, 22, 50, 351, 159, 40]'], {}), '([3, 22, 50, 351, 159, 40])\n', (64, 91), True, 'import numpy as np\n'), ((171, 196), 'matplotlib.pyplot.pie', 'plt.pie', (['y'], {'labels': 'sports'}), '(y, labels=sports)\n', (178, 196), True, 'import matplotlib.pyplot as plt\n'), ((199, 219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Canada"""'], {}), "('Canada')\n", (209, 219), True, 'import matplotlib.pyplot as plt\n'), ((220, 269), 'matplotlib.pyplot.title', 'plt.title', (['"""Which Sport had Most Medal"""'], {'pad': '"""20"""'}), "('Which Sport had Most Medal', pad='20')\n", (229, 269), True, 'import matplotlib.pyplot as plt\n'), ((271, 281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (279, 281), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import logging
import contextlib
import os
import datetime
import json
import numpy as np
import cv2
import math
import torch
from PIL import Image
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode, PolygonMasks, Boxes
from fvcore.common.file_io import PathManager, file_lock
from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
"""
This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
"""
# Metadata for the 16 DOTA object categories: a display colour (RGB), an
# "isthing" flag, and the id/name pair reused when main() emits the
# COCO-style "categories" list.
DOTA_CATEGORIES = [
    {"color": [220, 20, 60], "isthing": 1, "id": 0, "name": "small-vehicle"},
    {"color": [119, 11, 32], "isthing": 1, "id": 1, "name": 'large-vehicle'},
    {"color": [0, 0, 142], "isthing": 1, "id": 2, "name": 'ship'},
    {"color": [0, 0, 230], "isthing": 1, "id": 3, "name": 'container-crane'},
    {"color": [106, 0, 228], "isthing": 1, "id": 4, "name": 'storage-tank'},
    {"color": [0, 60, 100], "isthing": 1, "id": 5, "name": 'plane'},
    {"color": [0, 80, 100], "isthing": 1, "id": 6, "name": 'tennis-court'},
    {"color": [0, 0, 70], "isthing": 1, "id": 7, "name": 'harbor'},
    {"color": [0, 0, 192], "isthing": 1, "id": 8, "name": 'bridge'},
    {"color": [250, 170, 30], "isthing": 1, "id": 9, "name": 'baseball-diamond'},
    {"color": [100, 170, 30], "isthing": 1, "id": 10, "name": 'roundabout'},
    {"color": [220, 220, 0], "isthing": 1, "id": 11, "name": 'basketball-court'},
    {"color": [175, 116, 175], "isthing": 1, "id": 12, "name": 'swimming-pool'},
    {"color": [250, 0, 30], "isthing": 1, "id": 13, "name": 'soccer-ball-field'},
    {"color": [165, 42, 42], "isthing": 1, "id": 14, "name": 'ground-track-field'},
    {"color": [0, 82, 0], "isthing": 1, "id": 15, "name": "helicopter"},
]
class DotaAPI:
    """Thin reader for DOTA-format geojson label files.

    Each feature record stores normalised rotated boxes as flat
    (cx, cy, w, h, theta) tuples with cx/cy/w/h relative to the patch size
    and theta in radians.  The converters below turn those into absolute
    pixel coordinates.
    """

    def __init__(self, json_file):
        """Load the annotation file and keep its per-patch feature records."""
        with open(json_file) as f:
            data = json.load(f)
        self.features = data['features']

    @staticmethod
    def _denormalize(dota_bbox, patch_size):
        """Denormalise (N, 5) boxes to pixels with theta in degrees.

        Boxes are canonicalised so that width >= height; whenever width and
        height are swapped the angle is rotated by 90 degrees so the same
        oriented rectangle is described.

        :param dota_bbox: flat sequence of N*5 normalised values
        :param patch_size: (width, height) of the patch in pixels
        :return: tuple of arrays (cx, cy, width, height, theta), each (N,)
        """
        pts = np.reshape(np.asarray(dota_bbox), (-1, 5)).astype(dtype=np.float32)
        cx = pts[:, 0] * patch_size[0]
        cy = pts[:, 1] * patch_size[1]
        width = pts[:, 2] * patch_size[0]
        height = pts[:, 3] * patch_size[1]
        theta = pts[:, 4] * 180 / math.pi
        # Element-wise canonicalisation.  The original scalar `if width <
        # height:` breaks on multi-box input (ambiguous truth value of an
        # array, a hard error in modern NumPy); np.where handles any N.
        swap = width < height
        new_width = np.where(swap, height, width)
        new_height = np.where(swap, width, height)
        theta = np.where(swap, theta + 90.0, theta)
        return cx, cy, new_width, new_height, theta

    @staticmethod
    def cvt_dota_to_detectron(dota_bbox: list, patch_size: tuple) -> list:
        """Convert normalised rotated boxes to horizontal XYXY pixel boxes.

        :param dota_bbox: flat sequence of N*5 normalised values
        :param patch_size: (width, height) of the patch in pixels
        :return: numpy array of shape (N, 4) with (x0, y0, x1, y1)
        """
        cx, cy, w, h, theta = DotaAPI._denormalize(dota_bbox, patch_size)
        # np.stack keeps each box's five values together; the original
        # asarray(list-of-arrays).reshape(-1, 5) scrambled rows for N > 1.
        arr = torch.tensor(np.stack([cx, cy, w, h, theta], axis=-1))
        original_dtype = arr.dtype
        arr = arr.double()
        w = arr[:, 2]
        h = arr[:, 3]
        a = arr[:, 4]
        c = torch.abs(torch.cos(a * math.pi / 180.0))
        s = torch.abs(torch.sin(a * math.pi / 180.0))
        # This basically computes the horizontal bounding rectangle of the
        # rotated box.
        new_w = c * w + s * h
        new_h = c * h + s * w
        # convert center to top-left corner
        arr[:, 0] -= new_w / 2.0
        arr[:, 1] -= new_h / 2.0
        # bottom-right corner
        arr[:, 2] = arr[:, 0] + new_w
        arr[:, 3] = arr[:, 1] + new_h
        arr = arr[:, :4].to(dtype=original_dtype)
        return arr.numpy()

    @staticmethod
    def cvt_dota_to_detectron_rotated(dota_bbox: list, patch_size: tuple) -> list:
        """Convert normalised boxes to rotated pixel boxes.

        :param dota_bbox: flat sequence of N*5 normalised values
        :param patch_size: (width, height) of the patch in pixels
        :return: list [cx, cy, width, height, theta] of per-box arrays,
            theta in degrees
        """
        cx, cy, width, height, theta = DotaAPI._denormalize(dota_bbox, patch_size)
        detectron_bbox = [cx, cy, width, height, theta]
        return detectron_bbox
def main():
    """
    Load a json file with DACON's instances annotation format.
    Currently supports instance detection, instance segmentation,
    and person keypoints annotations.
    Args:
        json_file (str): full path to the json file in dota instances annotation format.
        image_root (str): the directory where the images in this json file exists.
        dataset_name (str): the name of the dataset (e.g., coco_2017_train).
            If provided, this function will also put "thing_classes" into
            the metadata associated with this dataset.
        extra_annotation_keys (list[str]): list of per-annotation keys that should also be
            loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
            "category_id", "segmentation"). The values for these keys will be returned as-is.
            For example, the densepose annotations are loaded in this way.
    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    Notes:
        1. This function does not read the image files.
           The results do not have the "image" field.
    """
    data_path = "/ws/data/open_datasets/detection/dota/dota_patch_512_256/train"
    json_file = os.path.join(data_path, 'labels.json')
    json_file = PathManager.get_local_path(json_file)
    # Suppress any console output produced while parsing the annotations.
    with contextlib.redirect_stdout(io.StringIO()):
        dota_api = DotaAPI(json_file)
    anns = dota_api.features
    dataset_dicts = []
    # First pass: build Detectron2-style records, one per image patch, with
    # rotated boxes (XYWHA_ABS) converted from the normalised annotations.
    for ann in anns:
        record = {}
        record["file_name"] = ann['image_id']
        record["height"] = ann['height']
        record["width"] = ann['width']
        patch_size = (ann['width'], ann['height'])
        objs = []
        properties = ann['properties']
        count = 0
        for p in properties:
            # Check that the image_id in this annotation is the same as
            # the image_id we're looking at.
            # This fails only when the data parsing logic or the annotation file is buggy.
            # The original COCO valminusminival2014 & minival2014 annotation files
            # actually contains bugs that, together with certain ways of using COCO API,
            # can trigger this assertion.
            # if int(p["type_id"]) > 5:
            #     continue
            count += 1
            obj = {}
            obj["bbox"] = dota_api.cvt_dota_to_detectron_rotated(p["bounds_imcoords"].split(","), patch_size)
            obj["bbox_mode"] = BoxMode.XYWHA_ABS
            obj["category_id"] = int(p["type_id"])
            objs.append(obj)
        # Skip patches that carry no annotations at all.
        if count == 0:
            continue
        record["annotations"] = objs
        dataset_dicts.append(record)
    output_dict = {"type":"instances","images":[],"annotations":[],
                   "categories": [
                       {
                           "supercategory": "none",
                           "name": d["name"],
                           "id": d["id"]
                       } for d in DOTA_CATEGORIES
                   ]}
    # Second pass: convert the Detectron2 records into a COCO-style dict.
    for record in dataset_dicts:
        image = {}
        image["file_name"] = record["file_name"]
        image["height"] = record["height"]
        image["width"] = record["width"]
        # NOTE(review): assumes file names look like "<stem><digits>_<digits>"
        # so a unique numeric id can be derived — confirm the naming scheme.
        f, b = os.path.splitext(os.path.split(record["file_name"])[1])[0].split('_')
        f = int(''.join(i for i in f if i.isdigit()))
        b = int(''.join(i for i in b if i.isdigit()))
        image_id = f * 1000 + b
        image["id"] = image_id
        output_dict["images"].append(image)
        count = 0
        for obj in record["annotations"]:
            annotation = {}
            # Annotation ids combine the image id with a running counter.
            annotation["id"] = image_id * 10000 + count
            bbox = [d.item() for d in obj["bbox"]]
            annotation["bbox"] = bbox
            annotation["image_id"] = image_id
            annotation["ignore"] = 0
            annotation["area"] = bbox[2] * bbox[3]
            annotation["iscrowd"] = 0
            annotation["category_id"] = obj["category_id"]
            output_dict["annotations"].append(annotation)
            count += 1
    output_path = os.path.join(data_path, "coco_labels.json")
    with open(output_path, 'w') as outfile:
        json.dump(output_dict, outfile)
main() | [
"numpy.reshape",
"numpy.asarray",
"os.path.join",
"fvcore.common.file_io.PathManager.get_local_path",
"torch.sin",
"os.path.split",
"torch.tensor",
"torch.cos",
"json.load",
"io.StringIO",
"json.dump"
] | [((5757, 5795), 'os.path.join', 'os.path.join', (['data_path', '"""labels.json"""'], {}), "(data_path, 'labels.json')\n", (5769, 5795), False, 'import os\n'), ((5812, 5849), 'fvcore.common.file_io.PathManager.get_local_path', 'PathManager.get_local_path', (['json_file'], {}), '(json_file)\n', (5838, 5849), False, 'from fvcore.common.file_io import PathManager, file_lock\n'), ((8601, 8644), 'os.path.join', 'os.path.join', (['data_path', '"""coco_labels.json"""'], {}), "(data_path, 'coco_labels.json')\n", (8613, 8644), False, 'import os\n'), ((2370, 2391), 'numpy.asarray', 'np.asarray', (['dota_bbox'], {}), '(dota_bbox)\n', (2380, 2391), True, 'import numpy as np\n'), ((2864, 2881), 'torch.tensor', 'torch.tensor', (['arr'], {}), '(arr)\n', (2876, 2881), False, 'import torch\n'), ((3986, 4007), 'numpy.asarray', 'np.asarray', (['dota_bbox'], {}), '(dota_bbox)\n', (3996, 4007), True, 'import numpy as np\n'), ((8697, 8728), 'json.dump', 'json.dump', (['output_dict', 'outfile'], {}), '(output_dict, outfile)\n', (8706, 8728), False, 'import json\n'), ((1917, 1929), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1926, 1929), False, 'import json\n'), ((3033, 3063), 'torch.cos', 'torch.cos', (['(a * math.pi / 180.0)'], {}), '(a * math.pi / 180.0)\n', (3042, 3063), False, 'import torch\n'), ((3087, 3117), 'torch.sin', 'torch.sin', (['(a * math.pi / 180.0)'], {}), '(a * math.pi / 180.0)\n', (3096, 3117), False, 'import torch\n'), ((5886, 5899), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5897, 5899), False, 'import io\n'), ((2406, 2432), 'numpy.reshape', 'np.reshape', (['coord', '(-1, 5)'], {}), '(coord, (-1, 5))\n', (2416, 2432), True, 'import numpy as np\n'), ((2819, 2834), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (2829, 2834), True, 'import numpy as np\n'), ((4022, 4048), 'numpy.reshape', 'np.reshape', (['coord', '(-1, 5)'], {}), '(coord, (-1, 5))\n', (4032, 4048), True, 'import numpy as np\n'), ((7768, 7802), 'os.path.split', 'os.path.split', 
(["record['file_name']"], {}), "(record['file_name'])\n", (7781, 7802), False, 'import os\n')] |
import os
import glob
import json
import time
import pickle
import shutil
import random
import warnings
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from multiprocessing import Process, cpu_count, Array
from omsdetector.mof import Helper
from omsdetector.mof import MofStructure
from omsdetector.atomic_parameters import Atom
from sys import exit
pd.options.display.max_rows = 1000
class MofCollection:
    """A collection to hold and analyse MOF structures from CIF files"""
    # 50-character horizontal rule used to delimit console output sections.
    separator = "".join(['-'] * 50)
    def __init__(self, path_list, analysis_folder='analysis_folder'):
        """Create a MofCollection from a list of path names.

        :param path_list: List of paths to MOF CIF files to be added to the
        collection.
        :param analysis_folder: Path to the folder where the results will
        be stored. (default: 'analysis_folder')
        """
        self._analysis_folder = analysis_folder
        self.path_list = path_list
        self.mof_coll = []
        self.batches = []
        # Lazily-built caches; filled by the corresponding properties.
        self._metal_site_df = None
        self._mof_oms_df = None
        self._properties = {}
        self.load_balance_index = {}
        self.analysis_limit = None
        # Maps a filter keyword to the function that knows how to apply it.
        self.filter_functions = {
            "density": self._apply_filter_range,
            "oms_density": self._apply_filter_range,
            "uc_volume": self._apply_filter_range,
            "metal_species": self._apply_filter_in_value,
            "non_metal_species": self._apply_filter_in_value,
            "cif_okay": self._apply_filter_value,
            "has_oms": self._apply_filter_value,
            "mof_name": self._apply_value_in_filter
        }
        # NOTE(review): presumably reads the CIF paths and fills mof_coll —
        # implementation is outside this chunk.
        self._load_mofs()
    def __len__(self):
        """Number of MOF entries currently held in the collection."""
        return len(self.mof_coll)
def __repr__(self):
print_str = self.separator
print_str += "\nThis collection holds information for "
print_str += "{} MOFs.\n".format(len(self))
if self.analysis_folder is None:
print_str += "Analysis folder is not set.\n"
else:
f = os.path.abspath(self.analysis_folder)
print_str += "Analysis folder is: {}\n\n".format(f)
print_str += "List of cif files in collection:\n\n"
for mc in self.mof_coll:
print_str += "{}\n".format(mc['mof_file'])
print_str += self.separator
return print_str
    @property
    def analysis_folder(self):
        """Get value of the analysis folder."""
        # NOTE: reading this property creates the folder on disk as a
        # side effect.
        Helper.make_folder(self._analysis_folder)
        return self._analysis_folder
    @analysis_folder.setter
    def analysis_folder(self, analysis_folder):
        """Set value of the analysis folder."""
        self._analysis_folder = analysis_folder
    @property
    def oms_results_folder(self):
        """Get value of the OMS results folder (created if missing)."""
        orf = self.analysis_folder + '/oms_results'
        Helper.make_folder(orf)
        return orf
    @property
    def summary_folder(self):
        """Get value of the summary folder (created if missing)."""
        sf = self.analysis_folder + '/summary'
        Helper.make_folder(sf)
        return sf
    @property
    def _properties_filename(self):
        """Get value of the properties pickle file."""
        return self.analysis_folder + '/properties.pickle'
    @property
    def properties(self):
        """Get value for the MOF properties. If the property variable is not
        None and the pickle file exists, then load the file and return it."""
        # Lazy load: only read the pickle cache while the in-memory dict is
        # still empty.
        if not self._properties and os.path.isfile(self._properties_filename):
            with open(self._properties_filename, 'rb') as properties_file:
                self._properties = pickle.load(properties_file)
        return self._properties
@property
def mof_oms_df(self):
"""Get a pandas DataFrame that lists for each MOF whether it has an OMS
or not and if it has an OMS what metal types it is.
"""
if self._mof_oms_df is not None:
return self._mof_oms_df
if not self._validate_properties(['has_oms'])[1]:
print('OMS analysis not finished for all MOFs in collection.')
return False
mof_info = {}
for mi in self.mof_coll:
mp = self.properties[mi['checksum']]
if 'metal_sites' not in mp:
continue
metal_sites = mp['metal_sites']
if len(metal_sites) == 0:
print('No Metal Found in {}'.format(mp['name']))
oms_types = [ms["metal"] for ms in metal_sites
if ms["is_open"] and ms["unique"]]
oms_types = list(set(oms_types))
if oms_types:
oms_types = ",".join(oms_types)
else:
oms_types = "N/A"
if mp['has_oms']:
has_oms = 'Yes'
else:
has_oms = 'No'
all_metal_species = ",".join(set(mp['metal_species']))
mof_info[mp['name']] = {'Metal Types': all_metal_species,
'Has OMS': has_oms,
'OMS Types': oms_types}
self._metal_site_df = pd.DataFrame.from_dict(mof_info,
orient='index')
return self._metal_site_df
    @property
    def metal_site_df(self):
        """Get a pandas DataFrame that lists the OMS results for each metal
        type.

        Returns False (after printing a message) when the OMS analysis has
        not finished for every MOF in the collection.
        """
        if self._metal_site_df is not None:
            return self._metal_site_df
        if not self._validate_properties(['has_oms'])[1]:
            print('OMS analysis not finished for all MOFs in collection.')
            return False
        site_info = {}
        for mi in self.mof_coll:
            mp = self.properties[mi['checksum']]
            if 'metal_sites' not in mp:
                continue
            metal_sites = mp['metal_sites']
            if len(metal_sites) == 0:
                print('No Metal Found in {}'.format(mp['name']))
            # One row per metal site, keyed "<mof name>_<site index>".
            for i, ms in enumerate(metal_sites):
                key = mp['name'] + '_' + str(i)
                site_info[key] = ms
                # Drop bulky per-site dihedral data before tabulating.
                if 'all_dihedrals' in ms:
                    del site_info[key]['all_dihedrals']
                if 'min_dihedral' in ms:
                    del site_info[key]['min_dihedral']
                site_info[key]['mof_name'] = mp['name']
        self._metal_site_df = pd.DataFrame.from_dict(site_info, orient='index')
        return self._metal_site_df
@classmethod
def from_folder(cls, collection_folder, analysis_folder='analysis_folder',
name_list=None):
"""Create a MofCollection from a the CIF files in a folder.
:param collection_folder: Path to the folder containing the CIF files to
be added to the collection.
:param analysis_folder: Path to the folder where the results will
be stored. (default: 'analysis_folder')
:param name_list: List of MOF names to include in the collection. If
set, all the other CIF files in the folder will be excluded.
(default: None)
:return: A MofCollection object holding the specified MOF structures.
"""
if name_list:
print(cls.separator)
print('Using only MOFs in the name list.')
print(cls.separator)
d = collection_folder
path_list = [d+'/'+name for name in name_list]
else:
path_list = glob.glob(collection_folder + "/*.cif")
return cls(path_list, analysis_folder)
    def analyse_mofs(self, overwrite=False, num_batches=1, analysis_limit=None):
        """Run OMS analysis for the MOFs in the collection.
        :param overwrite: Controls if the results will be overwritten or not
        (default: False)
        :param num_batches: Sets the number of batches the structures will be
        split in and analyzed on a separate process. (default: 1)
        :param analysis_limit: Analyze only up to the number of MOFs set by
        analysis_limit, if set to None all MOFs will be analyzed (default: None)
        """
        print(self.separator)
        print("Running OMS Analysis...")
        self.analysis_limit = analysis_limit
        t0 = time.time()
        self._make_batches(num_batches, overwrite)
        # Shared status array: slot b holds the index of the MOF batch b is
        # currently on, or a negative value once that batch has finished
        # (see the progress loop below, which relies on this protocol).
        status = Array('i', [0 for i in range(num_batches)])
        for i, batch in enumerate(self.batches):
            p = Process(target=self._run_batch,
                        args=(i, batch, overwrite,status))
            p.start()
        # Per-batch scale factors so (s+1)/lbs[b] yields a percentage.
        lbs = [len(batch)/100.0 for batch in self.batches]
        wait_time = 0.0
        status_prev = [0 for i in range(num_batches)]
        while True:
            # Create a list from the shared array to make sure it doesnt change
            # during the iteration
            status_ = list(status)
            # No progress since last poll: back off (capped at 25 s).
            if all([sp == s for sp, s in zip(status_prev, status_)]):
                wait_time = min(25, 0.1+wait_time)
                time.sleep(wait_time)
            status_prev = status_
            sout = ["Batch {} Finished.".format(b + 1)
                    if len(self.batches[b]) == 0 or s < 0 else
                    "Batch {} {:.2f} % : Analysing {:}"
                    "".format(b+1, (s+1)/lbs[b], self.batches[b][s]['mof_name'])
                    for b, s in enumerate(status_)]
            print("|**| ".join(sout) + 100 * " ", end='\r', flush=True)
            # All slots negative: every worker is done.
            if all([s < 0 for s in status_]):
                break
        if overwrite:
            for mi in self.mof_coll:
                self._update_property_from_oms_result(mi)
        self._validate_properties(['has_oms'])
        t1 = time.time()
        print('\nAnalysis Finished. Time required:{:.2f} sec'.format(t1 - t0))
        print(self.separator)
    def check_structures(self):
        """Iterate over all the MOFs in the collection and validate that they
        can be read and a MofStructure can be created.

        Prints how many structures were read, lists those that could not be
        read, and lists those that contain no metal species.
        """
        self._validate_properties(['cif_okay'])
        not_read = [mi for mi in self.mof_coll
                    if not self.properties[mi['checksum']]['cif_okay']]
        read_len = len(self.mof_coll) - len(not_read)
        print('\nChecked {} structures.'.format(len(self.mof_coll)))
        # The msg dicts pick singular/plural phrasing; min() clamps the key.
        msg1 = {0: '\r',
                1: '{} was read.'.format(read_len),
                2: '{} were read.'.format(read_len)}
        msg2 = {0: '\r',
                1: '{} was NOT read.'.format(len(not_read)),
                2: '{} were NOT read.'.format(len(not_read))}
        print(msg1[min(2, read_len)])
        print(msg2[min(2, len(not_read))])
        msg = {0: "\r", 1: "\nThe following structures could not be read:"}
        print(msg[min(1, len(not_read))])
        for i, mi in enumerate(not_read):
            print("{}".format(mi['mof_name']))
        mofs_no_metal = [mi for mi in self.mof_coll
                         if self.properties[mi['checksum']]['cif_okay']
                         and not
                         self.properties[mi['checksum']]['metal_species']]
        msg = {0: "\r", 1: "The following structures contain no metal:"}
        print(msg[min(1, len(mofs_no_metal))])
        for mi in mofs_no_metal:
            p = self.properties[mi['checksum']]
            print("{}.cif {}".format(p['name'],
                                     p['metal_species']+p['non_metal_species']))
        print('\nFinished checking structures.')
    def check_analysis_status(self):
        """Iterate over all the MOFs in the collection and check if the results
        from the OMS analysis exist.

        Prints a completion count and the file names still missing results.
        """
        print(self.separator)
        not_done = [mi['mof_file'] for mi in self.mof_coll
                    if not self._check_if_results_exist(mi['mof_name'])]
        done = len(self.mof_coll) - len(not_done)
        # min() clamps the dict keys to pick the right message variant.
        msg1 = {0: '\nAnalysis for no structures has been completed.',
                1: '\nAnalysis for {} out of {} structures have been completed.'
                .format(done, len(self.mof_coll))}
        msg2 = {0: "\r", 1: "\nThe following structures are missing:"}
        print(msg1[min(1, done)])
        print(msg2[min(1, len(not_done))])
        for nd in not_done:
            print(nd)
        print(self.separator)
    def sample_collection(self, sample_size=50):
        """Randomly select a sample of MOFs in the collection and
        return a new collection with the MOFs in the sample.
        :param sample_size: Number of MOFs to be selected. Default value is 50.
        :return: A new MofCollection built from the sampled CIF files,
        sharing this collection's analysis folder.
        """
        ll = len(self.mof_coll)
        # Clamp the requested sample size to the size of the collection.
        if sample_size > ll:
            sample_size = ll
            print(f"Can only sample up to the number of MOFs "
                  f"in the collection ({ll}).")
        mof_list = [mi['mof_file'] for mi in self.mof_coll]
        sampled_list = random.sample(mof_list, sample_size)
        return MofCollection(sampled_list, analysis_folder=self.analysis_folder)
    def filter_collection(self, using_filter=None,
                          new_collection_folder=None,
                          new_analysis_folder=None):
        """Filter a collection given a number of filters.
        Calling this method of a MofCollection applies the filter and creates a
        new collection for the MOFs that match the filter. The cif files that
        match the filter are copied to the new_collection_folder.
        The filters can be one or more of the following:
        'density': [min, max] (range of values)
        'oms_density': [min, max] (range of values)
        'uc_volume': [min, max] (range of values)
        'metal_species': ["Cu", "Zn", ...] (list of metal species)
        'non_metal_species': ["C", "N", ...] (list of non metal species)
        'cif_okay': True (boolean value)
        'has_oms': True (boolean value)
        'mof_name': [mof_name1, mof_name2] (string values)
        :param using_filter: Filter used to identify MOFs with certain
        characteristics. Has to be a python dictionary (default: None)
        :param new_collection_folder: Path to the folder where the CIF files of
        the filtered collection will be stored. If set to None the CIF files
        will not be copied. (default: None)
        :param new_analysis_folder: Path to the folder where the OMS result
        files of the filtered collection will be stored. If set to None the
        result files will not be copied. (default: None)
        :return: A MofCollection with only the filtered MOFs. If
        new_collection_folder or new_analysis_folder is not set then the
        collection will point to the original location of these files.
        """
        print(self.separator)
        # Reject unknown filter names before doing any work.
        if any([f not in self.filter_functions for f in using_filter]):
            print('Unknown filter. Try again using one of the following '
                  'filters:\n\"{}\"'.format(", ".join(self.filter_functions)))
            print(self.separator)
            return
        # Make sure every property the filter needs is available (may read
        # CIF files or OMS results on demand).
        validation_level, cf = self._validate_properties(using_filter)
        if validation_level == 1 and not cf:
            print('Properties from CIF files could not be validated.'
                  'Check that all CIF files can be read')
            return
        elif validation_level == 2 and not cf:
            print('Requested a filter that needs OMS information but the '
                  'OMS analysis does not appear to be complete.\n'
                  'Run it first and try again.')
            return
        print(self.separator)
        print('Filtering collection.')
        filtered_list = []
        for i, mi in enumerate(self.mof_coll):
            mp = self.properties[mi['checksum']]
            fun = self._apply_filter
            # A MOF matches only when it satisfies every filter criterion.
            if all([fun(f, mp[f], using_filter[f]) for f in using_filter]):
                filtered_list.append(mi['mof_file'])
        found_s = {0: "No", 1: len(filtered_list)}[min(1, len(filtered_list))]
        print('\n{} MOFs were matched using the provided'
              ' filter.'.format(found_s))
        if len(filtered_list) == 0:
            print('No collection returned.')
            return None
        print('Returning a new collection using the matched MOFs.')
        sub_collection = MofCollection(filtered_list,
                                       analysis_folder=self.analysis_folder)
        print(self.separator)
        # Optionally relocate the CIF and result files of the sub-collection.
        sub_collection.copy_cifs(new_collection_folder)
        sub_collection.copy_results(new_analysis_folder)
        return sub_collection
def read_cif_files(self):
"""Iterate over all MOF files in the collection, load each CIF and
store MOF properties such as density, unit cell volume etc.
"""
print(self.separator)
print('Reading CIF files and updating properties...')
self._loop_over_collection(self._update_property_from_cif_file)
self._store_properties()
print('Done')
print(self.separator)
def read_oms_results(self):
"""Iterate over all MOF files in the collection, load each OMS result
file and store OMS information to the MOF properties.
"""
print(self.separator)
print('Adding results to properties.')
self._loop_over_collection(self._update_property_from_oms_result)
print('Done')
self._store_properties()
print(self.separator)
    def copy_cifs(self, target_folder):
        """Copy cif files from their existing location to the specified
        target_folder.
        :param target_folder: Path of folder to copy collection CIF files to.
        If None, nothing is copied.
        """
        if target_folder is None:
            return
        tf_abspath = os.path.abspath(target_folder)
        Helper.make_folder(tf_abspath)
        print(self.separator)
        print('The cif files for this collection will be copied to'
              ' the specified folder:\n\"{}\"'.format(tf_abspath))
        print('The cif paths will be updated.')
        # Iterate over a shallow copy because the entries of self.mof_coll
        # are replaced in place with dictionaries holding the new paths.
        for i, mi in enumerate(list(self.mof_coll)):
            destination_path = "{}/{}.cif".format(tf_abspath, mi['mof_name'])
            self.mof_coll[i] = {"mof_name": mi['mof_name'],
                                "mof_file": destination_path,
                                "checksum": mi['checksum']}
            if not os.path.isfile(destination_path):
                # Never overwrite a CIF file that is already there.
                shutil.copyfile(mi['mof_file'], destination_path)
        print(self.separator)
    def copy_results(self, target_folder):
        """Copy OMS result files from their existing location to the specified
        target_folder.
        :param target_folder: Path of folder to copy collection OMS result
        files to. If None, nothing is copied.
        """
        if target_folder is None:
            return
        print(self.separator)
        tf_abspath = os.path.abspath(target_folder)
        destination_path = tf_abspath + '/oms_results'
        print('The result files for this collection will be copied to the '
              'specified folder:\n{}\nThe analysis folder will be updated.'
              ''.format(tf_abspath))
        Helper.make_folder(tf_abspath)
        Helper.make_folder(destination_path)
        # Only MOFs whose analysis has finished have a result folder to copy.
        for i, mi in enumerate(self.mof_coll):
            mof_name = mi['mof_name']
            if self._check_if_results_exist(mof_name):
                source_path = "{}/{}".format(self.oms_results_folder, mof_name)
                Helper.copy_folder(destination_path, source_path)
        # Point the collection at the new analysis folder and refresh the
        # 'has_oms' flags from the copied results.
        self.analysis_folder = tf_abspath
        self._validate_properties(['has_oms'])
        print(self.separator)
    def summarize_results(self, max_atomic_number=None):
        """Create a summary table for the OMS results of the collection, group
        results by metal type.
        Prints totals and a per-metal table, and writes the table to a
        space-separated file in the summary folder.
        :param max_atomic_number: Maximum atomic number to be included in
        summary table. If not defined all metal atoms will be considered
        (default: None)
        """
        df = self.metal_site_df.copy()
        # Unique sites only; open sites are the subset with 'is_open' True.
        site_df_u = df.loc[df['unique']]
        site_df_o = site_df_u.loc[site_df_u['is_open']]
        all_sites = self._group_and_summarize(site_df_u, ['MOFs',
                                                          'Metal Sites'])
        open_sites = self._group_and_summarize(site_df_o, ['MOFs_with_OMS',
                                                           'OMS'])
        s_df = pd.concat([all_sites, open_sites], axis=1)
        # Metals with no open sites get NaN from the concat; treat as zero.
        s_df.fillna(0.0, inplace=True)
        s_df = s_df.astype(int)
        s_df['MOFs_with_OMS(%)'] = 100.0 * s_df['MOFs_with_OMS']/s_df['MOFs']
        s_df['OMS (%)'] = 100.0 * s_df['OMS'] / s_df['Metal Sites']
        cols = ['MOFs', 'MOFs_with_OMS', 'Metal Sites', 'OMS',
                'MOFs_with_OMS(%)', 'OMS (%)']
        s_df = s_df[cols]
        s_df['MOFs_with_OMS(%)'] = s_df['MOFs_with_OMS(%)'].apply('{:.2f} %'
                                                                  ''.format)
        s_df['OMS (%)'] = s_df['OMS (%)'].apply('{:.2f} %'.format)
        s_df.sort_values("MOFs", inplace=True, ascending=False)
        num_mofs = df['mof_name'].nunique()
        num_oms_mofs = df[df['is_open']]['mof_name'].nunique()
        num_sites = len(site_df_u)
        num_oms_sites = len(site_df_u[site_df_u['is_open']])
        print(self.separator)
        print('Number of total MOFs: {}'.format(num_mofs))
        print('Number of total MOFs with open metal sites: {}'
              ''.format(num_oms_mofs))
        print('Number of total unique sites: {}'.format(num_sites))
        print('Number of total unique open metal sites: {}'
              ''.format(num_oms_sites))
        print(self.separator)
        msg = "Summary Table\n"
        # NOTE(review): the second argument to format() below is unused by
        # the "{0}" template — it looks like a leftover from the
        # "stats_less_{1}.out" branch further down.
        fname = "{0}/stats.out".format(self.summary_folder, max_atomic_number)
        if max_atomic_number:
            # Keep only metals whose atomic number is within the cutoff.
            subset = pd.Series(s_df.index).apply(
                lambda x: Atom(x).atomic_number <= max_atomic_number)
            s_df = s_df.loc[subset.values]
            fname = "{0}/stats_less_{1}.out".format(self.summary_folder,
                                                    max_atomic_number)
            msg = "Summary Table for metal atoms with atomic number smaller " \
                  "than {}.\n".format(max_atomic_number)
        print(msg)
        print(s_df)
        s_df.to_csv(fname, sep=' ')
def summarize_tfactors(self):
"""Summarize the t-factor information and make histograms for all the
MOFs in the collection.
"""
tfac_analysis_folder = self.summary_folder + '/tfac_analysis'
Helper.make_folder(self.summary_folder)
Helper.make_folder(tfac_analysis_folder)
df = self.metal_site_df.copy()
sites_u = df[df['unique']]
for n in range(4, 7):
self._write_t_factors(sites_u, n, tfac_analysis_folder)
    def _load_mofs(self):
        """Add MOFs to collection, use CIF file checksum as an identifier.
        Populates self.mof_coll from self.path_list and seeds
        self.properties for checksums seen for the first time. Exits the
        program when the same checksum maps to two different MOF names.
        """
        print('Loading CIF files...')
        # Progress reporting: print every `li` files, `lm` scales to percent.
        li = max(int(len(self.path_list) / 1000), 1)
        lm = len(self.path_list) / 100.0
        for i, mof_file in enumerate(self.path_list):
            if i % li == 0:
                print("{:4.1f} %".format((i+1) / lm), end="\r", flush=True)
            checksum = Helper.get_checksum(mof_file)
            mof_name = os.path.splitext(os.path.basename(mof_file))[0]
            mof_info = {"mof_name": mof_name,
                        "mof_file": mof_file,
                        "checksum": checksum}
            self.mof_coll.append(mof_info)
            if checksum not in self.properties:
                self.properties[checksum] = {"mof_name": mof_name}
            else:
                # A checksum must always map to the same MOF name; a
                # mismatch means the collection is inconsistent, so abort.
                if self.properties[checksum]["mof_name"] != mof_name:
                    exit("MOF name and CIF checksum mismatch for {}.cif "
                         "{}.cif. Either the CIF files has already been "
                         "processed with a different name, or the CIF file "
                         "has changed since it was processed."
                         "".format(mof_name,
                                   self.properties[checksum]['mof_name']))
            if self._check_if_results_exist(mof_name):
                # Results already on disk: make sure they match this file.
                self._compare_checksums(mof_file, mof_name, checksum)
        print("\nAll Done.")
        self._store_properties()
    def _compare_checksums(self, mof_file, mof_name, checksum):
        """If OMS results exist for one of the CIF names in the collection then
        ensure that the CIF checksum matches the one in the result file.
        Exits the program with an explanatory message on a mismatch.
        :param mof_file: Path to the CIF file being loaded.
        :param mof_name: Name of the MOF (CIF file name without extension).
        :param checksum: Checksum of the CIF file being loaded.
        """
        mof_folder = "{0}/{1}/".format(self.oms_results_folder,
                                       mof_name)
        results_file = "{0}/{1}.json".format(mof_folder, mof_name)
        with open(results_file, 'r') as f:
            results_dict = json.load(f)
        if results_dict['checksum'] != checksum:
            print("Results for a MOF named {0} appear to already exist"
                  " in the analysis folder \n\"{1}\".\nHowever the "
                  "file checksum in the result file does not match the "
                  "checksum of \n\"{2}\".\n\nHave the CIF files in the "
                  "collection changed since the results were computed?"
                  "\nClear results and try again.".format(mof_name,
                                                          mof_folder,
                                                          mof_file))
            exit(1)
def _run_batch(self, b, batch, overwrite, status):
"""Run OMS analysis for each of the batches."""
for i, mi in enumerate(batch):
status[b] = i
self._analyse(mi, overwrite)
status[b] = -1
    def _analyse(self, mi, overwrite):
        """For a given CIF file, create MofStructure object and run OMS
        analysis. If overwrite is false check if results already exist first.
        :param mi: MOF-info dictionary ('mof_name', 'mof_file', 'checksum').
        :param overwrite: If False, skip MOFs whose results already exist.
        """
        mof_folder = "{}/{}".format(self.oms_results_folder, mi['mof_name'])
        results_exist = self._check_if_results_exist(mi['mof_name'])
        if not overwrite and results_exist:
            print("Skipping {}. Results already exist and overwrite is set "
                  "to False.".format(mi['mof_name']))
            return
        mof = self._create_mof_from_cif_file(mi['mof_file'])
        # Only run the analysis when the CIF file could actually be read.
        if mof.summary['cif_okay']:
            mof.analyze_metals(output_folder=mof_folder)
    def _make_batches(self, num_batches=1, overwrite=False):
        """Split collection into number of batches.
        MOFs that could not be read are dropped; when overwrite is False,
        MOFs with existing results are dropped as well. The remaining MOFs
        are distributed over self.batches so each batch carries roughly the
        same total load (per the 'load_balancing_index' property).
        :param num_batches: Number of batches (default: 1)
        :param overwrite: Controls if the results will be overwritten or not
        (default: False)
        """
        print(self.separator)
        if cpu_count() < num_batches:
            warnings.warn('You requested {} batches but there are only {}'
                          ' CPUs available.'.format(num_batches, cpu_count()))
        b_s = {1: 'batch', 2: 'batches'}[min(num_batches, 2)]
        print('{} {} requested. '.format(num_batches, b_s))
        print('Overwrite is set to {}. '.format(overwrite))
        print('Storing results in {}. '.format(self.oms_results_folder))
        print(self.separator)
        self._validate_properties(['load_balancing_index'])
        print(self.separator)
        # Map each MOF name to its load balancing index.
        lbi = {}
        for mi in self.mof_coll:
            mp = self.properties[mi['checksum']]
            lbi[mi['mof_name']] = mp['load_balancing_index']
        # Remove any structures not in load balancing index.
        subset = [mc for mc in self.mof_coll if mc['mof_name'] in lbi]
        # If there is no balancing info for a MOF at this point it means
        # that it could not be read.
        if len(self.mof_coll) != len(subset):
            print('\nSkipping {} structures that could not be read.'
                  ' '.format(len(self.mof_coll)-len(subset)))
        # Remove any structures already completed
        if not overwrite:
            print('Checking if results for any of the MOFs exist...')
            all_ = len(subset)
            subset = [mc for mc in subset if not
                      self._check_if_results_exist(mc['mof_name'])]
            msg = {0: "Will not skip any MOFs",
                   1: "Skipping {} MOFs because results were found. "
                      "".format(all_ - len(subset))}
            print(msg[min(1, all_ - len(subset))])
        # Sort mof list using the load balancing index
        subset.sort(key=lambda x: lbi[x['mof_name']])
        sum_load_balance = sum(lbi[mi["mof_name"]] for mi in subset)
        lb_per_batch = sum_load_balance / num_batches
        # Select only up to analysis_limit to work with
        if self.analysis_limit and len(subset) > self.analysis_limit:
            subset = subset[0:self.analysis_limit]
        self.batches = [[] for b in range(num_batches)]
        for i, mi in enumerate(subset):
            # The cumulative load of all previous MOFs decides the batch
            # index, balancing total load across batches.
            sum_lb = sum([lbi[mi["mof_name"]] for mi in subset[0:i]])
            batch = int(sum_lb / lb_per_batch)
            self.batches[batch].append(mi)
        print(self.separator)
        for i, batch in enumerate(self.batches):
            print("Batch {0} has {1} MOFs".format(i+1, len(batch)))
        print(self.separator)
def _check_if_results_exist(self, mof_name):
"""Check if OMS results already exist for a MOF"""
mof_folder = "{}/{}".format(self.oms_results_folder, mof_name)
if os.path.isfile(mof_folder+'/'+mof_name+'.json'):
if not os.path.isfile(mof_folder + '/' + 'analysis_running'):
return True
return False
def _loop_over_collection(self, func):
"""Iterate over all the MOFs in the collection and run the specified
function.
:param func: Function to use.
"""
li = max(int(len(self.mof_coll) / 1000), 1)
lm = len(self.mof_coll) / 100
for i, mi in enumerate(self.mof_coll):
if i % li == 0:
print("{:4.1f} % {} {:100}".format((i+1)/lm, mi['mof_name'],
" "), end="\r", flush=True)
func(mi)
print()
def _apply_filter(self, filter_, v, f):
"""Apply the proper filter_function for the given filter"""
return self.filter_functions[filter_](v, f)
@staticmethod
def _apply_filter_value(v, f):
"""Filter function to match a value. Returns false if values is None"""
if not v:
return False
return v == f
@staticmethod
def _apply_filter_in_value(v, f):
"""Filter function to match all values of a list"""
if not v:
return False
return all([f_ in v for f_ in f])
@staticmethod
def _apply_value_in_filter(v, f):
"""Filter function to match any of the values of a list"""
if not v:
return False
return v in f
@staticmethod
def _apply_filter_range(v, f):
"""Filter function to match a range of values"""
if not v:
return False
return min(f) <= v <= max(f)
    def _validate_properties(self, keys):
        """Check if a given property can be found in the properties dictionary.
        If not try to read the CIF file and check again. If the check fails
        again try to read the OMS results and check again. If the check fails
        a third time return False, the property cannot be validated.
        :param keys: Iterable of property names to validate.
        :return: (validation_level, ok) where validation_level is 0 when all
        properties were already present, 1 when the CIF files had to be
        read, 2 when the OMS results had to be read; ok is False when some
        property is still missing after all fallbacks.
        """
        msg = {1: "Validating property", 2: "Validating properties"}
        print('\n{} : '.format(msg[min(2, len(keys))]), end='')
        print("\"{}\"".format(", ".join([k for k in keys])))
        validation_level = 0
        # Progress reporting: print every `li` MOFs, `lm` scales to percent.
        li = max(int(len(self.mof_coll)/1000), 1)
        lm = len(self.mof_coll) / 100
        for i, mi in enumerate(self.mof_coll):
            if i % li == 0:
                print("{:4.1f} % {} {:100}".format((i+1) / lm, mi['mof_name'],
                                                   " "), end="\r", flush=True)
            mp = self.properties[mi['checksum']]
            if not self._validate_property(mp, keys):
                # First fallback: re-read the CIF file.
                self._update_property_from_cif_file(mi)
                validation_level = 1
                if not self._validate_property(mp, keys):
                    # Second fallback: read the OMS result file.
                    self._update_property_from_oms_result(mi)
                    validation_level = 2
                    if not self._validate_property(mp, keys):
                        self._store_properties()
                        print('\nProperty Missing\n{}'.format(self.separator))
                        return validation_level, False
        self._store_properties()
        print("Validated 100 % "+100*" ", end="\r")
        print()
        return validation_level, True
@staticmethod
def _validate_property(mp, keys):
"""Check if property exists."""
test1 = all([f in mp for f in keys])
if test1 and all([mp[f] != 'N/A' for f in keys]):
return True
if test1 and not mp['cif_okay']:
return True
return False
    def _update_property_from_cif_file(self, mi):
        """Update properties dictionary from a CIF file.
        :param mi: MOF-info dictionary with 'checksum', 'mof_file' and
        'mof_name' keys.
        """
        mp = self.properties[mi['checksum']]
        mof = self._create_mof_from_cif_file(mi['mof_file'])
        if mof:
            mp.update(mof.summary)
            # Load balancing cost grows quadratically with structure size.
            self.load_balance_index[mi['mof_name']] = len(mof) * len(mof)
        # NOTE(review): when `mof` is falsy and no earlier entry exists for
        # this MOF, the lookup below raises KeyError — confirm upstream
        # guarantees an entry in load_balance_index.
        mp['load_balancing_index'] = self.load_balance_index[mi['mof_name']]
def _update_property_from_oms_result(self, mi):
"""Update properties dictionary from an OMS result file."""
mp = self.properties[mi['checksum']]
mof_name = mp["mof_name"]
mof_folder = "{0}/{1}/".format(self.oms_results_folder, mof_name)
results_file = "{0}/{1}.json".format(mof_folder, mof_name)
results_dict = None
if os.path.isfile(results_file):
results_dict = json.load(open(results_file))
if isinstance(results_dict, dict):
results_dict['source_name'] = mof_folder
mp.update(results_dict)
    def _store_properties(self):
        """Store properties dictionary as a python pickle file.
        The file at self._properties_filename is overwritten on every call.
        """
        with open(self._properties_filename, 'wb') as properties_file:
            pickle.dump(self._properties, properties_file)
@staticmethod
def _create_mof_from_cif_file(path_to_mof):
"""Create and return a MofStructure object from a path to a CIF file."""
mof = MofStructure.from_file(path_to_mof, primitive=False)
return mof
def _write_t_factors(self, sites, n, target):
"""Summarize the findings in table form and histograms for a give
t-factor.
"""
s_n = sites.loc[sites['number_of_linkers'] == n].copy()
s_n['is_open_yn'] = np.where(s_n['is_open'], 'yes', 'no')
s_n = s_n[['mof_name', 'is_open_yn', 't_factor']]
for flag in ['yes', 'no']:
outpath = "{}/{}_{}.out".format(target, flag, str(n))
s = s_n[s_n['is_open_yn'] == flag]
s.to_csv(outpath, index=False)
fout = "{}/{}_{}_hist.out".format(target, flag, n)
self._write_histogram(s['t_factor'], True, fout)
fout = "{}/{}_{}_hist_abs.out".format(target, flag, n)
self._write_histogram(s['t_factor'], False, fout)
fig = plt.figure(figsize=(10, 5))
plt.title('t-{} factor'.format(n))
s_yes = s_n[s_n['is_open_yn'] == 'yes']
s_yes['t_factor'].hist(bins=50, range=(0, 1), normed=False)
s_no = s_n[s_n['is_open_yn'] == 'no']
s_no['t_factor'].hist(bins=50, range=(0, 1), normed=False)
plt.show()
@staticmethod
def _write_histogram(sites, dens, target):
"""Generate histograms to be used for summarizing the t-factor
results.
"""
hist, edges = np.histogram(sites, bins=50, range=(0, 1), density=dens)
with open(target, 'w') as hist_file:
w = (edges[1] - edges[0]) / 2
for e, h in zip(edges, hist):
print(e + w, h, file=hist_file)
@staticmethod
def _group_and_summarize(df, names=None):
"""Group the DataFrame holding the OMS results by metal type and rename
its columns.
"""
rename = {"mof_name": names[0], "is_open": names[1]}
agg_dict = {"mof_name": pd.Series.nunique, "is_open": "count"}
return df.groupby('metal').agg(agg_dict).rename(columns=rename)
| [
"omsdetector.mof.Helper.get_checksum",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"time.sleep",
"omsdetector.mof.Helper.copy_folder",
"matplotlib.pylab.show",
"sys.exit",
"numpy.histogram",
"matplotlib.pylab.figure",
"numpy.where",
"pandas.DataFrame.from_dict",
"glob.glob",
"oms... | [((2481, 2522), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['self._analysis_folder'], {}), '(self._analysis_folder)\n', (2499, 2522), False, 'from omsdetector.mof import Helper\n'), ((2893, 2916), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['orf'], {}), '(orf)\n', (2911, 2916), False, 'from omsdetector.mof import Helper\n'), ((3083, 3105), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['sf'], {}), '(sf)\n', (3101, 3105), False, 'from omsdetector.mof import Helper\n'), ((5163, 5211), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['mof_info'], {'orient': '"""index"""'}), "(mof_info, orient='index')\n", (5185, 5211), True, 'import pandas as pd\n'), ((6417, 6466), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['site_info'], {'orient': '"""index"""'}), "(site_info, orient='index')\n", (6439, 6466), True, 'import pandas as pd\n'), ((8254, 8265), 'time.time', 'time.time', ([], {}), '()\n', (8263, 8265), False, 'import time\n'), ((9687, 9698), 'time.time', 'time.time', ([], {}), '()\n', (9696, 9698), False, 'import time\n'), ((12825, 12861), 'random.sample', 'random.sample', (['mof_list', 'sample_size'], {}), '(mof_list, sample_size)\n', (12838, 12861), False, 'import random\n'), ((17654, 17684), 'os.path.abspath', 'os.path.abspath', (['target_folder'], {}), '(target_folder)\n', (17669, 17684), False, 'import os\n'), ((17693, 17723), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['tf_abspath'], {}), '(tf_abspath)\n', (17711, 17723), False, 'from omsdetector.mof import Helper\n'), ((18757, 18787), 'os.path.abspath', 'os.path.abspath', (['target_folder'], {}), '(target_folder)\n', (18772, 18787), False, 'import os\n'), ((19042, 19072), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['tf_abspath'], {}), '(tf_abspath)\n', (19060, 19072), False, 'from omsdetector.mof import Helper\n'), ((19081, 19117), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', 
(['destination_path'], {}), '(destination_path)\n', (19099, 19117), False, 'from omsdetector.mof import Helper\n'), ((20312, 20354), 'pandas.concat', 'pd.concat', (['[all_sites, open_sites]'], {'axis': '(1)'}), '([all_sites, open_sites], axis=1)\n', (20321, 20354), True, 'import pandas as pd\n'), ((22485, 22524), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['self.summary_folder'], {}), '(self.summary_folder)\n', (22503, 22524), False, 'from omsdetector.mof import Helper\n'), ((22533, 22573), 'omsdetector.mof.Helper.make_folder', 'Helper.make_folder', (['tfac_analysis_folder'], {}), '(tfac_analysis_folder)\n', (22551, 22573), False, 'from omsdetector.mof import Helper\n'), ((29329, 29382), 'os.path.isfile', 'os.path.isfile', (["(mof_folder + '/' + mof_name + '.json')"], {}), "(mof_folder + '/' + mof_name + '.json')\n", (29343, 29382), False, 'import os\n'), ((33697, 33725), 'os.path.isfile', 'os.path.isfile', (['results_file'], {}), '(results_file)\n', (33711, 33725), False, 'import os\n'), ((34309, 34361), 'omsdetector.mof.MofStructure.from_file', 'MofStructure.from_file', (['path_to_mof'], {'primitive': '(False)'}), '(path_to_mof, primitive=False)\n', (34331, 34361), False, 'from omsdetector.mof import MofStructure\n'), ((34628, 34665), 'numpy.where', 'np.where', (["s_n['is_open']", '"""yes"""', '"""no"""'], {}), "(s_n['is_open'], 'yes', 'no')\n", (34636, 34665), True, 'import numpy as np\n'), ((35183, 35210), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (35193, 35210), True, 'import matplotlib.pylab as plt\n'), ((35491, 35501), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (35499, 35501), True, 'import matplotlib.pylab as plt\n'), ((35690, 35746), 'numpy.histogram', 'np.histogram', (['sites'], {'bins': '(50)', 'range': '(0, 1)', 'density': 'dens'}), '(sites, bins=50, range=(0, 1), density=dens)\n', (35702, 35746), True, 'import numpy as np\n'), ((2067, 2104), 'os.path.abspath', 
'os.path.abspath', (['self.analysis_folder'], {}), '(self.analysis_folder)\n', (2082, 2104), False, 'import os\n'), ((3521, 3562), 'os.path.isfile', 'os.path.isfile', (['self._properties_filename'], {}), '(self._properties_filename)\n', (3535, 3562), False, 'import os\n'), ((7479, 7518), 'glob.glob', 'glob.glob', (["(collection_folder + '/*.cif')"], {}), "(collection_folder + '/*.cif')\n", (7488, 7518), False, 'import glob\n'), ((8445, 8512), 'multiprocessing.Process', 'Process', ([], {'target': 'self._run_batch', 'args': '(i, batch, overwrite, status)'}), '(target=self._run_batch, args=(i, batch, overwrite, status))\n', (8452, 8512), False, 'from multiprocessing import Process, cpu_count, Array\n'), ((23166, 23195), 'omsdetector.mof.Helper.get_checksum', 'Helper.get_checksum', (['mof_file'], {}), '(mof_file)\n', (23185, 23195), False, 'from omsdetector.mof import Helper\n'), ((24726, 24738), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24735, 24738), False, 'import json\n'), ((25366, 25373), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (25370, 25373), False, 'from sys import exit\n'), ((26639, 26650), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (26648, 26650), False, 'from multiprocessing import Process, cpu_count, Array\n'), ((34100, 34146), 'pickle.dump', 'pickle.dump', (['self._properties', 'properties_file'], {}), '(self._properties, properties_file)\n', (34111, 34146), False, 'import pickle\n'), ((3674, 3702), 'pickle.load', 'pickle.load', (['properties_file'], {}), '(properties_file)\n', (3685, 3702), False, 'import pickle\n'), ((9003, 9024), 'time.sleep', 'time.sleep', (['wait_time'], {}), '(wait_time)\n', (9013, 9024), False, 'import time\n'), ((18270, 18302), 'os.path.isfile', 'os.path.isfile', (['destination_path'], {}), '(destination_path)\n', (18284, 18302), False, 'import os\n'), ((18320, 18369), 'shutil.copyfile', 'shutil.copyfile', (["mi['mof_file']", 'destination_path'], {}), "(mi['mof_file'], destination_path)\n", (18335, 
18369), False, 'import shutil\n'), ((19355, 19404), 'omsdetector.mof.Helper.copy_folder', 'Helper.copy_folder', (['destination_path', 'source_path'], {}), '(destination_path, source_path)\n', (19373, 19404), False, 'from omsdetector.mof import Helper\n'), ((29397, 29450), 'os.path.isfile', 'os.path.isfile', (["(mof_folder + '/' + 'analysis_running')"], {}), "(mof_folder + '/' + 'analysis_running')\n", (29411, 29450), False, 'import os\n'), ((21752, 21773), 'pandas.Series', 'pd.Series', (['s_df.index'], {}), '(s_df.index)\n', (21761, 21773), True, 'import pandas as pd\n'), ((23236, 23262), 'os.path.basename', 'os.path.basename', (['mof_file'], {}), '(mof_file)\n', (23252, 23262), False, 'import os\n'), ((26806, 26817), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (26815, 26817), False, 'from multiprocessing import Process, cpu_count, Array\n'), ((21807, 21814), 'omsdetector.atomic_parameters.Atom', 'Atom', (['x'], {}), '(x)\n', (21811, 21814), False, 'from omsdetector.atomic_parameters import Atom\n')] |
"""
Module varn calculates local densities of the 2D system and plots histogram
of these local densities.
Files are saved according to the active_particles.naming.varN standard.
Environment modes
-----------------
COMPUTE : bool
Compute local densities.
DEFAULT: False
CHECK : bool
Evaluate difference between the parametrised global packing fraction and
the measured averaged packing fraction.
DEFAULT: False
PLOT : bool
Plots histogram of local densities.
DEFAULT: False
PLOT_MODE : string
Histogram type.
_______________________________________________________________________
| Mode | Histogram |
|________|______________________________________________________________|
| 'mean' | Simple histogram of local densities from all computed times. |
|________|______________________________________________________________|
| 'time' | Histogram of local densities as function of time. |
|________|______________________________________________________________|
DEFAULT: mean
SHOW : bool
Show graphs.
DEFAULT: False
PEAK [(COMPUTE and SHOW) or PLOT mode] : bool
Highlight highest peak of the histogram.
DEFAULT: True
SAVE [(COMPUTE and SHOW) or PLOT mode] : bool
Save graphs.
DEFAULT: False
SUPTITLE [(COMPUTE and SHOW) or PLOT mode] : bool
Display suptitle.
DEFAULT: True
Environment parameters
----------------------
DATA_DIRECTORY : string
Data directory.
DEFAULT: current working directory
PARAMETERS_FILE : string
Simulation parameters file.
DEFAULT: DATA_DIRECTORY/active_particles.naming.parameters_file
WRAPPED_FILE : string
Wrapped trajectory file. (.gsd)
DEFAULT: DATA_DIRECTORY/active_particles.naming.wrapped_trajectory_file
INITIAL_FRAME : int
Frame to consider as initial.
NOTE: INITIAL_FRAME < 0 will be interpreted as the initial frame being
the middle frame of the simulation.
DEFAULT: -1
INTERVAL_MAXIMUM : int
Maximum number of frames at which we compute local densities.
DEFAULT: 1
BOX_SIZE : float
Length of the square boxes in which particles are counted to compute local
densities.
DEFAULT: active_particles.analysis.varn._box_size
N_CASES : int
    Number of nodes in each direction of the grid on which local densities
    are computed.
DEFAULT: smallest integer value greater than or equal to the square root of
the number of particles from the simulation parameters file.
N_BINS [PLOT or SHOW mode] : int
Number of bins for the histogram of local densities.
DEFAULT: active_particles.analysis.varn._Nbins
PHIMAX [PLOT or SHOW mode] : int
Maximum local density for the histogram of local densities.
DEFAULT: active_particles.analysis.varn._phimax
PPHILOCMIN [PLOT or SHOW and 'time' mode] : float
Minimum local density probability.
DEFAULT: active_particles.analysis.varn._pphilocmin
PPHILOCMAX [PLOT or SHOW and 'time' mode] : float
Maximum local density probability.
DEFAULT: active_particles.analysis.varn._pphilocmax
CONTOURS : int
Number of contour lines.
DEFAULT: active_particles.analysis.varn._contours
FONT_SIZE : int
Plot font size.
DEFAULT: active_particles.analysis.varn._font_size
Output
------
[COMPUTE MODE]
> Prints neighbours grid computation time and execution time.
> Saves computed local densities according to the active_particles.naming.varN
standard in DATA_DIRECTORY.
[SHOW or PLOT mode]
> Plots histogram of local densities.
[SAVE mode]
> Saves local densities histogram figure in DATA_DIRECTORY.
"""
import active_particles.naming as naming
from active_particles.init import get_env, slurm_output
from active_particles.dat import Gsd
from active_particles.maths import Histogram
from os import getcwd
from os import environ as envvar
from os.path import join as joinpath
import numpy as np
from math import ceil
import pickle
from collections import OrderedDict
from datetime import datetime
import matplotlib as mpl
if not(get_env('SHOW', default=False, vartype=bool)):
mpl.use('Agg') # avoids crash if launching without display
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from mpl_toolkits.axes_grid1 import make_axes_locatable
# DEFAULT VARIABLES
_init_frame = -1 # default frame to consider as initial
_int_max = 1 # default maximum number of frames on which to calculate densities
_box_size = 10 # default length of the square box in which particles are counted
_Nbins = 10 # default number of bins for the histogram
_phimax = 1 # default maximum local density for histogram
_pphilocmin = 1e-4 # default minimum local density probability
_pphilocmax = 1e-1 # default maximum local density probability
_contours = 20 # default contour level value
_font_size = 10 # default plot font size
# FUNCTIONS AND CLASSES
def density(w_traj, frame, Ncases, box_size):
    """
    Returns local densities in squares of length box_size around
    Ncases x Ncases nodes, uniformly distributed in the 2D system, at frame
    'frame'.
    Parameters
    ----------
    w_traj : active_particles.dat.Gsd
        Wrapped trajectory object.
    frame : int
        Frame index.
    Ncases : int
        Number of nodes in one direction.
    box_size : float
        Length of the square box in which we calculate the local density.
    Returns
    -------
    density_list : 1D Numpy array
        Array of calculated local densities.
    """
    L = w_traj.box_size()  # system box size
    dL = L/Ncases  # distance between two consecutive nodes
    max_node_dist = ceil(box_size/dL)  # maximum distance in infinity norm in terms of nodes between particle and containing node
    area_sum = np.zeros((Ncases, Ncases))  # sum of particles' area close to each node (centre of grid box) of the system
    def node_position(node_index):
        """
        Returns node position from node index.
        Parameters
        ----------
        node_index : 2-uple of int
            Node index.
        Returns
        -------
        r : (2,) Numpy array
            Position of node.
        """
        return dL*(1/2 + np.array(node_index)) - L/2
    for position, area in zip(w_traj.position(frame),
        (np.pi/4)*(w_traj.diameter(frame)**2)):
        closest_node_index = np.array((position + L/2)//dL, dtype=int)  # index of closest node
        # Visit every node within max_node_dist (in nodes, infinity norm)
        # of the particle's closest node; modulo wraps periodic boundaries.
        for dx in range(-max_node_dist, max_node_dist + 1):
            for dy in range(-max_node_dist, max_node_dist + 1):
                node_index = tuple(
                    (closest_node_index + np.array([dx, dy]))%Ncases)
                if (np.abs(position - node_position(node_index))
                    < box_size/2).all():  # particle within box of node
                    area_sum[node_index] += area
    return area_sum/(box_size**2)
def histogram(densities, Nbins, phimax):
    """
    Compute the histogram of an array of local densities.

    Parameters
    ----------
    densities : array-like
        Array of densities.
    Nbins : int
        Number of bins for the histogram.
    phimax : float
        Maximum density for the histogram.
        NOTE: the minimum density is 0.

    Returns
    -------
    bins : Numpy array
        Bins of the histogram.
    hist : Numpy array
        Values of the histogram at bins.
    """
    density_hist = Histogram(Nbins, 0, phimax)
    density_hist.add_values(densities)
    return density_hist.bins, density_hist.get_histogram()
class Plot:
    """
    Plot mean histograms of densities.

    NOTE: the figure suptitle reads the module-level globals `parameters`,
    `init_frame`, `int_max`, `Ncases` and `box_size`, which are defined in
    the __main__ section of this script.
    """
    def __init__(self, suptitle=True):
        """
        Set figure.

        Parameters
        ----------
        suptitle : bool
            Display suptitle. (default: True)
        """
        self.fig, self.ax = plt.subplots()
        # Suptitle shows simulation parameters and analysis settings taken
        # from module-level globals (see class docstring).
        if suptitle: self.fig.suptitle(
            r'$N=%.2e, \phi=%1.2f, \tilde{v}=%.2e, \tilde{\nu}_r=%.2e$'
            % (parameters['N'], parameters['density'], parameters['vzero'],
            parameters['dr']) + '\n' +
            r'$S_{init}=%.2e, S_{max}=%.2e, N_{cases}=%.2e, l=%.2e$'
            % (init_frame, int_max, Ncases, box_size))
        self.ax.set_xlabel(r'$\phi_{loc}$')
        self.ax.set_ylabel(r'$P(\phi_{loc})$')
    def add_hist(self, bins, hist, peak=True):
        """
        Add histogram of densities (log-scaled y axis).

        Parameters
        ----------
        bins : array-like
            Bins of the histogram.
        hist : array-like
            Values of the histogram at bins.
        peak : bool
            Highlight tallest peak of histogram. (default: True)

        Returns
        -------
        line : matplotlib.lines.Line2D
            Plotted histogram line.
        """
        line, = self.ax.semilogy(bins, hist)
        if peak:
            # Locate the most probable local density and mark it with dashed
            # cross-hairs in the line's own colour.
            philocmax, Pphilocmax = bins[np.argmax(hist)], np.max(hist)
            self.ax.axhline(Pphilocmax, 0, 1,
                linestyle='--', color=line.get_color())
            self.ax.axvline(philocmax, 0, 1,
                linestyle='--', color=line.get_color(),
                label=r'$(\phi_{loc}^* = %1.2f, P(\phi_{loc}^*) = %.2e)$'
                % (philocmax, Pphilocmax))
        return line
class PlotTime:
    """
    Plot histograms of densities as functions of time (filled contour plot
    of log-probability over the time/density plane).

    NOTE: the figure suptitle reads the module-level globals `parameters`,
    `int_max`, `Ncases` and `box_size`, which are defined in the __main__
    section of this script.
    """
    def __init__(self, Nbins, phimax,
        pphilocmin=_pphilocmin, pphilocmax=_pphilocmax, contours=_contours,
        colormap=plt.cm.inferno, pad=20, suptitle=True):
        """
        Set figure and histogram parameters.

        Parameters
        ----------
        Nbins : int
            Number of bins for the histogram.
        phimax : float
            Maximum local density for histogram.
        pphilocmin : float
            Minimum local density probability.
            (default: active_particles.analysis.varn._pphilocmin)
        pphilocmax : float
            Maximum local density probability.
            (default: active_particles.analysis.varn._pphilocmax)
        contours : int
            Number of contour lines.
            (default: active_particles.analysis.varn._contours)
        colormap : matplotlib colormap
            Histogram colormap. (default: matplotlib.pyplot.cm.inferno)
        pad : float
            Separation between label and colormap. (default: 20)
        suptitle : bool
            Display suptitle. (default: True)
        """
        self.Nbins = Nbins
        self.phimax = phimax
        # Probabilities are handled in log10 space throughout this class.
        self.pphilocmin = np.log10(pphilocmin)
        self.pphilocmax = np.log10(pphilocmax)
        self.contours = contours
        self.fig, self.ax = plt.subplots()
        self.fig.subplots_adjust(top=0.98, bottom=0.10, left=0.10, right=0.88)
        self.cmap = colormap
        self.norm = colors.Normalize(
            vmin=self.pphilocmin, vmax=self.pphilocmax)
        # Stand-alone colorbar in an axes appended to the right of the plot.
        self.colorbar = mpl.colorbar.ColorbarBase(
            make_axes_locatable(self.ax).append_axes(
                "right", size="5%", pad=0.05),
            cmap=self.cmap, norm=self.norm, orientation='vertical')
        if suptitle: self.fig.suptitle(
            r'$N=%.2e, \phi=%1.2f, \tilde{v}=%.2e, \tilde{\nu}_r=%.2e$'
            % (parameters['N'], parameters['density'], parameters['vzero'],
            parameters['dr']) + '\n' +
            r'$S_{max}=%.2e, N_{cases}=%.2e, l=%.2e$'
            % (int_max, Ncases, box_size))
        self.ax.set_xlabel(r'$t$')
        self.ax.set_ylabel(r'$\phi_{loc}$')
        self.colorbar.set_label(r'$\log P(\phi_{loc})$',
            labelpad=pad, rotation=270)
    def plot(self, times, densities, peak=True):
        """
        Plot histogram.

        Parameters
        ----------
        times : array-like
            Array of times at which densities have been calculated.
        densities : array-like of array-like
            Array of densities arrays at times times.
        peak : bool
            Highlight tallest peak of histogram. (default: True)
        """
        self.times = times
        self.histogram3D = []  # (time, bin, log-probability) triples for the contour plot
        self.philocmax = []  # most probable local densities at times
        # NOTE: the loop variable `density` shadows the module-level
        # density() function inside this method.
        for time, density in zip(self.times, densities):
            time_value = np.full(self.Nbins, fill_value=time)
            bins, hist = histogram(density, self.Nbins, self.phimax)  # histogram of local densities with corresponding bins
            hist = np.log10(hist)
            histogram3D_time = np.transpose([time_value, bins, hist]).tolist()
            self.histogram3D += histogram3D_time
            # Bin (index 1) with the largest log-probability (index 2).
            self.philocmax += [max(histogram3D_time, key=lambda el: el[2])[1]]
        self.histogram3D = np.transpose(self.histogram3D)
        # Clamp to pphilocmin so empty bins (log10(0) = -inf) do not break
        # the contour plot.
        self.histogram3D[2][
            self.histogram3D[2] < self.pphilocmin] = self.pphilocmin  # set minimum histogram value as pphilocmin
        self.ax.tricontourf(*self.histogram3D, self.contours,
            cmap=self.cmap, norm=self.norm)  # local density histogram
        if peak: self.ax.plot(self.times, self.philocmax,
            linestyle='--', color='red', linewidth=4)  # most probable packing fraction line
# SCRIPT
if __name__ == '__main__':  # executing as script

    # VARIABLE DEFINITIONS
    data_dir = get_env('DATA_DIRECTORY', default=getcwd())  # data directory
    init_frame = get_env('INITIAL_FRAME', default=_init_frame, vartype=int)  # frame to consider as initial
    int_max = get_env('INTERVAL_MAXIMUM', default=_int_max, vartype=int)  # maximum number of frames on which to calculate densities
    box_size = get_env('BOX_SIZE', default=_box_size, vartype=float)  # length of the square boxes in which particles are counted
    parameters_file = get_env('PARAMETERS_FILE',
        default=joinpath(data_dir, naming.parameters_file))  # simulation parameters file
    with open(parameters_file, 'rb') as param_file:
        parameters = pickle.load(param_file)  # parameters hash table
    prep_frames = ceil(parameters['prep_steps']/parameters['period_dump'])  # number of preparation frames (FIRE energy minimisation)
    Nentries = parameters['N_steps']//parameters['period_dump']  # number of time snapshots in unwrapped trajectory file
    Nentries = get_env('FINAL_FRAME', default=Nentries, vartype=int)  # final frame to consider
    init_frame = int(Nentries/2) if init_frame < 0 else init_frame  # initial frame
    frames = list(OrderedDict.fromkeys(map(
        int,
        np.linspace(init_frame, Nentries - 1, int_max)
        )))  # linearly spaced frames at which to calculate the densities
    Ncases = get_env('N_CASES', default=ceil(np.sqrt(parameters['N'])),
        vartype=int)  # number of boxes in each direction to compute the local density

    # NAMING
    attributes = {'density': parameters['density'],
        'vzero': parameters['vzero'], 'dr': parameters['dr'],
        'N': parameters['N'], 'init_frame': init_frame, 'int_max': int_max,
        'fin_frame': Nentries, 'Ncases': Ncases, 'box_size': box_size}  # attributes displayed in filenames
    naming_varN = naming.VarN(final_frame='FINAL_FRAME' in envvar)  # varN naming object
    varN_filename, = naming_varN.filename(**attributes)  # varN file name

    # STANDARD OUTPUT
    if 'SLURM_JOB_ID' in envvar:  # script executed from Slurm job scheduler
        slurm_output(joinpath(data_dir, 'out'), naming_varN, attributes)

    # MODE SELECTION
    if get_env('COMPUTE', default=False, vartype=bool):  # COMPUTE mode
        startTime = datetime.now()
        # VARIABLE DEFINITIONS
        wrap_file_name = get_env('WRAPPED_FILE',
            default=joinpath(data_dir, naming.wrapped_trajectory_file))  # wrapped trajectory file (.gsd)
        with open(wrap_file_name, 'rb') as wrap_file:  # opens wrapped trajectory file
            w_traj = Gsd(wrap_file, prep_frames=prep_frames)  # wrapped trajectory object
            densities = list(map(
                lambda frame: density(w_traj, frame, Ncases, box_size),
                frames))  # density lists at frames
        # SAVING
        with open(joinpath(data_dir, varN_filename), 'wb') as varN_dump_file:
            pickle.dump(densities, varN_dump_file)
        # EXECUTION TIME
        print("Execution time: %s" % (datetime.now() - startTime))

    if get_env('CHECK', default=False, vartype=bool):  # CHECK mode
        # DATA
        with open(joinpath(data_dir, varN_filename), 'rb') as varN_dump_file:
            densities = pickle.load(varN_dump_file)
        # CHECK: mean of the measured local densities should agree with the
        # parametrised global packing fraction.
        mean_density = np.mean(densities)
        difference = np.abs(mean_density - parameters['density'])
        relative_difference = difference/parameters['density']
        print('Parametrised packing fraction: %f' % parameters['density'])
        print('Measured averaged packing fraction: %f' % mean_density)
        print('Difference: %f' % difference)
        print('Relative difference: %f' % relative_difference)

    if get_env('PLOT', default=False, vartype=bool):  # PLOT mode
        # DATA
        with open(joinpath(data_dir, varN_filename), 'rb') as varN_dump_file:
            densities = pickle.load(varN_dump_file)

    if get_env('PLOT', default=False, vartype=bool) or\
        get_env('SHOW', default=False, vartype=bool):  # PLOT or SHOW mode
        # PLOT
        Nbins = get_env('N_BINS', default=_Nbins, vartype=int)  # number of bins for the histogram
        phimax = get_env('PHIMAX', default=_phimax, vartype=float)  # maximum local density for histogram
        peak = get_env('PEAK', default=True, vartype=bool)  # highlight highest peak of the histogram
        suptitle = get_env('SUPTITLE', default=True, vartype=bool)  # display suptitle
        font_size = get_env('FONT_SIZE', default=_font_size, vartype=int)  # plot font size
        mpl.rcParams.update({'font.size': font_size})
        mode = get_env('PLOT_MODE', default='mean')  # histogram plot mode
        if mode == 'mean':
            plot = Plot(suptitle=suptitle)
            plot.add_hist(*histogram(densities, Nbins, phimax), peak=peak)
            if peak: plot.ax.legend()  # display legend of highlighted peaks in histograms
        elif mode == 'time':
            pphilocmin = get_env('PPHILOCMIN',
                default=_pphilocmin, vartype=float)  # minimum local density probability
            # FIX: this previously read 'PPHILOCMIN' again (copy-paste bug),
            # so the PPHILOCMAX environment variable was never honoured.
            pphilocmax = get_env('PPHILOCMAX',
                default=_pphilocmax, vartype=float)  # maximum local density probability
            contours = get_env('CONTOURS', default=_contours, vartype=int)  # number of contour lines
            plot = PlotTime(Nbins, phimax,
                pphilocmin=pphilocmin, pphilocmax=pphilocmax,
                contours=contours, suptitle=suptitle)
            plot.plot(
                parameters['period_dump']*parameters['time_step']
                *np.array(frames),
                densities, peak=peak)
        else: raise ValueError('Mode %s is not known.' % mode)  # mode is not known
        # SAVING
        if get_env('SAVE', default=False, vartype=bool):  # SAVE mode
            image_name, = naming_varN.image().filename(**attributes)
            plot.fig.savefig(joinpath(data_dir, image_name))
        # SHOW
        if get_env('SHOW', default=False, vartype=bool):  # SHOW mode
            plt.show()
| [
"numpy.log10",
"numpy.sqrt",
"numpy.array",
"numpy.mean",
"numpy.max",
"numpy.linspace",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.abs",
"matplotlib.rcParams.update",
"matplotlib.use",
"pickle.load",
"numpy.argmax",
"active_particles.init.get_env",
"active_particles.maths.Histo... | [((3929, 3973), 'active_particles.init.get_env', 'get_env', (['"""SHOW"""'], {'default': '(False)', 'vartype': 'bool'}), "('SHOW', default=False, vartype=bool)\n", (3936, 3973), False, 'from active_particles.init import get_env, slurm_output\n'), ((3977, 3991), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (3984, 3991), True, 'import matplotlib as mpl\n'), ((5559, 5578), 'math.ceil', 'ceil', (['(box_size / dL)'], {}), '(box_size / dL)\n', (5563, 5578), False, 'from math import ceil\n'), ((5686, 5712), 'numpy.zeros', 'np.zeros', (['(Ncases, Ncases)'], {}), '((Ncases, Ncases))\n', (5694, 5712), True, 'import numpy as np\n'), ((7308, 7335), 'active_particles.maths.Histogram', 'Histogram', (['Nbins', '(0)', 'phimax'], {}), '(Nbins, 0, phimax)\n', (7317, 7335), False, 'from active_particles.maths import Histogram\n'), ((12334, 12392), 'active_particles.init.get_env', 'get_env', (['"""INITIAL_FRAME"""'], {'default': '_init_frame', 'vartype': 'int'}), "('INITIAL_FRAME', default=_init_frame, vartype=int)\n", (12341, 12392), False, 'from active_particles.init import get_env, slurm_output\n'), ((12438, 12496), 'active_particles.init.get_env', 'get_env', (['"""INTERVAL_MAXIMUM"""'], {'default': '_int_max', 'vartype': 'int'}), "('INTERVAL_MAXIMUM', default=_int_max, vartype=int)\n", (12445, 12496), False, 'from active_particles.init import get_env, slurm_output\n'), ((12572, 12625), 'active_particles.init.get_env', 'get_env', (['"""BOX_SIZE"""'], {'default': '_box_size', 'vartype': 'float'}), "('BOX_SIZE', default=_box_size, vartype=float)\n", (12579, 12625), False, 'from active_particles.init import get_env, slurm_output\n'), ((12965, 13023), 'math.ceil', 'ceil', (["(parameters['prep_steps'] / parameters['period_dump'])"], {}), "(parameters['prep_steps'] / parameters['period_dump'])\n", (12969, 13023), False, 'from math import ceil\n'), ((13218, 13271), 'active_particles.init.get_env', 'get_env', (['"""FINAL_FRAME"""'], 
{'default': 'Nentries', 'vartype': 'int'}), "('FINAL_FRAME', default=Nentries, vartype=int)\n", (13225, 13271), False, 'from active_particles.init import get_env, slurm_output\n'), ((14021, 14069), 'active_particles.naming.VarN', 'naming.VarN', ([], {'final_frame': "('FINAL_FRAME' in envvar)"}), "(final_frame='FINAL_FRAME' in envvar)\n", (14032, 14069), True, 'import active_particles.naming as naming\n'), ((14372, 14419), 'active_particles.init.get_env', 'get_env', (['"""COMPUTE"""'], {'default': '(False)', 'vartype': 'bool'}), "('COMPUTE', default=False, vartype=bool)\n", (14379, 14419), False, 'from active_particles.init import get_env, slurm_output\n'), ((15238, 15283), 'active_particles.init.get_env', 'get_env', (['"""CHECK"""'], {'default': '(False)', 'vartype': 'bool'}), "('CHECK', default=False, vartype=bool)\n", (15245, 15283), False, 'from active_particles.init import get_env, slurm_output\n'), ((15885, 15929), 'active_particles.init.get_env', 'get_env', (['"""PLOT"""'], {'default': '(False)', 'vartype': 'bool'}), "('PLOT', default=False, vartype=bool)\n", (15892, 15929), False, 'from active_particles.init import get_env, slurm_output\n'), ((6278, 6323), 'numpy.array', 'np.array', (['((position + L / 2) // dL)'], {'dtype': 'int'}), '((position + L / 2) // dL, dtype=int)\n', (6286, 6323), True, 'import numpy as np\n'), ((7637, 7651), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7649, 7651), True, 'import matplotlib.pyplot as plt\n'), ((9875, 9895), 'numpy.log10', 'np.log10', (['pphilocmin'], {}), '(pphilocmin)\n', (9883, 9895), True, 'import numpy as np\n'), ((9916, 9936), 'numpy.log10', 'np.log10', (['pphilocmax'], {}), '(pphilocmax)\n', (9924, 9936), True, 'import numpy as np\n'), ((9987, 10001), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9999, 10001), True, 'import matplotlib.pyplot as plt\n'), ((10112, 10172), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'self.pphilocmin', 'vmax': 
'self.pphilocmax'}), '(vmin=self.pphilocmin, vmax=self.pphilocmax)\n', (10128, 10172), True, 'import matplotlib.colors as colors\n'), ((11736, 11766), 'numpy.transpose', 'np.transpose', (['self.histogram3D'], {}), '(self.histogram3D)\n', (11748, 11766), True, 'import numpy as np\n'), ((12895, 12918), 'pickle.load', 'pickle.load', (['param_file'], {}), '(param_file)\n', (12906, 12918), False, 'import pickle\n'), ((14457, 14471), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14469, 14471), False, 'from datetime import datetime\n'), ((15474, 15492), 'numpy.mean', 'np.mean', (['densities'], {}), '(densities)\n', (15481, 15492), True, 'import numpy as np\n'), ((15514, 15558), 'numpy.abs', 'np.abs', (["(mean_density - parameters['density'])"], {}), "(mean_density - parameters['density'])\n", (15520, 15558), True, 'import numpy as np\n'), ((16092, 16136), 'active_particles.init.get_env', 'get_env', (['"""PLOT"""'], {'default': '(False)', 'vartype': 'bool'}), "('PLOT', default=False, vartype=bool)\n", (16099, 16136), False, 'from active_particles.init import get_env, slurm_output\n'), ((16143, 16187), 'active_particles.init.get_env', 'get_env', (['"""SHOW"""'], {'default': '(False)', 'vartype': 'bool'}), "('SHOW', default=False, vartype=bool)\n", (16150, 16187), False, 'from active_particles.init import get_env, slurm_output\n'), ((16236, 16282), 'active_particles.init.get_env', 'get_env', (['"""N_BINS"""'], {'default': '_Nbins', 'vartype': 'int'}), "('N_BINS', default=_Nbins, vartype=int)\n", (16243, 16282), False, 'from active_particles.init import get_env, slurm_output\n'), ((16340, 16389), 'active_particles.init.get_env', 'get_env', (['"""PHIMAX"""'], {'default': '_phimax', 'vartype': 'float'}), "('PHIMAX', default=_phimax, vartype=float)\n", (16347, 16389), False, 'from active_particles.init import get_env, slurm_output\n'), ((16445, 16488), 'active_particles.init.get_env', 'get_env', (['"""PEAK"""'], {'default': '(True)', 'vartype': 'bool'}), "('PEAK', 
default=True, vartype=bool)\n", (16452, 16488), False, 'from active_particles.init import get_env, slurm_output\n'), ((16551, 16598), 'active_particles.init.get_env', 'get_env', (['"""SUPTITLE"""'], {'default': '(True)', 'vartype': 'bool'}), "('SUPTITLE', default=True, vartype=bool)\n", (16558, 16598), False, 'from active_particles.init import get_env, slurm_output\n'), ((16639, 16692), 'active_particles.init.get_env', 'get_env', (['"""FONT_SIZE"""'], {'default': '_font_size', 'vartype': 'int'}), "('FONT_SIZE', default=_font_size, vartype=int)\n", (16646, 16692), False, 'from active_particles.init import get_env, slurm_output\n'), ((16718, 16763), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': font_size}"], {}), "({'font.size': font_size})\n", (16737, 16763), True, 'import matplotlib as mpl\n'), ((16780, 16816), 'active_particles.init.get_env', 'get_env', (['"""PLOT_MODE"""'], {'default': '"""mean"""'}), "('PLOT_MODE', default='mean')\n", (16787, 16816), False, 'from active_particles.init import get_env, slurm_output\n'), ((17813, 17857), 'active_particles.init.get_env', 'get_env', (['"""SAVE"""'], {'default': '(False)', 'vartype': 'bool'}), "('SAVE', default=False, vartype=bool)\n", (17820, 17857), False, 'from active_particles.init import get_env, slurm_output\n'), ((18017, 18061), 'active_particles.init.get_env', 'get_env', (['"""SHOW"""'], {'default': '(False)', 'vartype': 'bool'}), "('SHOW', default=False, vartype=bool)\n", (18024, 18061), False, 'from active_particles.init import get_env, slurm_output\n'), ((11355, 11391), 'numpy.full', 'np.full', (['self.Nbins'], {'fill_value': 'time'}), '(self.Nbins, fill_value=time)\n', (11362, 11391), True, 'import numpy as np\n'), ((11518, 11532), 'numpy.log10', 'np.log10', (['hist'], {}), '(hist)\n', (11526, 11532), True, 'import numpy as np\n'), ((12289, 12297), 'os.getcwd', 'getcwd', ([], {}), '()\n', (12295, 12297), False, 'from os import getcwd\n'), ((12749, 12791), 'os.path.join', 'joinpath', 
(['data_dir', 'naming.parameters_file'], {}), '(data_dir, naming.parameters_file)\n', (12757, 12791), True, 'from os.path import join as joinpath\n'), ((14290, 14315), 'os.path.join', 'joinpath', (['data_dir', '"""out"""'], {}), "(data_dir, 'out')\n", (14298, 14315), True, 'from os.path import join as joinpath\n'), ((14756, 14795), 'active_particles.dat.Gsd', 'Gsd', (['wrap_file'], {'prep_frames': 'prep_frames'}), '(wrap_file, prep_frames=prep_frames)\n', (14759, 14795), False, 'from active_particles.dat import Gsd\n'), ((15097, 15135), 'pickle.dump', 'pickle.dump', (['densities', 'varN_dump_file'], {}), '(densities, varN_dump_file)\n', (15108, 15135), False, 'import pickle\n'), ((15411, 15438), 'pickle.load', 'pickle.load', (['varN_dump_file'], {}), '(varN_dump_file)\n', (15422, 15438), False, 'import pickle\n'), ((16056, 16083), 'pickle.load', 'pickle.load', (['varN_dump_file'], {}), '(varN_dump_file)\n', (16067, 16083), False, 'import pickle\n'), ((18087, 18097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18095, 18097), True, 'import matplotlib.pyplot as plt\n'), ((8553, 8565), 'numpy.max', 'np.max', (['hist'], {}), '(hist)\n', (8559, 8565), True, 'import numpy as np\n'), ((13436, 13482), 'numpy.linspace', 'np.linspace', (['init_frame', '(Nentries - 1)', 'int_max'], {}), '(init_frame, Nentries - 1, int_max)\n', (13447, 13482), True, 'import numpy as np\n'), ((13596, 13620), 'numpy.sqrt', 'np.sqrt', (["parameters['N']"], {}), "(parameters['N'])\n", (13603, 13620), True, 'import numpy as np\n'), ((14559, 14609), 'os.path.join', 'joinpath', (['data_dir', 'naming.wrapped_trajectory_file'], {}), '(data_dir, naming.wrapped_trajectory_file)\n', (14567, 14609), True, 'from os.path import join as joinpath\n'), ((15025, 15058), 'os.path.join', 'joinpath', (['data_dir', 'varN_filename'], {}), '(data_dir, varN_filename)\n', (15033, 15058), True, 'from os.path import join as joinpath\n'), ((15327, 15360), 'os.path.join', 'joinpath', (['data_dir', 
'varN_filename'], {}), '(data_dir, varN_filename)\n', (15335, 15360), True, 'from os.path import join as joinpath\n'), ((15972, 16005), 'os.path.join', 'joinpath', (['data_dir', 'varN_filename'], {}), '(data_dir, varN_filename)\n', (15980, 16005), True, 'from os.path import join as joinpath\n'), ((17120, 17177), 'active_particles.init.get_env', 'get_env', (['"""PPHILOCMIN"""'], {'default': '_pphilocmin', 'vartype': 'float'}), "('PPHILOCMIN', default=_pphilocmin, vartype=float)\n", (17127, 17177), False, 'from active_particles.init import get_env, slurm_output\n'), ((17246, 17303), 'active_particles.init.get_env', 'get_env', (['"""PPHILOCMIN"""'], {'default': '_pphilocmax', 'vartype': 'float'}), "('PPHILOCMIN', default=_pphilocmax, vartype=float)\n", (17253, 17303), False, 'from active_particles.init import get_env, slurm_output\n'), ((17370, 17421), 'active_particles.init.get_env', 'get_env', (['"""CONTOURS"""'], {'default': '_contours', 'vartype': 'int'}), "('CONTOURS', default=_contours, vartype=int)\n", (17377, 17421), False, 'from active_particles.init import get_env, slurm_output\n'), ((17963, 17993), 'os.path.join', 'joinpath', (['data_dir', 'image_name'], {}), '(data_dir, image_name)\n', (17971, 17993), True, 'from os.path import join as joinpath\n'), ((6117, 6137), 'numpy.array', 'np.array', (['node_index'], {}), '(node_index)\n', (6125, 6137), True, 'import numpy as np\n'), ((8535, 8550), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (8544, 8550), True, 'import numpy as np\n'), ((10225, 10253), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['self.ax'], {}), '(self.ax)\n', (10244, 10253), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((11556, 11594), 'numpy.transpose', 'np.transpose', (['[time_value, bins, hist]'], {}), '([time_value, bins, hist])\n', (11568, 11594), True, 'import numpy as np\n'), ((15201, 15215), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15213, 15215), False, 
'from datetime import datetime\n'), ((17661, 17677), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (17669, 17677), True, 'import numpy as np\n'), ((6549, 6567), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (6557, 6567), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# import scipy.io as
def frontalize(vertices, canonical_vertices=None):
    """Map `vertices` onto the canonical (frontal) pose by a least-squares affine fit.

    Parameters
    ----------
    vertices : (n, 3) numpy array
        Vertex positions to frontalize.
    canonical_vertices : (n, 3) numpy array, optional
        Target canonical vertices. When None (default, backward compatible),
        they are loaded from 'Data/uv-data/canonical_vertices.npy'.

    Returns
    -------
    (n, 3) numpy array
        Input vertices mapped by the fitted affine transform.
    """
    if canonical_vertices is None:
        canonical_vertices = np.load('Data/uv-data/canonical_vertices.npy')
    vertices_homo = np.hstack((vertices, np.ones([vertices.shape[0], 1])))  # n x 4 homogeneous coordinates
    # Affine matrix (3 x 4) fitted by least squares; rcond=None uses the
    # machine-precision rank cutoff and silences NumPy's FutureWarning.
    P = np.linalg.lstsq(vertices_homo, canonical_vertices, rcond=None)[0].T
    front_vertices = vertices_homo.dot(P.T)
    return front_vertices
| [
"numpy.load",
"numpy.ones",
"numpy.linalg.lstsq"
] | [((703, 749), 'numpy.load', 'np.load', (['"""Data/uv-data/canonical_vertices.npy"""'], {}), "('Data/uv-data/canonical_vertices.npy')\n", (710, 749), True, 'import numpy as np\n'), ((792, 823), 'numpy.ones', 'np.ones', (['[vertices.shape[0], 1]'], {}), '([vertices.shape[0], 1])\n', (799, 823), True, 'import numpy as np\n'), ((842, 892), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['vertices_homo', 'canonical_vertices'], {}), '(vertices_homo, canonical_vertices)\n', (857, 892), True, 'import numpy as np\n')] |
''' UFL_AuxFunction.py
Auxiliary functions
Author: <NAME>
Date: 20.02.2015
Version: 1.0
'''
import numpy as np
import sys
def computeNumericalGradient(func, params, args=(), eps=1e-4):
    '''
    Computes the numerical gradient of a scalar function by central finite
    differences.

    Arguments
    func   : callable evaluated as func(params, *args), returning a scalar
    params : 1-D numpy array of parameters at which to evaluate the gradient
    args   : extra positional arguments forwarded to func (default: ())
    eps    : finite-difference step size (default: 1e-4, as before)

    Returns
    numgrad : flattened numpy array of partial derivatives, one per parameter
    '''
    numgrad = np.zeros(np.shape(params))
    for i in range(params.size):
        theta_plus = params.copy()
        theta_minus = params.copy()
        theta_plus[i] += eps
        theta_minus[i] -= eps
        # Central difference: (f(x + eps) - f(x - eps)) / (2 * eps)
        numgrad[i] = (func(theta_plus, *args) - func(theta_minus, *args)) / (2 * eps)
    return numgrad.flatten()
def sigmoid(x):
    '''
    Logistic sigmoid response: 1 / (1 + exp(-x)).
    '''
    negated_exp = np.exp(-x)
    return 1.0 / (1.0 + negated_exp)
def doUnbalancedMatrixOperation(x, y, operation, axis=1):
    '''
    Computes a given elementwise matrix operation between two operands where
    the second operand is broadcast to the size of the first operand.

    x         : first operand, 2-D array
    y         : second operand, 1-D or 2-D array
    operation : operation, possible values ['add', 'sub', 'mul', 'div']
    axis      : broadcast axis for a 1-D second operand, possible values [0, 1];
                axis=0 treats y as a row vector, axis=1 as a column vector
    '''
    oplist = ['add', 'sub', 'mul', 'div']
    if operation not in oplist:
        print ('ERROR: operation not recognized, permitted operations: ')
        print (oplist)
        sys.exit()
    assert len(np.shape(x))==2, 'First operand must be two dimensional'
    assert len(np.shape(y)) in [1,2], 'Second operand must be one or two dimensional'
    assert axis in [0,1], 'Axis should be 0 or 1'
    if len(np.shape(y))==1:
        # Reshape the 1-D second operand as a 2-D row/column vector so that
        # NumPy broadcasting applies it along the requested axis.
        aux1 = np.resize(y, [1, len(y)] if axis==0 else [len(y), 1])
    else:
        # BUG FIX: the original never assigned aux1 for a 2-D second operand
        # (NameError despite the assert allowing 2-D); a 2-D y is now used
        # directly and broadcast by NumPy.  (The unused np.repeat result was
        # also removed.)
        aux1 = y
    if operation=='add':
        return x + aux1
    elif operation=='sub':
        return x - aux1
    elif operation=='mul':
        return x * aux1
    else:  # 'div'
        return x / aux1
def checkNetworkParameters(params, topology):
    '''
    Checks if the structure of given parameters is consistent with a given network
    topology

    Arguments
    params : List of parameters. The length of list corresponds to layer size, contents of the list are the parameter matrices/vectors
    topology : List of tuples. The length of list corresponds to layer size, contents of the list is the layer topology i.e. (input dims, output dims)

    Returns
    result : True if the parameter structure is consistent with the topology, false otherwise
    '''
    result = True;
    # First check the number of layers
    nLayers = len(topology);
    result = result and len(params)==nLayers;
    # Check the topology: every dimension recorded in topology[i] must match
    # the corresponding dimension of params[i]'s shape.
    for i in range(nLayers):
        for j in range(len(np.shape(topology[i]))):
            result = result and topology[i][j] == np.shape(params[i])[j];
    return result | [
"numpy.exp",
"numpy.shape",
"numpy.resize",
"sys.exit"
] | [((1664, 1689), 'numpy.resize', 'np.resize', (['y', 'resizearray'], {}), '(y, resizearray)\n', (1673, 1689), True, 'import numpy as np\n'), ((292, 308), 'numpy.shape', 'np.shape', (['params'], {}), '(params)\n', (300, 308), True, 'import numpy as np\n'), ((1280, 1290), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1288, 1290), False, 'import sys\n'), ((728, 738), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (734, 738), True, 'import numpy as np\n'), ((1305, 1316), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1313, 1316), True, 'import numpy as np\n'), ((1374, 1385), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (1382, 1385), True, 'import numpy as np\n'), ((1556, 1567), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (1564, 1567), True, 'import numpy as np\n'), ((1715, 1726), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1723, 1726), True, 'import numpy as np\n'), ((2663, 2684), 'numpy.shape', 'np.shape', (['topology[i]'], {}), '(topology[i])\n', (2671, 2684), True, 'import numpy as np\n'), ((2729, 2748), 'numpy.shape', 'np.shape', (['params[i]'], {}), '(params[i])\n', (2737, 2748), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
import math
import copy
sys.path.append("../")
from nmutant_model.model_operation import model_load, model_eval
from nmutant_data.data import get_data, get_shape
from nmutant_util.configs import path
FLAGS = flags.FLAGS
def ns(datasets, model_name, ration=0.1, threshold=0.9, batch_size=256, epoch=9):
    """
    Neuron Switch (NS) mutation operator: in each Conv2D / Linear layer, pick
    a fraction `ration` of output neurons and shuffle their learned
    parameters (weights, biases, and the following batch-norm statistics for
    convolutions) among themselves.  The mutated model is saved only if its
    test accuracy stays within `threshold` times the original accuracy.

    Parameters
    ----------
    datasets : str
        Dataset name (passed to get_data / get_shape / model_load).
    model_name : str
        Name of the model to load and mutate.
    ration : float
        Fraction of neurons to shuffle in each layer. (default: 0.1)
    threshold : float
        Minimum fraction of the original accuracy the mutant must retain to
        be saved. (default: 0.9)
    batch_size : int
        Batch size for accuracy evaluation. (default: 256)
    epoch : int
        Checkpoint epoch to load. (default: 9)
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch)
    eval_params = {'batch_size': batch_size}
    # Baseline accuracy of the unmutated model.
    accuracy = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params, feed=feed_dict)
    print('Test accuracy on legitimate test examples for original model: {0}'.format(accuracy))
    for i in range(len(model.layers)):
        layer = model.layers[i]
        if "Conv2D" in layer.__class__.__name__:
            unique_neurons_layer = layer.output_channels
            shuffle_num = unique_neurons_layer * ration
            # Layers whose mutation budget rounds to <= 1 neuron are skipped.
            if shuffle_num > 1.0:
                shuffle_num = math.floor(shuffle_num) if shuffle_num > 2.0 else math.ceil(shuffle_num)
                mutated_neurons = np.random.choice(unique_neurons_layer, int(shuffle_num), replace=False)
                # Move the output-channel axis to the front so whole filters
                # can be permuted with fancy indexing.
                current_weights = sess.run(layer.kernels).transpose([3,0,1,2])
                current_bias = sess.run(layer.b)
                shuffle_neurons = copy.copy(mutated_neurons)
                np.random.shuffle(shuffle_neurons)
                current_weights[mutated_neurons] = current_weights[shuffle_neurons]
                current_bias[mutated_neurons] = current_bias[shuffle_neurons]
                # Transpose back to the original kernel layout before assigning.
                update_weights = tf.assign(layer.kernels, current_weights.transpose([1,2,3,0]))
                update_bias = tf.assign(layer.b, current_bias)
                sess.run(update_weights)
                sess.run(update_bias)
                # Keep the following batch-norm layer consistent with the
                # permuted channels.
                # NOTE(review): assumes a Conv2D layer is never last in
                # model.layers -- i + 1 would raise IndexError otherwise.
                if "BN" in model.layers[i + 1].__class__.__name__:
                    layer = model.layers[i + 1]
                    current_gamma = sess.run(layer.gamma)
                    current_beta = sess.run(layer.beta)
                    current_moving_mean = sess.run(layer.moving_mean)
                    current_moving_variance = sess.run(layer.moving_variance)
                    current_gamma[mutated_neurons] = current_gamma[shuffle_neurons]
                    current_beta[mutated_neurons] = current_beta[shuffle_neurons]
                    current_moving_mean[mutated_neurons] = current_moving_mean[shuffle_neurons]
                    current_moving_variance[mutated_neurons] = current_moving_variance[shuffle_neurons]
                    update_gamma = tf.assign(layer.gamma, current_gamma)
                    update_beta = tf.assign(layer.beta, current_beta)
                    update_moving_mean = tf.assign(layer.moving_mean, current_moving_mean)
                    update_moving_variance = tf.assign(layer.moving_variance, current_moving_variance)
                    sess.run(update_gamma)
                    sess.run(update_beta)
                    sess.run(update_moving_mean)
                    sess.run(update_moving_variance)
        elif "Linear" in layer.__class__.__name__ :
            unique_neurons_layer = layer.num_hid
            shuffle_num = unique_neurons_layer * ration
            # Layers whose mutation budget rounds to <= 1 neuron are skipped.
            if shuffle_num > 1.0:
                shuffle_num = math.floor(shuffle_num) if shuffle_num > 2.0 else math.ceil(shuffle_num)
                mutated_neurons = np.random.choice(unique_neurons_layer, int(shuffle_num), replace=False)
                # W is (in, out); transpose so rows index output neurons.
                current_weights = sess.run(layer.W).transpose([1,0])
                current_bias = sess.run(layer.b)
                shuffle_neurons = copy.copy(mutated_neurons)
                np.random.shuffle(shuffle_neurons)
                current_weights[mutated_neurons] = current_weights[shuffle_neurons]
                current_bias[mutated_neurons] = current_bias[shuffle_neurons]
                update_weights = tf.assign(layer.W, current_weights.transpose([1,0]))
                update_bias = tf.assign(layer.b, current_bias)
                sess.run(update_weights)
                sess.run(update_bias)
    # Accuracy of the mutated model; save it only if it retains enough of
    # the original accuracy.
    mutated_accuracy = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params, feed=feed_dict)
    print('Test accuracy on legitimate test examples for mutated model: {0}'.format(mutated_accuracy))
    if mutated_accuracy >= threshold * accuracy:
        train_dir = os.path.join(path.mu_model_path, 'ns', datasets + '_' + model_name, '0')
        if not os.path.exists(train_dir):
            os.makedirs(train_dir)
        save_path = os.path.join(train_dir, datasets + '_' + model_name + '.model')
        saver = tf.train.Saver()
        saver.save(sess, save_path)
    sess.close()
def main(argv=None):
ns(datasets=FLAGS.datasets,
model_name=FLAGS.model,
ration=FLAGS.ration,
threshold=FLAGS.threshold)
if __name__ == '__main__':
flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')
flags.DEFINE_string('model', 'lenet5', 'The name of model.')
flags.DEFINE_float('ration', 0.1, 'The ration of mutated neurons.')
flags.DEFINE_float('threshold', 0.9, 'The threshold of accuacy compared with original.')
tf.app.run() | [
"os.path.exists",
"numpy.random.shuffle",
"tensorflow.reset_default_graph",
"math.ceil",
"os.makedirs",
"math.floor",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.train.Saver",
"os.path.join",
"nmutant_model.model_operation.model_eval",
"nmutant_data.data.get_data",
"tensorflo... | [((285, 307), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (300, 307), False, 'import sys\n'), ((571, 595), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (593, 595), True, 'import tensorflow as tf\n'), ((635, 653), 'nmutant_data.data.get_data', 'get_data', (['datasets'], {}), '(datasets)\n', (643, 653), False, 'from nmutant_data.data import get_data, get_shape\n'), ((684, 703), 'nmutant_data.data.get_shape', 'get_shape', (['datasets'], {}), '(datasets)\n', (693, 703), False, 'from nmutant_data.data import get_data, get_shape\n'), ((747, 792), 'nmutant_model.model_operation.model_load', 'model_load', (['datasets', 'model_name'], {'epoch': 'epoch'}), '(datasets, model_name, epoch=epoch)\n', (757, 792), False, 'from nmutant_model.model_operation import model_load, model_eval\n'), ((854, 933), 'nmutant_model.model_operation.model_eval', 'model_eval', (['sess', 'x', 'y', 'preds', 'X_test', 'Y_test'], {'args': 'eval_params', 'feed': 'feed_dict'}), '(sess, x, y, preds, X_test, Y_test, args=eval_params, feed=feed_dict)\n', (864, 933), False, 'from nmutant_model.model_operation import model_load, model_eval\n'), ((4458, 4537), 'nmutant_model.model_operation.model_eval', 'model_eval', (['sess', 'x', 'y', 'preds', 'X_test', 'Y_test'], {'args': 'eval_params', 'feed': 'feed_dict'}), '(sess, x, y, preds, X_test, Y_test, args=eval_params, feed=feed_dict)\n', (4468, 4537), False, 'from nmutant_model.model_operation import model_load, model_eval\n'), ((5214, 5278), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""datasets"""', '"""mnist"""', '"""The target datasets."""'], {}), "('datasets', 'mnist', 'The target datasets.')\n", (5233, 5278), False, 'from tensorflow.python.platform import flags\n'), ((5283, 5343), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model"""', '"""lenet5"""', '"""The name of model."""'], {}), "('model', 'lenet5', 'The 
name of model.')\n", (5302, 5343), False, 'from tensorflow.python.platform import flags\n'), ((5348, 5415), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""ration"""', '(0.1)', '"""The ration of mutated neurons."""'], {}), "('ration', 0.1, 'The ration of mutated neurons.')\n", (5366, 5415), False, 'from tensorflow.python.platform import flags\n'), ((5420, 5512), 'tensorflow.python.platform.flags.DEFINE_float', 'flags.DEFINE_float', (['"""threshold"""', '(0.9)', '"""The threshold of accuacy compared with original."""'], {}), "('threshold', 0.9,\n 'The threshold of accuacy compared with original.')\n", (5438, 5512), False, 'from tensorflow.python.platform import flags\n'), ((5514, 5526), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5524, 5526), True, 'import tensorflow as tf\n'), ((4711, 4783), 'os.path.join', 'os.path.join', (['path.mu_model_path', '"""ns"""', "(datasets + '_' + model_name)", '"""0"""'], {}), "(path.mu_model_path, 'ns', datasets + '_' + model_name, '0')\n", (4723, 4783), False, 'import os\n'), ((4881, 4944), 'os.path.join', 'os.path.join', (['train_dir', "(datasets + '_' + model_name + '.model')"], {}), "(train_dir, datasets + '_' + model_name + '.model')\n", (4893, 4944), False, 'import os\n'), ((4961, 4977), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4975, 4977), True, 'import tensorflow as tf\n'), ((4799, 4824), 'os.path.exists', 'os.path.exists', (['train_dir'], {}), '(train_dir)\n', (4813, 4824), False, 'import os\n'), ((4838, 4860), 'os.makedirs', 'os.makedirs', (['train_dir'], {}), '(train_dir)\n', (4849, 4860), False, 'import os\n'), ((1669, 1695), 'copy.copy', 'copy.copy', (['mutated_neurons'], {}), '(mutated_neurons)\n', (1678, 1695), False, 'import copy\n'), ((1712, 1746), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_neurons'], {}), '(shuffle_neurons)\n', (1729, 1746), True, 'import numpy as np\n'), ((2035, 2067), 'tensorflow.assign', 'tf.assign', (['layer.b', 
'current_bias'], {}), '(layer.b, current_bias)\n', (2044, 2067), True, 'import tensorflow as tf\n'), ((1328, 1351), 'math.floor', 'math.floor', (['shuffle_num'], {}), '(shuffle_num)\n', (1338, 1351), False, 'import math\n'), ((1378, 1400), 'math.ceil', 'math.ceil', (['shuffle_num'], {}), '(shuffle_num)\n', (1387, 1400), False, 'import math\n'), ((2925, 2962), 'tensorflow.assign', 'tf.assign', (['layer.gamma', 'current_gamma'], {}), '(layer.gamma, current_gamma)\n', (2934, 2962), True, 'import tensorflow as tf\n'), ((2997, 3032), 'tensorflow.assign', 'tf.assign', (['layer.beta', 'current_beta'], {}), '(layer.beta, current_beta)\n', (3006, 3032), True, 'import tensorflow as tf\n'), ((3074, 3123), 'tensorflow.assign', 'tf.assign', (['layer.moving_mean', 'current_moving_mean'], {}), '(layer.moving_mean, current_moving_mean)\n', (3083, 3123), True, 'import tensorflow as tf\n'), ((3169, 3226), 'tensorflow.assign', 'tf.assign', (['layer.moving_variance', 'current_moving_variance'], {}), '(layer.moving_variance, current_moving_variance)\n', (3178, 3226), True, 'import tensorflow as tf\n'), ((3966, 3992), 'copy.copy', 'copy.copy', (['mutated_neurons'], {}), '(mutated_neurons)\n', (3975, 3992), False, 'import copy\n'), ((4009, 4043), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_neurons'], {}), '(shuffle_neurons)\n', (4026, 4043), True, 'import numpy as np\n'), ((4322, 4354), 'tensorflow.assign', 'tf.assign', (['layer.b', 'current_bias'], {}), '(layer.b, current_bias)\n', (4331, 4354), True, 'import tensorflow as tf\n'), ((3635, 3658), 'math.floor', 'math.floor', (['shuffle_num'], {}), '(shuffle_num)\n', (3645, 3658), False, 'import math\n'), ((3685, 3707), 'math.ceil', 'math.ceil', (['shuffle_num'], {}), '(shuffle_num)\n', (3694, 3707), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 20:05:53 2015
@author: Ziang
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import TruncatedSVD
from GPSVI.core.GPClassifier import GPClassifier
np.random.seed(0)
data = datasets.load_digits()
xTr, xTe, yTr, yTe = train_test_split(data.data, data.target, test_size=0.50)
fig = plt.figure('Test Digits')
svd = TruncatedSVD(algorithm='randomized', n_components=3, tol=0.0)
svd.fit(xTr)
x = svd.transform(xTe)
ax = fig.add_subplot(121, projection='3d')
ax.scatter(x[:,0], x[:,1], x[:,2], c=yTe, cmap=matplotlib.cm.rainbow)
clf = GPClassifier(xTr, yTr, \
alpha=0.05, max_iter=300, num_inducing_points=200, \
kernel_type='rbf', kernel_args={'gamma':0.01}, \
learning_rate=0.01, verbose=2)
clf.fit()
pd = clf.predict(xTe)
gpsvi_score = clf.score(xTe, yTe)
print(gpsvi_score)
ax = fig.add_subplot(122, projection='3d')
ax.scatter(x[:,0], x[:,1], x[:,2], c=pd, cmap=matplotlib.cm.rainbow)
#clf_lr = LogisticRegression()
#clf_lr.fit(xTr, yTr)
#lr_score = clf_lr.score(xTe, yTe)
#print(lr_score) | [
"GPSVI.core.GPClassifier.GPClassifier",
"sklearn.decomposition.TruncatedSVD",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"sklearn.cross_validation.train_test_split"
] | [((374, 391), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (388, 391), True, 'import numpy as np\n'), ((399, 421), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (419, 421), False, 'from sklearn import datasets\n'), ((443, 498), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['data.data', 'data.target'], {'test_size': '(0.5)'}), '(data.data, data.target, test_size=0.5)\n', (459, 498), False, 'from sklearn.cross_validation import train_test_split\n'), ((507, 532), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Test Digits"""'], {}), "('Test Digits')\n", (517, 532), True, 'import matplotlib.pyplot as plt\n'), ((540, 601), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'algorithm': '"""randomized"""', 'n_components': '(3)', 'tol': '(0.0)'}), "(algorithm='randomized', n_components=3, tol=0.0)\n", (552, 601), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((760, 920), 'GPSVI.core.GPClassifier.GPClassifier', 'GPClassifier', (['xTr', 'yTr'], {'alpha': '(0.05)', 'max_iter': '(300)', 'num_inducing_points': '(200)', 'kernel_type': '"""rbf"""', 'kernel_args': "{'gamma': 0.01}", 'learning_rate': '(0.01)', 'verbose': '(2)'}), "(xTr, yTr, alpha=0.05, max_iter=300, num_inducing_points=200,\n kernel_type='rbf', kernel_args={'gamma': 0.01}, learning_rate=0.01,\n verbose=2)\n", (772, 920), False, 'from GPSVI.core.GPClassifier import GPClassifier\n')] |
import logging
import cmath
import numpy as np
LG = logging.getLogger('pyd.hp')
def unit_vectors(thetamax=7.5, ngrid=32):
thetamax = np.radians(thetamax)
pls = np.linspace(0, 2*np.pi, ngrid)
tls = np.linspace(0, thetamax, ngrid)
P, T = np.meshgrid(pls, tls)
# Raux = np.hypot(P, T - np.pi / 2)
# radial unit vec
e_r = np.zeros((3,) + P.shape)
e_r[0, :, :] = np.sin(T)*np.cos(P)
e_r[1, :, :] = np.sin(T)*np.sin(P)
e_r[2, :, :] = np.cos(T)
# theta unit vec
e_t = np.zeros((3,) + P.shape)
e_t[0, :, :] = np.cos(T)*np.cos(P)
e_t[1, :, :] = np.cos(T)*np.sin(P)
e_t[2, :, :] = -np.sin(T)
# phi unit vec
e_p = np.zeros((3,) + P.shape)
e_p[0, :, :] = -np.sin(P)
e_p[1, :, :] = np.cos(P)
return P, T, (e_r, e_t, e_p)
def gen_r(ngrid, reval, onsphere, thetamax=None, rmax=None, xslice=False):
# TODO return tx and ty if onsphere=False
nfirst = 1 if xslice else ngrid
r = np.empty((nfirst, ngrid, 3))
if onsphere:
assert thetamax is not None
if xslice:
tlsp = np.radians(np.linspace(-thetamax, thetamax, ngrid))
r[0, :, 0] = reval * np.sin(tlsp)
r[0, :, 1] = 0
r[0, :, 2] = reval * np.cos(tlsp)
else:
P, T, (e_r, e_t, e_p) = unit_vectors(thetamax=thetamax,
ngrid=ngrid)
r[:, :, 0] = reval * e_r[0, :, :]
r[:, :, 1] = reval * e_r[1, :, :]
r[:, :, 2] = reval * e_r[2, :, :]
else:
if thetamax is not None:
rmax = np.tan(np.radians(thetamax)) * reval
assert rmax is not None
LG.info(" %s deg", np.degrees(cmath.phase(reval + 1j*np.sqrt(2)*rmax)))
rng = np.linspace(-rmax, rmax, ngrid)
X, Y = np.meshgrid(rng, rng)
r[:, :, 0] = X
r[:, :, 1] = Y
r[:, :, 2] = reval
LG.debug("onsphere: %s\tfirst r vec %s", onsphere, r[0, 0, :])
if onsphere:
if xslice:
return tlsp, r
else:
return T, P, r
else:
if xslice:
raise NotImplementedError
return X, Y, r
| [
"logging.getLogger",
"numpy.radians",
"numpy.sqrt",
"numpy.linspace",
"numpy.zeros",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid"
] | [((54, 81), 'logging.getLogger', 'logging.getLogger', (['"""pyd.hp"""'], {}), "('pyd.hp')\n", (71, 81), False, 'import logging\n'), ((141, 161), 'numpy.radians', 'np.radians', (['thetamax'], {}), '(thetamax)\n', (151, 161), True, 'import numpy as np\n'), ((172, 204), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'ngrid'], {}), '(0, 2 * np.pi, ngrid)\n', (183, 204), True, 'import numpy as np\n'), ((213, 244), 'numpy.linspace', 'np.linspace', (['(0)', 'thetamax', 'ngrid'], {}), '(0, thetamax, ngrid)\n', (224, 244), True, 'import numpy as np\n'), ((256, 277), 'numpy.meshgrid', 'np.meshgrid', (['pls', 'tls'], {}), '(pls, tls)\n', (267, 277), True, 'import numpy as np\n'), ((352, 376), 'numpy.zeros', 'np.zeros', (['((3,) + P.shape)'], {}), '((3,) + P.shape)\n', (360, 376), True, 'import numpy as np\n'), ((474, 483), 'numpy.cos', 'np.cos', (['T'], {}), '(T)\n', (480, 483), True, 'import numpy as np\n'), ((516, 540), 'numpy.zeros', 'np.zeros', (['((3,) + P.shape)'], {}), '((3,) + P.shape)\n', (524, 540), True, 'import numpy as np\n'), ((679, 703), 'numpy.zeros', 'np.zeros', (['((3,) + P.shape)'], {}), '((3,) + P.shape)\n', (687, 703), True, 'import numpy as np\n'), ((753, 762), 'numpy.cos', 'np.cos', (['P'], {}), '(P)\n', (759, 762), True, 'import numpy as np\n'), ((965, 993), 'numpy.empty', 'np.empty', (['(nfirst, ngrid, 3)'], {}), '((nfirst, ngrid, 3))\n', (973, 993), True, 'import numpy as np\n'), ((396, 405), 'numpy.sin', 'np.sin', (['T'], {}), '(T)\n', (402, 405), True, 'import numpy as np\n'), ((406, 415), 'numpy.cos', 'np.cos', (['P'], {}), '(P)\n', (412, 415), True, 'import numpy as np\n'), ((435, 444), 'numpy.sin', 'np.sin', (['T'], {}), '(T)\n', (441, 444), True, 'import numpy as np\n'), ((445, 454), 'numpy.sin', 'np.sin', (['P'], {}), '(P)\n', (451, 454), True, 'import numpy as np\n'), ((560, 569), 'numpy.cos', 'np.cos', (['T'], {}), '(T)\n', (566, 569), True, 'import numpy as np\n'), ((570, 579), 'numpy.cos', 'np.cos', (['P'], {}), '(P)\n', 
(576, 579), True, 'import numpy as np\n'), ((599, 608), 'numpy.cos', 'np.cos', (['T'], {}), '(T)\n', (605, 608), True, 'import numpy as np\n'), ((609, 618), 'numpy.sin', 'np.sin', (['P'], {}), '(P)\n', (615, 618), True, 'import numpy as np\n'), ((639, 648), 'numpy.sin', 'np.sin', (['T'], {}), '(T)\n', (645, 648), True, 'import numpy as np\n'), ((724, 733), 'numpy.sin', 'np.sin', (['P'], {}), '(P)\n', (730, 733), True, 'import numpy as np\n'), ((1763, 1794), 'numpy.linspace', 'np.linspace', (['(-rmax)', 'rmax', 'ngrid'], {}), '(-rmax, rmax, ngrid)\n', (1774, 1794), True, 'import numpy as np\n'), ((1810, 1831), 'numpy.meshgrid', 'np.meshgrid', (['rng', 'rng'], {}), '(rng, rng)\n', (1821, 1831), True, 'import numpy as np\n'), ((1096, 1135), 'numpy.linspace', 'np.linspace', (['(-thetamax)', 'thetamax', 'ngrid'], {}), '(-thetamax, thetamax, ngrid)\n', (1107, 1135), True, 'import numpy as np\n'), ((1170, 1182), 'numpy.sin', 'np.sin', (['tlsp'], {}), '(tlsp)\n', (1176, 1182), True, 'import numpy as np\n'), ((1243, 1255), 'numpy.cos', 'np.cos', (['tlsp'], {}), '(tlsp)\n', (1249, 1255), True, 'import numpy as np\n'), ((1607, 1627), 'numpy.radians', 'np.radians', (['thetamax'], {}), '(thetamax)\n', (1617, 1627), True, 'import numpy as np\n'), ((1730, 1740), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1737, 1740), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
    """Tests for ``DataFrame.sort_values`` / ``sort_index``: sorting by
    columns and rows, sort stability, and the placement of NaN/NaT values.
    """
    def test_sort_values(self):
        """Sort by one or several columns (axis=0) and by rows (axis=1),
        ascending and descending, including argument validation errors."""
        frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
                          index=[1, 2, 3], columns=list('ABC'))
        # by column (axis=0)
        sorted_df = frame.sort_values(by='A')
        indexer = frame['A'].argsort().values
        expected = frame.loc[frame.index[indexer]]
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by='A', ascending=False)
        indexer = indexer[::-1]
        expected = frame.loc[frame.index[indexer]]
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by='A', ascending=False)
        assert_frame_equal(sorted_df, expected)
        # GH4839
        sorted_df = frame.sort_values(by=['A'], ascending=[False])
        assert_frame_equal(sorted_df, expected)
        # multiple bys
        sorted_df = frame.sort_values(by=['B', 'C'])
        expected = frame.loc[[2, 1, 3]]
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
        assert_frame_equal(sorted_df, expected[::-1])
        sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
        assert_frame_equal(sorted_df, expected)
        # axis may only be 0 or 1
        pytest.raises(ValueError, lambda: frame.sort_values(
            by=['A', 'B'], axis=2, inplace=True))
        # by row (axis=1): GH 10806
        sorted_df = frame.sort_values(by=3, axis=1)
        expected = frame
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
        expected = frame.reindex(columns=['C', 'B', 'A'])
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 2], axis='columns')
        expected = frame.reindex(columns=['B', 'A', 'C'])
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 3], axis=1,
                                      ascending=[True, False])
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
        expected = frame.reindex(columns=['C', 'B', 'A'])
        assert_frame_equal(sorted_df, expected)
        # length of ``ascending`` must match length of ``by``
        msg = r'Length of ascending \(5\) != length of by \(2\)'
        with tm.assert_raises_regex(ValueError, msg):
            frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
    def test_sort_values_inplace(self):
        """``inplace=True`` must produce the same frame as the
        out-of-place ``sort_values`` call."""
        frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
                          columns=['A', 'B', 'C', 'D'])
        sorted_df = frame.copy()
        sorted_df.sort_values(by='A', inplace=True)
        expected = frame.sort_values(by='A')
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.copy()
        sorted_df.sort_values(by=1, axis=1, inplace=True)
        expected = frame.sort_values(by=1, axis=1)
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.copy()
        sorted_df.sort_values(by='A', ascending=False, inplace=True)
        expected = frame.sort_values(by='A', ascending=False)
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.copy()
        sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
        expected = frame.sort_values(by=['A', 'B'], ascending=False)
        assert_frame_equal(sorted_df, expected)
    def test_sort_nan(self):
        """NaN placement controlled by ``na_position`` for both
        ``sort_values`` and ``sort_index`` (NaN index labels)."""
        # GH3917
        nan = np.nan
        df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                        'B': [9, nan, 5, 2, 5, 4, 5]})
        # sort one column only
        expected = DataFrame(
            {'A': [nan, 1, 1, 2, 4, 6, 8],
             'B': [5, 9, 2, nan, 5, 5, 4]},
            index=[2, 0, 3, 1, 6, 4, 5])
        sorted_df = df.sort_values(['A'], na_position='first')
        assert_frame_equal(sorted_df, expected)
        expected = DataFrame(
            {'A': [nan, 8, 6, 4, 2, 1, 1],
             'B': [5, 4, 5, 5, nan, 9, 2]},
            index=[2, 5, 4, 6, 1, 0, 3])
        sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
        assert_frame_equal(sorted_df, expected)
        expected = df.reindex(columns=['B', 'A'])
        sorted_df = df.sort_values(by=1, axis=1, na_position='first')
        assert_frame_equal(sorted_df, expected)
        # na_position='last', order
        expected = DataFrame(
            {'A': [1, 1, 2, 4, 6, 8, nan],
             'B': [2, 9, nan, 5, 5, 4, 5]},
            index=[3, 0, 1, 6, 4, 5, 2])
        sorted_df = df.sort_values(['A', 'B'])
        assert_frame_equal(sorted_df, expected)
        # na_position='first', order
        expected = DataFrame(
            {'A': [nan, 1, 1, 2, 4, 6, 8],
             'B': [5, 2, 9, nan, 5, 5, 4]},
            index=[2, 3, 0, 1, 6, 4, 5])
        sorted_df = df.sort_values(['A', 'B'], na_position='first')
        assert_frame_equal(sorted_df, expected)
        # na_position='first', not order
        expected = DataFrame(
            {'A': [nan, 1, 1, 2, 4, 6, 8],
             'B': [5, 9, 2, nan, 5, 5, 4]},
            index=[2, 0, 3, 1, 6, 4, 5])
        sorted_df = df.sort_values(['A', 'B'], ascending=[
                                   1, 0], na_position='first')
        assert_frame_equal(sorted_df, expected)
        # na_position='last', not order
        expected = DataFrame(
            {'A': [8, 6, 4, 2, 1, 1, nan],
             'B': [4, 5, 5, nan, 2, 9, 5]},
            index=[5, 4, 6, 1, 3, 0, 2])
        sorted_df = df.sort_values(['A', 'B'], ascending=[
                                   0, 1], na_position='last')
        assert_frame_equal(sorted_df, expected)
        # Test DataFrame with nan label
        df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                        'B': [9, nan, 5, 2, 5, 4, 5]},
                       index=[1, 2, 3, 4, 5, 6, nan])
        # NaN label, ascending=True, na_position='last'
        sorted_df = df.sort_index(
            kind='quicksort', ascending=True, na_position='last')
        expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                              'B': [9, nan, 5, 2, 5, 4, 5]},
                             index=[1, 2, 3, 4, 5, 6, nan])
        assert_frame_equal(sorted_df, expected)
        # NaN label, ascending=True, na_position='first'
        sorted_df = df.sort_index(na_position='first')
        expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
                              'B': [5, 9, nan, 5, 2, 5, 4]},
                             index=[nan, 1, 2, 3, 4, 5, 6])
        assert_frame_equal(sorted_df, expected)
        # NaN label, ascending=False, na_position='last'
        sorted_df = df.sort_index(kind='quicksort', ascending=False)
        expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
                              'B': [4, 5, 2, 5, nan, 9, 5]},
                             index=[6, 5, 4, 3, 2, 1, nan])
        assert_frame_equal(sorted_df, expected)
        # NaN label, ascending=False, na_position='first'
        sorted_df = df.sort_index(
            kind='quicksort', ascending=False, na_position='first')
        expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
                              'B': [5, 4, 5, 2, 5, nan, 9]},
                             index=[nan, 6, 5, 4, 3, 2, 1])
        assert_frame_equal(sorted_df, expected)
    def test_stable_descending_sort(self):
        """Descending mergesort keeps rows with equal keys in their
        original relative order (sort stability)."""
        # GH #6399
        df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
                       columns=['sort_col', 'order'])
        sorted_df = df.sort_values(by='sort_col', kind='mergesort',
                                   ascending=False)
        assert_frame_equal(df, sorted_df)
    def test_stable_descending_multicolumn_sort(self):
        """Multi-column mergesort stays stable with mixed per-column
        ascending flags and ``na_position='first'``."""
        nan = np.nan
        df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                        'B': [9, nan, 5, 2, 5, 4, 5]})
        # test stable mergesort
        expected = DataFrame(
            {'A': [nan, 8, 6, 4, 2, 1, 1],
             'B': [5, 4, 5, 5, nan, 2, 9]},
            index=[2, 5, 4, 6, 1, 3, 0])
        sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
                                   na_position='first',
                                   kind='mergesort')
        assert_frame_equal(sorted_df, expected)
        expected = DataFrame(
            {'A': [nan, 8, 6, 4, 2, 1, 1],
             'B': [5, 4, 5, 5, nan, 9, 2]},
            index=[2, 5, 4, 6, 1, 0, 3])
        sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
                                   na_position='first',
                                   kind='mergesort')
        assert_frame_equal(sorted_df, expected)
    def test_stable_categorial(self):
        """Mergesort on an already-sorted ordered Categorical column must
        leave the frame unchanged."""
        # GH 16793
        df = DataFrame({
            'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
        })
        expected = df.copy()
        sorted_df = df.sort_values('x', kind='mergesort')
        assert_frame_equal(sorted_df, expected)
    def test_sort_datetimes(self):
        """Sorting on a datetime column gives the same result for the
        scalar and the list form of ``by``."""
        # GH 3461, argsort / lexsort differences for a datetime column
        df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
                       columns=['A'],
                       index=date_range('20130101', periods=9))
        dts = [Timestamp(x)
               for x in ['2004-02-11', '2004-01-21', '2004-01-26',
                         '2005-09-20', '2010-10-04', '2009-05-12',
                         '2008-11-12', '2010-09-28', '2010-09-28']]
        df['B'] = dts[::2] + dts[1::2]
        df['C'] = 2.
        df['A1'] = 3.
        df1 = df.sort_values(by='A')
        df2 = df.sort_values(by=['A'])
        assert_frame_equal(df1, df2)
        df1 = df.sort_values(by='B')
        df2 = df.sort_values(by=['B'])
        assert_frame_equal(df1, df2)
        df1 = df.sort_values(by='B')
        df2 = df.sort_values(by=['C', 'B'])
        assert_frame_equal(df1, df2)
    def test_frame_column_inplace_sort_exception(self):
        """In-place sorting of a Series that is a view on a DataFrame
        column must raise; sorting a copy works."""
        s = self.frame['A']
        with tm.assert_raises_regex(ValueError, "This Series is a view"):
            s.sort_values(inplace=True)
        cp = s.copy()
        cp.sort_values()  # it works!
    def test_sort_nat_values_in_int_column(self):
        """The int64 value of NaT is an ordinary integer when sorting;
        only datetime64 columns treat NaT as missing."""
        # GH 14922: "sorting with large float and multiple columns incorrect"
        # cause was that the int64 value NaT was considered as "na". Which is
        # only correct for datetime64 columns.
        int_values = (2, int(NaT))
        float_values = (2.0, -1.797693e308)
        df = DataFrame(dict(int=int_values, float=float_values),
                       columns=["int", "float"])
        df_reversed = DataFrame(dict(int=int_values[::-1],
                                      float=float_values[::-1]),
                                 columns=["int", "float"],
                                 index=[1, 0])
        # NaT is not a "na" for int64 columns, so na_position must not
        # influence the result:
        df_sorted = df.sort_values(["int", "float"], na_position="last")
        assert_frame_equal(df_sorted, df_reversed)
        df_sorted = df.sort_values(["int", "float"], na_position="first")
        assert_frame_equal(df_sorted, df_reversed)
        # reverse sorting order
        df_sorted = df.sort_values(["int", "float"], ascending=False)
        assert_frame_equal(df_sorted, df)
        # and now check if NaT is still considered as "na" for datetime64
        # columns:
        df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
                            float=float_values), columns=["datetime", "float"])
        df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
                                      float=float_values[::-1]),
                                 columns=["datetime", "float"],
                                 index=[1, 0])
        df_sorted = df.sort_values(["datetime", "float"], na_position="first")
        assert_frame_equal(df_sorted, df_reversed)
        df_sorted = df.sort_values(["datetime", "float"], na_position="last")
        assert_frame_equal(df_sorted, df)
        # Ascending should not affect the results.
        df_sorted = df.sort_values(["datetime", "float"], ascending=False)
        assert_frame_equal(df_sorted, df)
    def test_sort_nat(self):
        """NaT sorts to the end of its key group in a multi-column sort."""
        # GH 16836
        d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
                                     np.nan, '2016-01-01']]
        d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
                                     '2016-01-01', '2015-01-01']]
        df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
        d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
                                     '2016-01-01', np.nan]]
        d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
                                     '2017-01-01', '2016-01-01']]
        expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
        sorted_df = df.sort_values(by=['a', 'b'], )
        tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
    def test_sort_index_multicolumn(self):
        """Deprecated ``sort_index(by=...)`` warns, and ``sort_values``
        matches the lexsort of the key columns."""
        A = np.arange(5).repeat(20)
        B = np.tile(np.arange(5), 20)
        random.shuffle(A)
        random.shuffle(B)
        frame = DataFrame({'A': A, 'B': B,
                           'C': np.random.randn(100)})
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'])
        result = frame.sort_values(by=['A', 'B'])
        indexer = np.lexsort((frame['B'], frame['A']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'], ascending=False)
        result = frame.sort_values(by=['A', 'B'], ascending=False)
        # ranks reversed: descending lexsort expressed via ascending ranks
        indexer = np.lexsort((frame['B'].rank(ascending=False),
                               frame['A'].rank(ascending=False)))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['B', 'A'])
        result = frame.sort_values(by=['B', 'A'])
        indexer = np.lexsort((frame['A'], frame['B']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
    def test_sort_index_different_sortorder(self):
        """Per-column ascending flags: deprecated ``sort_index(by=...)``
        warns; ``sort_values`` and MultiIndex/Series ``sort_index`` agree
        with an explicit lexsort."""
        A = np.arange(20).repeat(5)
        B = np.tile(np.arange(5), 20)
        indexer = np.random.permutation(100)
        A = A.take(indexer)
        B = B.take(indexer)
        df = DataFrame({'A': A, 'B': B,
                        'C': np.random.randn(100)})
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['A', 'B'], ascending=[1, 0])
        result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
        # negate B (via max - B) to get descending order on the second key
        ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
        expected = df.take(ex_indexer)
        assert_frame_equal(result, expected)
        # test with multiindex, too
        idf = df.set_index(['A', 'B'])
        result = idf.sort_index(ascending=[1, 0])
        expected = idf.take(ex_indexer)
        assert_frame_equal(result, expected)
        # also, Series!
        result = idf['C'].sort_index(ascending=[1, 0])
        assert_series_equal(result, expected['C'])
    def test_sort_index_duplicates(self):
        """Sorting by a duplicated column label raises ValueError; tuple
        labels on a MultiIndex column axis are accepted."""
        # with 9816, these are all translated to .sort_values
        df = DataFrame([lrange(5, 9), lrange(4)],
                       columns=['a', 'a', 'b', 'b'])
        with tm.assert_raises_regex(ValueError, 'not unique'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                df.sort_index(by='a')
        with tm.assert_raises_regex(ValueError, 'not unique'):
            df.sort_values(by='a')
        with tm.assert_raises_regex(ValueError, 'not unique'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                df.sort_index(by=['a'])
        with tm.assert_raises_regex(ValueError, 'not unique'):
            df.sort_values(by=['a'])
        with tm.assert_raises_regex(ValueError, 'not unique'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                # multi-column 'by' is separate codepath
                df.sort_index(by=['a', 'b'])
        with tm.assert_raises_regex(ValueError, 'not unique'):
            # multi-column 'by' is separate codepath
            df.sort_values(by=['a', 'b'])
        # with multi-index
        # GH4370
        df = DataFrame(np.random.randn(4, 2),
                       columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
        with tm.assert_raises_regex(ValueError, 'level'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                df.sort_index(by='a')
        with tm.assert_raises_regex(ValueError, 'level'):
            df.sort_values(by='a')
        # convert tuples to a list of tuples
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=[('a', 1)])
        expected = df.sort_values(by=[('a', 1)])
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=('a', 1))
        result = df.sort_values(by=('a', 1))
        assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
    def test_sort_index_categorical_index(self):
        """``sort_index`` on a CategoricalIndex orders rows by category
        order ('c' < 'a' < 'b'), not lexically."""
        df = (DataFrame({'A': np.arange(6, dtype='int64'),
                         'B': Series(list('aabbca'))
                         .astype(CategoricalDtype(list('cab')))})
              .set_index('B'))
        result = df.sort_index()
        expected = df.iloc[[4, 0, 1, 5, 2, 3]]
        assert_frame_equal(result, expected)
        result = df.sort_index(ascending=False)
        expected = df.iloc[[3, 2, 5, 1, 0, 4]]
        assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
    @pytest.mark.parametrize("level", ['A', 0])  # GH 21052
    def test_sort_index_multiindex(self, level):
        """Sort rows by one level of a MultiIndex, addressed by name or
        position, with and without sorting the remaining levels."""
        # GH13496
        # sort rows by specified level of multi-index
        mi = MultiIndex.from_tuples([
            [2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC'))
        df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
        expected_mi = MultiIndex.from_tuples([
            [1, 1, 1],
            [2, 1, 2],
            [2, 1, 3]], names=list('ABC'))
        expected = pd.DataFrame([
            [5, 6],
            [3, 4],
            [1, 2]], index=expected_mi)
        result = df.sort_index(level=level)
        assert_frame_equal(result, expected)
        # sort_remaining=False
        expected_mi = MultiIndex.from_tuples([
            [1, 1, 1],
            [2, 1, 3],
            [2, 1, 2]], names=list('ABC'))
        expected = pd.DataFrame([
            [5, 6],
            [1, 2],
            [3, 4]], index=expected_mi)
        result = df.sort_index(level=level, sort_remaining=False)
        assert_frame_equal(result, expected)
    def test_sort_index_intervalindex(self):
        """Unstacking a groupby over ``pd.cut`` bins yields interval
        column categories in bin order."""
        # this is a de-facto sort via unstack
        # confirming that we sort in the order of the bins
        y = Series(np.random.randn(100))
        x1 = Series(np.sign(np.random.randn(100)))
        x2 = pd.cut(Series(np.random.randn(100)),
                    bins=[-3, -0.5, 0, 0.5, 3])
        model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])
        result = model.groupby(['X1', 'X2'], observed=True).mean().unstack()
        expected = IntervalIndex.from_tuples(
            [(-3.0, -0.5), (-0.5, 0.0),
             (0.0, 0.5), (0.5, 3.0)],
            closed='right')
        result = result.columns.levels[1].categories
        tm.assert_index_equal(result, expected)
def test_sort_index_na_position_with_categories(self):
# GH 22556
# Positioning missing value properly when column is Categorical.
categories = ['A', 'B', 'C']
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = 'first'
na_position_last = 'last'
column_name = 'c'
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices, reverse=True)
df = pd.DataFrame({
column_name: pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=categories,
ordered=True)})
# sort ascending with na first
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + categories,
categories=categories,
ordered=True)
}, index=na_indices + category_indices)
assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_last)
expected = DataFrame({
column_name: Categorical(categories + list_of_nans,
categories=categories,
ordered=True)
}, index=category_indices + na_indices)
assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(by=column_name,
ascending=False,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + reversed_categories,
categories=categories,
ordered=True)
}, index=reversed_na_indices + reversed_category_indices)
assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(by=column_name,
ascending=False,
na_position=na_position_last)
expected = DataFrame({
column_name: Categorical(reversed_categories + list_of_nans,
categories=categories,
ordered=True)
}, index=reversed_category_indices + reversed_na_indices)
assert_frame_equal(result, expected)
def test_sort_index_na_position_with_categories_raises(self):
df = pd.DataFrame({
'c': pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=['A', 'B', 'C'],
ordered=True)})
with pytest.raises(ValueError):
df.sort_values(by='c',
ascending=False,
na_position='bad_position')
| [
"pandas.util.testing.assert_raises_regex",
"pandas.MultiIndex.from_tuples",
"pandas.date_range",
"numpy.arange",
"pandas.util.testing.assert_frame_equal",
"numpy.repeat",
"pandas.Categorical",
"pandas.DataFrame",
"pandas.util.testing.assert_produces_warning",
"pandas.compat.lrange",
"numpy.rando... | [((20813, 20855), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""level"""', "['A', 0]"], {}), "('level', ['A', 0])\n", (20836, 20855), False, 'import pytest\n'), ((872, 911), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (890, 911), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((1067, 1106), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (1085, 1106), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((1179, 1218), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (1197, 1218), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((1312, 1351), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (1330, 1351), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((1477, 1516), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (1495, 1516), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((1596, 1641), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected[::-1]'], {}), '(sorted_df, expected[::-1])\n', (1614, 1641), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((1729, 1768), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (1747, 1768), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((2003, 2042), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, 
expected)\n', (2021, 2042), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((2179, 2218), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (2197, 2218), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((2351, 2390), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (2369, 2390), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((2520, 2559), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (2538, 2559), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((2701, 2740), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (2719, 2740), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((3241, 3280), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (3259, 3280), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((3432, 3471), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (3450, 3471), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((3645, 3684), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (3663, 3684), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((3872, 3911), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (3890, 3911), False, 'from pandas.util.testing import assert_series_equal, 
assert_frame_equal\n'), ((3993, 4064), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]}"], {}), "({'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]})\n", (4002, 4064), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((4140, 4244), 'pandas.DataFrame', 'DataFrame', (["{'A': [nan, 1, 1, 2, 4, 6, 8], 'B': [5, 9, 2, nan, 5, 5, 4]}"], {'index': '[2, 0, 3, 1, 6, 4, 5]'}), "({'A': [nan, 1, 1, 2, 4, 6, 8], 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n", (4149, 4244), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((4350, 4389), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (4368, 4389), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((4410, 4514), 'pandas.DataFrame', 'DataFrame', (["{'A': [nan, 8, 6, 4, 2, 1, 1], 'B': [5, 4, 5, 5, nan, 9, 2]}"], {'index': '[2, 5, 4, 6, 1, 0, 3]'}), "({'A': [nan, 8, 6, 4, 2, 1, 1], 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n", (4419, 4514), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((4637, 4676), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (4655, 4676), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((4806, 4845), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (4824, 4845), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((4902, 5006), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 1, 2, 4, 6, 8, nan], 'B': [2, 9, nan, 5, 5, 4, 5]}"], {'index': '[3, 0, 1, 6, 4, 5, 2]'}), "({'A': [1, 1, 2, 4, 6, 
8, nan], 'B': [2, 9, nan, 5, 5, 4, 5]},\n index=[3, 0, 1, 6, 4, 5, 2])\n", (4911, 5006), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((5096, 5135), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (5114, 5135), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((5193, 5297), 'pandas.DataFrame', 'DataFrame', (["{'A': [nan, 1, 1, 2, 4, 6, 8], 'B': [5, 2, 9, nan, 5, 5, 4]}"], {'index': '[2, 3, 0, 1, 6, 4, 5]'}), "({'A': [nan, 1, 1, 2, 4, 6, 8], 'B': [5, 2, 9, nan, 5, 5, 4]},\n index=[2, 3, 0, 1, 6, 4, 5])\n", (5202, 5297), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((5408, 5447), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (5426, 5447), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((5509, 5613), 'pandas.DataFrame', 'DataFrame', (["{'A': [nan, 1, 1, 2, 4, 6, 8], 'B': [5, 9, 2, nan, 5, 5, 4]}"], {'index': '[2, 0, 3, 1, 6, 4, 5]'}), "({'A': [nan, 1, 1, 2, 4, 6, 8], 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n", (5518, 5613), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((5755, 5794), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (5773, 5794), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((5855, 5959), 'pandas.DataFrame', 'DataFrame', (["{'A': [8, 6, 4, 2, 1, 1, nan], 'B': [4, 5, 5, nan, 2, 9, 5]}"], {'index': '[5, 4, 6, 1, 3, 0, 2]'}), "({'A': [8, 6, 4, 2, 1, 1, nan], 'B': [4, 5, 5, nan, 2, 9, 5]},\n index=[5, 4, 6, 1, 3, 0, 2])\n", (5864, 5959), False, 'from pandas import DataFrame, Series, 
MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((6100, 6139), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (6118, 6139), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((6194, 6300), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]}"], {'index': '[1, 2, 3, 4, 5, 6, nan]'}), "({'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]},\n index=[1, 2, 3, 4, 5, 6, nan])\n", (6203, 6300), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((6521, 6627), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]}"], {'index': '[1, 2, 3, 4, 5, 6, nan]'}), "({'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]},\n index=[1, 2, 3, 4, 5, 6, nan])\n", (6530, 6627), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((6691, 6730), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (6709, 6730), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((6863, 6969), 'pandas.DataFrame', 'DataFrame', (["{'A': [4, 1, 2, nan, 1, 6, 8], 'B': [5, 9, nan, 5, 2, 5, 4]}"], {'index': '[nan, 1, 2, 3, 4, 5, 6]'}), "({'A': [4, 1, 2, nan, 1, 6, 8], 'B': [5, 9, nan, 5, 2, 5, 4]},\n index=[nan, 1, 2, 3, 4, 5, 6])\n", (6872, 6969), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((7033, 7072), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (7051, 7072), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((7219, 7325), 'pandas.DataFrame', 'DataFrame', 
(["{'A': [8, 6, 1, nan, 2, 1, 4], 'B': [4, 5, 2, 5, nan, 9, 5]}"], {'index': '[6, 5, 4, 3, 2, 1, nan]'}), "({'A': [8, 6, 1, nan, 2, 1, 4], 'B': [4, 5, 2, 5, nan, 9, 5]},\n index=[6, 5, 4, 3, 2, 1, nan])\n", (7228, 7325), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((7389, 7428), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (7407, 7428), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((7610, 7716), 'pandas.DataFrame', 'DataFrame', (["{'A': [4, 8, 6, 1, nan, 2, 1], 'B': [5, 4, 5, 2, 5, nan, 9]}"], {'index': '[nan, 6, 5, 4, 3, 2, 1]'}), "({'A': [4, 8, 6, 1, nan, 2, 1], 'B': [5, 4, 5, 2, 5, nan, 9]},\n index=[nan, 6, 5, 4, 3, 2, 1])\n", (7619, 7716), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((7780, 7819), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (7798, 7819), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((7896, 7992), 'pandas.DataFrame', 'DataFrame', (["[[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']]"], {'columns': "['sort_col', 'order']"}), "([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']], columns=[\n 'sort_col', 'order'])\n", (7905, 7992), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((8139, 8172), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'sorted_df'], {}), '(df, sorted_df)\n', (8157, 8172), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((8263, 8334), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]}"], {}), "({'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]})\n", (8272, 8334), 
False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((8410, 8514), 'pandas.DataFrame', 'DataFrame', (["{'A': [nan, 8, 6, 4, 2, 1, 1], 'B': [5, 4, 5, 5, nan, 2, 9]}"], {'index': '[2, 5, 4, 6, 1, 3, 0]'}), "({'A': [nan, 8, 6, 4, 2, 1, 1], 'B': [5, 4, 5, 5, nan, 2, 9]},\n index=[2, 5, 4, 6, 1, 3, 0])\n", (8419, 8514), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((8731, 8770), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (8749, 8770), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((8791, 8895), 'pandas.DataFrame', 'DataFrame', (["{'A': [nan, 8, 6, 4, 2, 1, 1], 'B': [5, 4, 5, 5, nan, 9, 2]}"], {'index': '[2, 5, 4, 6, 1, 0, 3]'}), "({'A': [nan, 8, 6, 4, 2, 1, 1], 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n", (8800, 8895), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((9112, 9151), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (9130, 9151), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((9415, 9454), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (9433, 9454), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((10132, 10160), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df1', 'df2'], {}), '(df1, df2)\n', (10150, 10160), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((10246, 10274), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df1', 'df2'], {}), '(df1, df2)\n', (10264, 10274), False, 'from pandas.util.testing import 
assert_series_equal, assert_frame_equal\n'), ((10366, 10394), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df1', 'df2'], {}), '(df1, df2)\n', (10384, 10394), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((11519, 11561), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df_sorted', 'df_reversed'], {}), '(df_sorted, df_reversed)\n', (11537, 11561), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((11645, 11687), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df_sorted', 'df_reversed'], {}), '(df_sorted, df_reversed)\n', (11663, 11687), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((11799, 11832), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df_sorted', 'df'], {}), '(df_sorted, df)\n', (11817, 11832), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((12416, 12458), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df_sorted', 'df_reversed'], {}), '(df_sorted, df_reversed)\n', (12434, 12458), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((12546, 12579), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df_sorted', 'df'], {}), '(df_sorted, df)\n', (12564, 12579), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((12715, 12748), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df_sorted', 'df'], {}), '(df_sorted, df)\n', (12733, 12748), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((13069, 13121), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': d1, 'b': d2}"], {'index': '[0, 1, 2, 3]'}), "({'a': d1, 'b': d2}, index=[0, 1, 2, 3])\n", (13081, 13121), True, 'import pandas as pd\n'), ((13398, 13450), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': d3, 'b': d4}"], {'index': '[1, 3, 0, 
2]'}), "({'a': d3, 'b': d4}, index=[1, 3, 0, 2])\n", (13410, 13450), True, 'import pandas as pd\n'), ((13511, 13553), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['sorted_df', 'expected'], {}), '(sorted_df, expected)\n', (13532, 13553), True, 'import pandas.util.testing as tm\n'), ((13727, 13744), 'random.shuffle', 'random.shuffle', (['A'], {}), '(A)\n', (13741, 13744), False, 'import random\n'), ((13753, 13770), 'random.shuffle', 'random.shuffle', (['B'], {}), '(B)\n', (13767, 13770), False, 'import random\n'), ((14071, 14107), 'numpy.lexsort', 'np.lexsort', (["(frame['B'], frame['A'])"], {}), "((frame['B'], frame['A']))\n", (14081, 14107), True, 'import numpy as np\n'), ((14155, 14191), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (14173, 14191), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((14586, 14622), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (14604, 14622), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((14825, 14861), 'numpy.lexsort', 'np.lexsort', (["(frame['A'], frame['B'])"], {}), "((frame['A'], frame['B']))\n", (14835, 14861), True, 'import numpy as np\n'), ((14909, 14945), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (14927, 14945), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((15306, 15338), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'expected'], {}), '(df, expected)\n', (15324, 15338), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((15497, 15529), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'expected'], {}), '(df, expected)\n', (15515, 15529), False, 'from pandas.util.testing import 
assert_series_equal, assert_frame_equal\n'), ((15710, 15742), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'expected'], {}), '(df, expected)\n', (15728, 15742), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((15882, 15914), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'expected'], {}), '(df, expected)\n', (15900, 15914), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((16060, 16086), 'numpy.random.permutation', 'np.random.permutation', (['(100)'], {}), '(100)\n', (16081, 16086), True, 'import numpy as np\n'), ((16557, 16593), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (16575, 16593), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((16769, 16805), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (16787, 16805), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((16894, 16936), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['result', "expected['C']"], {}), "(result, expected['C'])\n", (16913, 16936), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((19027, 19063), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (19045, 19063), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((19194, 19225), 'pandas.DataFrame', 'DataFrame', (['[[1, 2], [3, 4]]', 'mi'], {}), '([[1, 2], [3, 4]], mi)\n', (19203, 19225), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((19295, 19322), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'res'], {}), '(df, res)\n', (19313, 19322), False, 'from 
pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((19400, 19427), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'res'], {}), '(df, res)\n', (19418, 19427), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((19776, 19812), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (19794, 19812), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((19917, 19953), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (19935, 19953), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((20305, 20341), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (20323, 20341), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((20437, 20473), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (20455, 20473), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((20625, 20658), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'frame'], {}), '(result, frame)\n', (20643, 20658), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((20770, 20806), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (20788, 20806), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((21106, 21151), 'pandas.DataFrame', 'DataFrame', (['[[1, 2], [3, 4], [5, 6]]'], {'index': 'mi'}), '([[1, 2], [3, 4], [5, 6]], index=mi)\n', (21115, 21151), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((21308, 21365), 
'pandas.DataFrame', 'pd.DataFrame', (['[[5, 6], [3, 4], [1, 2]]'], {'index': 'expected_mi'}), '([[5, 6], [3, 4], [1, 2]], index=expected_mi)\n', (21320, 21365), True, 'import pandas as pd\n'), ((21455, 21491), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (21473, 21491), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((21679, 21736), 'pandas.DataFrame', 'pd.DataFrame', (['[[5, 6], [1, 2], [3, 4]]'], {'index': 'expected_mi'}), '([[5, 6], [1, 2], [3, 4]], index=expected_mi)\n', (21691, 21736), True, 'import pandas as pd\n'), ((21848, 21884), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (21866, 21884), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((22242, 22296), 'pandas.concat', 'pd.concat', (['[y, x1, x2]'], {'axis': '(1)', 'keys': "['Y', 'X1', 'X2']"}), "([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])\n", (22251, 22296), True, 'import pandas as pd\n'), ((22394, 22493), 'pandas.IntervalIndex.from_tuples', 'IntervalIndex.from_tuples', (['[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)]'], {'closed': '"""right"""'}), "([(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0\n )], closed='right')\n", (22419, 22493), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((22588, 22627), 'pandas.util.testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (22609, 22627), True, 'import pandas.util.testing as tm\n'), ((23902, 23938), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (23920, 23938), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((24399, 24435), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 
'expected'], {}), '(result, expected)\n', (24417, 24435), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((24927, 24963), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (24945, 24963), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((25453, 25489), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (25471, 25489), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal\n'), ((2820, 2859), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (2842, 2859), True, 'import pandas.util.testing as tm\n'), ((3003, 3024), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (3018, 3024), True, 'import numpy as np\n'), ((9750, 9762), 'pandas.Timestamp', 'Timestamp', (['x'], {}), '(x)\n', (9759, 9762), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((10493, 10552), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""This Series is a view"""'], {}), "(ValueError, 'This Series is a view')\n", (10515, 10552), True, 'import pandas.util.testing as tm\n'), ((12814, 12826), 'pandas.Timestamp', 'Timestamp', (['x'], {}), '(x)\n', (12823, 12826), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((12939, 12951), 'pandas.Timestamp', 'Timestamp', (['x'], {}), '(x)\n', (12948, 12951), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((13137, 13149), 'pandas.Timestamp', 'Timestamp', (['x'], {}), '(x)\n', (13146, 13149), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, 
Categorical\n'), ((13262, 13274), 'pandas.Timestamp', 'Timestamp', (['x'], {}), '(x)\n', (13271, 13274), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((13701, 13713), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (13710, 13713), True, 'import numpy as np\n'), ((13916, 13957), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (13942, 13957), True, 'import pandas.util.testing as tm\n'), ((14239, 14280), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (14265, 14280), True, 'import pandas.util.testing as tm\n'), ((14670, 14711), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (14696, 14711), True, 'import pandas.util.testing as tm\n'), ((15012, 15033), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (15027, 15033), True, 'import numpy as np\n'), ((16023, 16035), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (16032, 16035), True, 'import numpy as np\n'), ((16283, 16324), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (16309, 16324), True, 'import pandas.util.testing as tm\n'), ((17161, 17209), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""not unique"""'], {}), "(ValueError, 'not unique')\n", (17183, 17209), True, 'import pandas.util.testing as tm\n'), ((17359, 17407), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""not unique"""'], {}), "(ValueError, 'not unique')\n", (17381, 17407), True, 'import pandas.util.testing as tm\n'), ((17458, 17506), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""not unique"""'], {}), "(ValueError, 'not 
unique')\n", (17480, 17506), True, 'import pandas.util.testing as tm\n'), ((17658, 17706), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""not unique"""'], {}), "(ValueError, 'not unique')\n", (17680, 17706), True, 'import pandas.util.testing as tm\n'), ((17759, 17807), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""not unique"""'], {}), "(ValueError, 'not unique')\n", (17781, 17807), True, 'import pandas.util.testing as tm\n'), ((18021, 18069), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""not unique"""'], {}), "(ValueError, 'not unique')\n", (18043, 18069), True, 'import pandas.util.testing as tm\n'), ((18234, 18255), 'numpy.random.randn', 'np.random.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (18249, 18255), True, 'import numpy as np\n'), ((18347, 18390), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""level"""'], {}), "(ValueError, 'level')\n", (18369, 18390), True, 'import pandas.util.testing as tm\n'), ((18540, 18583), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', '"""level"""'], {}), "(ValueError, 'level')\n", (18562, 18583), True, 'import pandas.util.testing as tm\n'), ((18712, 18753), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (18738, 18753), True, 'import pandas.util.testing as tm\n'), ((18892, 18933), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (18918, 18933), True, 'import pandas.util.testing as tm\n'), ((22055, 22075), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (22070, 22075), True, 'import numpy as np\n'), ((25772, 25797), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (25785, 25797), False, 'import pytest\n'), ((9700, 9733), 
'pandas.date_range', 'date_range', (['"""20130101"""'], {'periods': '(9)'}), "('20130101', periods=9)\n", (9710, 9733), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((13657, 13669), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (13666, 13669), True, 'import numpy as np\n'), ((13846, 13866), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (13861, 13866), True, 'import numpy as np\n'), ((15979, 15992), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (15988, 15992), True, 'import numpy as np\n'), ((16213, 16233), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (16228, 16233), True, 'import numpy as np\n'), ((17068, 17080), 'pandas.compat.lrange', 'lrange', (['(5)', '(9)'], {}), '(5, 9)\n', (17074, 17080), False, 'from pandas.compat import lrange\n'), ((17082, 17091), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (17088, 17091), False, 'from pandas.compat import lrange\n'), ((17265, 17306), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (17291, 17306), True, 'import pandas.util.testing as tm\n'), ((17562, 17603), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (17588, 17603), True, 'import pandas.util.testing as tm\n'), ((17863, 17904), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (17889, 17904), True, 'import pandas.util.testing as tm\n'), ((18288, 18332), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('a', 0), ('a', 1)]"], {}), "([('a', 0), ('a', 1)])\n", (18310, 18332), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((18446, 18487), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', 
(['FutureWarning'], {}), '(FutureWarning)\n', (18472, 18487), True, 'import pandas.util.testing as tm\n'), ((22105, 22125), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (22120, 22125), True, 'import numpy as np\n'), ((22155, 22175), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (22170, 22175), True, 'import numpy as np\n'), ((23274, 23362), 'pandas.Categorical', 'pd.Categorical', (["['A', np.nan, 'B', np.nan, 'C']"], {'categories': 'categories', 'ordered': '(True)'}), "(['A', np.nan, 'B', np.nan, 'C'], categories=categories,\n ordered=True)\n", (23288, 23362), True, 'import pandas as pd\n'), ((23695, 23770), 'pandas.Categorical', 'Categorical', (['(list_of_nans + categories)'], {'categories': 'categories', 'ordered': '(True)'}), '(list_of_nans + categories, categories=categories, ordered=True)\n', (23706, 23770), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((24192, 24267), 'pandas.Categorical', 'Categorical', (['(categories + list_of_nans)'], {'categories': 'categories', 'ordered': '(True)'}), '(categories + list_of_nans, categories=categories, ordered=True)\n', (24203, 24267), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((24693, 24781), 'pandas.Categorical', 'Categorical', (['(list_of_nans + reversed_categories)'], {'categories': 'categories', 'ordered': '(True)'}), '(list_of_nans + reversed_categories, categories=categories,\n ordered=True)\n', (24704, 24781), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((25219, 25307), 'pandas.Categorical', 'Categorical', (['(reversed_categories + list_of_nans)'], {'categories': 'categories', 'ordered': '(True)'}), '(reversed_categories + list_of_nans, categories=categories,\n ordered=True)\n', (25230, 25307), False, 'from pandas import DataFrame, Series, 
MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((25602, 25695), 'pandas.Categorical', 'pd.Categorical', (["['A', np.nan, 'B', np.nan, 'C']"], {'categories': "['A', 'B', 'C']", 'ordered': '(True)'}), "(['A', np.nan, 'B', np.nan, 'C'], categories=['A', 'B', 'C'],\n ordered=True)\n", (25616, 25695), True, 'import pandas as pd\n'), ((9267, 9293), 'numpy.repeat', 'np.repeat', (['[1, 2, 3, 4]', '(5)'], {}), '([1, 2, 3, 4], 5)\n', (9276, 9293), True, 'import numpy as np\n'), ((20031, 20044), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (20040, 20044), True, 'import numpy as np\n'), ((11965, 11988), 'pandas.Timestamp', 'Timestamp', (['"""2016-01-01"""'], {}), "('2016-01-01')\n", (11974, 11988), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((12129, 12152), 'pandas.Timestamp', 'Timestamp', (['"""2016-01-01"""'], {}), "('2016-01-01')\n", (12138, 12152), False, 'from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range, NaT, IntervalIndex, Categorical\n'), ((19509, 19536), 'numpy.arange', 'np.arange', (['(6)'], {'dtype': '"""int64"""'}), "(6, dtype='int64')\n", (19518, 19536), True, 'import numpy as np\n')] |
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute"
__license__ = "MIT"
import unittest
import numpy as np
from segar.factors import (
Charge,
Magnetism,
Mass,
Floor,
Heat,
Friction,
GaussianNoise,
Label,
Position,
ID,
)
from segar.mdps.initializations import ArenaInitialization
from segar.rules import (
SetFactor,
IsEqual,
conditional_transition,
TransitionFunction,
Differential,
DidNotMatch,
DidNotPass,
inspect_signature,
IsOn,
Contains,
Prior,
)
from segar.sim import Simulator
from segar.parameters import Gravity
from segar.things import Object, Magnet, Tile, Charger, Entity
Simulator()
def _factors_and_parameters(
charge: Charge, magnetism: Magnetism, gravity: Gravity
) -> SetFactor[Charge]:
return SetFactor[Charge](charge, gravity)
_factors_and_parameters_rule = TransitionFunction(_factors_and_parameters)
_factors_and_parameters_rule_with_entity = TransitionFunction(
_factors_and_parameters, entity_type=Object
)
_factors_and_parameters_rule_with_factor = TransitionFunction(
_factors_and_parameters, factor_type=Mass
)
_factors_and_parameters_rule_with_condition = conditional_transition(relation=IsEqual(Charge, 1.0))(
_factors_and_parameters
)
_factors_and_parameters_sig = {
"has_entity": False,
"has_factors": True,
"n_objects": 1,
"input_patterns": [Charge, Magnetism, Gravity],
"returns": SetFactor[Charge],
"hints": {"charge": Charge, "magnetism": Magnetism, "return": SetFactor[Charge]},
}
_factors_and_parameters_ios = [
(
(Charge(0.3), Magnetism(0.5), Gravity(0.7)),
SetFactor[Charge](Charge(0.3), Gravity(0.7)),
),
(
(Object({Charge: 0.3, Magnetism: 0.5}), Gravity(0.7)),
SetFactor[Charge](Charge(0.3), Gravity(0.7)),
),
]
# Tuples and parameters
def _tuples_and_parameters(
o1_factors: tuple[Charge, Magnetism],
o2_factors: tuple[Floor, Heat, Friction],
gravity: Gravity,
) -> Differential[Charge]:
charge, _ = o1_factors
return Differential[Charge](charge, gravity)
_tuples_and_parameters_sig = {
"has_entity": True,
"has_factors": False,
"n_objects": 2,
"input_patterns": [(Charge, Magnetism), (Floor, Heat, Friction), Gravity],
"returns": Differential[Charge],
"hints": {
"o1_factors": tuple[Charge, Magnetism],
"o2_factors": tuple[Floor, Heat, Friction],
"gravity": Gravity,
"return": Differential[Charge],
},
}
_tuples_and_parameters_rule = TransitionFunction(_tuples_and_parameters)
_tuples_and_parameters_with_entity_rule = TransitionFunction(
_tuples_and_parameters, entity_type=Magnet
)
_tuples_and_parameters_with_factor_rule = TransitionFunction(
_tuples_and_parameters, factor_type=Mass
)
_tuples_and_parameters_with_condition_rule = conditional_transition(relation=IsEqual(Charge, 0.3))(
_tuples_and_parameters
)
_tuples_and_parameters_ios = [
(
((Charge(0.3), Magnetism(0.5)), (Floor(), Heat(0.11), Friction(0.13)), Gravity(0.7)),
Differential[Charge](Charge(0.3), Gravity(0.7)),
),
(
(Object({Charge: 0.3, Magnetism: 0.5}), Tile({Heat: 0.11, Friction: 0.13}), Gravity(0.7)),
Differential[Charge](Charge(0.3), Gravity(0.7)),
),
]
_tuples_and_parameters_with_entity_pass_ios = [
(
(Magnet({Charge: 0.3, Magnetism: 0.5}), Tile({Heat: 0.11, Friction: 0.13}), Gravity(0.7)),
Differential[Charge](Charge(0.3), Gravity(0.7)),
)
]
_tuples_and_parameters_with_entity_fail_ios = [
(
(Charger({Charge: 0.3, Magnetism: 0.5}), Tile({Heat: 0.11, Friction: 0.13}), Gravity(0.7)),
Differential[Charge](Charge(0.3), Gravity(0.7)),
)
]
_tuples_and_parameters_with_factor_pass_ios = [
(
(
Entity({Charge: 0.3, Magnetism: 0.5, Mass: 0.17}),
Tile({Heat: 0.11, Friction: 0.13}),
Gravity(0.7),
),
Differential[Charge](Charge(0.3), Gravity(0.7)),
)
]
_tuples_and_parameters_with_factor_fail_ios = [
(
(Entity({Charge: 0.3, Magnetism: 0.5}), Tile({Heat: 0.11, Friction: 0.13}), Gravity(0.7)),
Differential[Charge](Charge(0.3), Gravity(0.7)),
)
]
_tuples_and_parameters_with_condition_pass_ios = [
(
(Entity({Charge: 0.3, Magnetism: 0.5}), Tile({Heat: 0.11, Friction: 0.13}), Gravity(0.7)),
Differential[Charge](Charge(0.3), Gravity(0.7)),
)
]
_tuples_and_parameters_with_condition_fail_ios = [
(
(Entity({Charge: 0.4, Magnetism: 0.5}), Tile({Heat: 0.11, Friction: 0.13}), Gravity(0.7)),
Differential[Charge](Charge(0.4), Gravity(0.7)),
)
]
# Entities and parameters
def _entities_and_parameters(o1: Object, o2: Object, gravity: Gravity) -> Differential[Charge]:
charge = o1[Charge]
return Differential[Charge](charge, gravity)
_entities_and_parameters_sig = {
"has_entity": True,
"has_factors": False,
"n_objects": 2,
"input_patterns": [Object, Object, Gravity],
"returns": Differential[Charge],
"hints": {"o1": Object, "o2": Object, "gravity": Gravity, "return": Differential[Charge]},
}
_entities_and_parameters_rule = TransitionFunction(_entities_and_parameters)
_entities_and_parameters_ios = [
(
(
Object({Charge: 0.3, Magnetism: 0.5}),
Object({Charge: 0.11, Magnetism: 0.13}),
Gravity(0.7),
),
Differential[Charge](Charge(0.3), Gravity(0.7)),
)
]
# Should fail signature
def _factors_and_entities(
charge: Charge, o2_factors: tuple[Floor, Heat, Friction], gravity: Gravity
) -> Differential[Charge]:
pass
class TestRules(unittest.TestCase):
def test_signatures(self):
print("Testing rule signatures.")
rule_names = (
"factors_and_parameters",
"tuples_and_parameters",
"entities_and_parameters",
)
for rule_name in rule_names:
rule_fn = globals()["_" + rule_name]
target_info = globals()["_" + rule_name + "_sig"]
signature_info = inspect_signature(rule_fn)
for k in (
"has_entity",
"has_factors",
"input_patterns",
"returns",
):
self.assertEqual(
signature_info[k],
target_info[k],
f"Signature on {rule_name} failed on {k}.",
)
bad_rule_names = ("factors_and_entities",)
for rule_name in bad_rule_names:
rule_fn = globals()["_" + rule_name]
with self.assertRaises(TypeError, msg=f"{rule_name} should fail with " f"TypeError"):
inspect_signature(rule_fn)
def test_rules(self):
print("Testing rules with inputs that should work.")
rule_names = (
"factors_and_parameters",
"tuples_and_parameters",
"entities_and_parameters",
)
for rule_name in rule_names:
rule = globals()["_" + rule_name + "_rule"]
rule_ios = globals()["_" + rule_name + "_ios"]
print(f"Testing {rule_name}")
for inp, target in rule_ios:
out = rule(*inp)
if isinstance(out, DidNotMatch):
raise ValueError(f"Input {inp} did not match on" f" {rule_name}.")
self.assertEqual(out, target)
def test_rule_breaking(self):
print("Testing inputs that should fail with rules.")
rule_names = (
"factors_and_parameters",
"tuples_and_parameters",
"entities_and_parameters",
)
for rule_name in rule_names:
for rule_name_ in rule_names:
if rule_name_ == rule_name:
continue
rule = globals()["_" + rule_name + "_rule"]
rule_ios = globals()["_" + rule_name_ + "_ios"]
print(f"Testing {rule_name} with {rule_name_} IOs")
for inp, target in rule_ios:
out = rule(*inp)
self.assertIsInstance(out, DidNotMatch)
def test_typevar_rules(self):
print("Testing rules with inferred types")
rules = (
IsEqual(Charge, 0.3),
IsOn(),
Contains(),
Prior(Charge, GaussianNoise()),
)
for rule in rules:
sig = rule.signature_info
self.assertIsNotNone(sig)
def test_rule_with_requirements(self):
print("Testing rules with required entities or factors")
rule_names = (
"tuples_and_parameters_with_factor",
"tuples_and_parameters_with_entity",
"tuples_and_parameters_with_condition",
)
for rule_name in rule_names:
rule = globals()["_" + rule_name + "_rule"]
rule_ios_pass = globals()["_" + rule_name + "_pass_ios"]
rule_ios_fail = globals()["_" + rule_name + "_fail_ios"]
for inp, target in rule_ios_pass:
out = rule(*inp)
if isinstance(out, DidNotMatch):
raise ValueError(f"Input {inp} did not match on" f" {rule_name}.")
self.assertEqual(out, target)
for inp, target in rule_ios_fail:
out = rule(*inp)
self.assertIsInstance(
out,
(DidNotMatch, DidNotPass),
msg=f"Rule {rule_name} with inputs"
f" {inp} was supposed to fail but "
f"did not. Got {out}.",
)
def test_priors(self):
print("Testing priors")
p1 = Prior(Charge, 0.3)
p2 = Prior(Charge, GaussianNoise())
m = Magnetism(0.1)
p3 = Prior(Charge, m)
for prior in (p1, p2, p3):
c = Charge(0.0)
out = prior(c)
out()
def test_rule_priorities(self):
sim = Simulator(local_sim=True)
sim.reset()
class Ball(Object, default={Label: "ball"}):
pass
class Ball2(Object, default={ID: "golfball", Label: "ball"}):
pass
numbers = [(Object, 1), (Ball, 1), (Ball2, 1), (Charger, 1)]
# Copying for rules is necessary for multi-sim needs.
priors = [
Prior(Position, [0.5, 0.5], entity_type=Object),
Prior(Position, [1.0, 1.0], entity_type=Charger),
Prior(Position, [1.5, 1.5], relation=IsEqual(Label, "ball")),
Prior(Position, [2.0, 2.0], relation=IsEqual(ID, "golfball")),
]
init = ArenaInitialization(config={"numbers": numbers, "priors": priors})
init.set_sim(sim)
if init.sim is not sim:
raise ValueError("init has wrong sim.")
for prior in init._priors:
if prior.sim is not sim:
raise ValueError("Prior should have same sim.")
init.sample()
init.set_arena()
for thing in sim.things.values():
if thing.has_factor(Position):
if thing[ID] == "golfball":
self.assertEqual(thing[Position], np.array([2.0, 2.0]))
elif thing.has_factor(Label) and thing[Label] == "ball":
self.assertEqual(thing[Position], np.array([1.5, 1.5]))
elif isinstance(thing, Charger):
self.assertEqual(thing[Position], np.array([1.0, 1.0]))
else:
self.assertEqual(thing[Position], np.array([0.5, 0.5]))
def test():
unittest.main()
if __name__ == "__main__":
test()
| [
"segar.things.Entity",
"segar.rules.inspect_signature",
"segar.rules.IsEqual",
"numpy.array",
"segar.sim.Simulator",
"unittest.main",
"segar.rules.Prior",
"segar.rules.IsOn",
"segar.factors.Heat",
"segar.things.Charger",
"segar.things.Magnet",
"segar.factors.Magnetism",
"segar.factors.Gaussi... | [((706, 717), 'segar.sim.Simulator', 'Simulator', ([], {}), '()\n', (715, 717), False, 'from segar.sim import Simulator\n'), ((911, 954), 'segar.rules.TransitionFunction', 'TransitionFunction', (['_factors_and_parameters'], {}), '(_factors_and_parameters)\n', (929, 954), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((998, 1061), 'segar.rules.TransitionFunction', 'TransitionFunction', (['_factors_and_parameters'], {'entity_type': 'Object'}), '(_factors_and_parameters, entity_type=Object)\n', (1016, 1061), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((1111, 1172), 'segar.rules.TransitionFunction', 'TransitionFunction', (['_factors_and_parameters'], {'factor_type': 'Mass'}), '(_factors_and_parameters, factor_type=Mass)\n', (1129, 1172), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((2587, 2629), 'segar.rules.TransitionFunction', 'TransitionFunction', (['_tuples_and_parameters'], {}), '(_tuples_and_parameters)\n', (2605, 2629), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((2672, 2734), 'segar.rules.TransitionFunction', 'TransitionFunction', (['_tuples_and_parameters'], {'entity_type': 'Magnet'}), '(_tuples_and_parameters, entity_type=Magnet)\n', (2690, 2734), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((2783, 2843), 'segar.rules.TransitionFunction', 'TransitionFunction', 
(['_tuples_and_parameters'], {'factor_type': 'Mass'}), '(_tuples_and_parameters, factor_type=Mass)\n', (2801, 2843), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((5255, 5299), 'segar.rules.TransitionFunction', 'TransitionFunction', (['_entities_and_parameters'], {}), '(_entities_and_parameters)\n', (5273, 5299), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((11690, 11705), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11703, 11705), False, 'import unittest\n'), ((9798, 9816), 'segar.rules.Prior', 'Prior', (['Charge', '(0.3)'], {}), '(Charge, 0.3)\n', (9803, 9816), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((9873, 9887), 'segar.factors.Magnetism', 'Magnetism', (['(0.1)'], {}), '(0.1)\n', (9882, 9887), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((9901, 9917), 'segar.rules.Prior', 'Prior', (['Charge', 'm'], {}), '(Charge, m)\n', (9906, 9917), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((10078, 10103), 'segar.sim.Simulator', 'Simulator', ([], {'local_sim': '(True)'}), '(local_sim=True)\n', (10087, 10103), False, 'from segar.sim import Simulator\n'), ((10732, 10798), 'segar.mdps.initializations.ArenaInitialization', 'ArenaInitialization', ([], {'config': "{'numbers': numbers, 'priors': priors}"}), "(config={'numbers': numbers, 'priors': priors})\n", (10751, 10798), False, 'from segar.mdps.initializations import ArenaInitialization\n'), ((1257, 
1277), 'segar.rules.IsEqual', 'IsEqual', (['Charge', '(1.0)'], {}), '(Charge, 1.0)\n', (1264, 1277), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((1637, 1648), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (1643, 1648), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((1650, 1664), 'segar.factors.Magnetism', 'Magnetism', (['(0.5)'], {}), '(0.5)\n', (1659, 1664), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((1666, 1678), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (1673, 1678), False, 'from segar.parameters import Gravity\n'), ((1707, 1718), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (1713, 1718), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((1720, 1732), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (1727, 1732), False, 'from segar.parameters import Gravity\n'), ((1757, 1794), 'segar.things.Object', 'Object', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (1763, 1794), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((1796, 1808), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (1803, 1808), False, 'from segar.parameters import Gravity\n'), ((1837, 1848), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (1843, 1848), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((1850, 1862), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (1857, 1862), False, 'from segar.parameters import Gravity\n'), ((2927, 2947), 'segar.rules.IsEqual', 'IsEqual', 
(['Charge', '(0.3)'], {}), '(Charge, 0.3)\n', (2934, 2947), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((3097, 3109), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3104, 3109), False, 'from segar.parameters import Gravity\n'), ((3141, 3152), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (3147, 3152), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3154, 3166), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3161, 3166), False, 'from segar.parameters import Gravity\n'), ((3191, 3228), 'segar.things.Object', 'Object', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (3197, 3228), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3230, 3264), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, Friction: 0.13})\n', (3234, 3264), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3266, 3278), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3273, 3278), False, 'from segar.parameters import Gravity\n'), ((3310, 3321), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (3316, 3321), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3323, 3335), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3330, 3335), False, 'from segar.parameters import Gravity\n'), ((3412, 3449), 'segar.things.Magnet', 'Magnet', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (3418, 3449), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3451, 3485), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, 
Friction: 0.13})\n', (3455, 3485), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3487, 3499), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3494, 3499), False, 'from segar.parameters import Gravity\n'), ((3531, 3542), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (3537, 3542), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3544, 3556), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3551, 3556), False, 'from segar.parameters import Gravity\n'), ((3632, 3670), 'segar.things.Charger', 'Charger', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (3639, 3670), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3672, 3706), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, Friction: 0.13})\n', (3676, 3706), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3708, 3720), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3715, 3720), False, 'from segar.parameters import Gravity\n'), ((3752, 3763), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (3758, 3763), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3765, 3777), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3772, 3777), False, 'from segar.parameters import Gravity\n'), ((3866, 3915), 'segar.things.Entity', 'Entity', (['{Charge: 0.3, Magnetism: 0.5, Mass: 0.17}'], {}), '({Charge: 0.3, Magnetism: 0.5, Mass: 0.17})\n', (3872, 3915), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((3929, 3963), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, Friction: 0.13})\n', (3933, 3963), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), 
((3977, 3989), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (3984, 3989), False, 'from segar.parameters import Gravity\n'), ((4031, 4042), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (4037, 4042), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((4044, 4056), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4051, 4056), False, 'from segar.parameters import Gravity\n'), ((4132, 4169), 'segar.things.Entity', 'Entity', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (4138, 4169), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((4171, 4205), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, Friction: 0.13})\n', (4175, 4205), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((4207, 4219), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4214, 4219), False, 'from segar.parameters import Gravity\n'), ((4251, 4262), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (4257, 4262), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((4264, 4276), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4271, 4276), False, 'from segar.parameters import Gravity\n'), ((4355, 4392), 'segar.things.Entity', 'Entity', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (4361, 4392), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((4394, 4428), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, Friction: 0.13})\n', (4398, 4428), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((4430, 4442), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4437, 4442), False, 'from segar.parameters import 
Gravity\n'), ((4474, 4485), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (4480, 4485), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((4487, 4499), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4494, 4499), False, 'from segar.parameters import Gravity\n'), ((4578, 4615), 'segar.things.Entity', 'Entity', (['{Charge: 0.4, Magnetism: 0.5}'], {}), '({Charge: 0.4, Magnetism: 0.5})\n', (4584, 4615), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((4617, 4651), 'segar.things.Tile', 'Tile', (['{Heat: 0.11, Friction: 0.13}'], {}), '({Heat: 0.11, Friction: 0.13})\n', (4621, 4651), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((4653, 4665), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4660, 4665), False, 'from segar.parameters import Gravity\n'), ((4697, 4708), 'segar.factors.Charge', 'Charge', (['(0.4)'], {}), '(0.4)\n', (4703, 4708), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((4710, 4722), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (4717, 4722), False, 'from segar.parameters import Gravity\n'), ((5363, 5400), 'segar.things.Object', 'Object', (['{Charge: 0.3, Magnetism: 0.5}'], {}), '({Charge: 0.3, Magnetism: 0.5})\n', (5369, 5400), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((5414, 5453), 'segar.things.Object', 'Object', (['{Charge: 0.11, Magnetism: 0.13}'], {}), '({Charge: 0.11, Magnetism: 0.13})\n', (5420, 5453), False, 'from segar.things import Object, Magnet, Tile, Charger, Entity\n'), ((5467, 5479), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (5474, 5479), False, 'from segar.parameters import Gravity\n'), ((5521, 5532), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (5527, 5532), False, 'from 
segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((5534, 5546), 'segar.parameters.Gravity', 'Gravity', (['(0.7)'], {}), '(0.7)\n', (5541, 5546), False, 'from segar.parameters import Gravity\n'), ((6161, 6187), 'segar.rules.inspect_signature', 'inspect_signature', (['rule_fn'], {}), '(rule_fn)\n', (6178, 6187), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((8352, 8372), 'segar.rules.IsEqual', 'IsEqual', (['Charge', '(0.3)'], {}), '(Charge, 0.3)\n', (8359, 8372), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((8386, 8392), 'segar.rules.IsOn', 'IsOn', ([], {}), '()\n', (8390, 8392), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((8406, 8416), 'segar.rules.Contains', 'Contains', ([], {}), '()\n', (8414, 8416), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((9844, 9859), 'segar.factors.GaussianNoise', 'GaussianNoise', ([], {}), '()\n', (9857, 9859), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((9970, 9981), 'segar.factors.Charge', 'Charge', (['(0.0)'], {}), '(0.0)\n', (9976, 9981), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((10447, 10494), 'segar.rules.Prior', 'Prior', (['Position', '[0.5, 0.5]'], {'entity_type': 'Object'}), '(Position, [0.5, 0.5], entity_type=Object)\n', (10452, 10494), False, 'from segar.rules import 
SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((10508, 10556), 'segar.rules.Prior', 'Prior', (['Position', '[1.0, 1.0]'], {'entity_type': 'Charger'}), '(Position, [1.0, 1.0], entity_type=Charger)\n', (10513, 10556), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((3028, 3039), 'segar.factors.Charge', 'Charge', (['(0.3)'], {}), '(0.3)\n', (3034, 3039), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3041, 3055), 'segar.factors.Magnetism', 'Magnetism', (['(0.5)'], {}), '(0.5)\n', (3050, 3055), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3059, 3066), 'segar.factors.Floor', 'Floor', ([], {}), '()\n', (3064, 3066), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3068, 3078), 'segar.factors.Heat', 'Heat', (['(0.11)'], {}), '(0.11)\n', (3072, 3078), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((3080, 3094), 'segar.factors.Friction', 'Friction', (['(0.13)'], {}), '(0.13)\n', (3088, 3094), False, 'from segar.factors import Charge, Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((6795, 6821), 'segar.rules.inspect_signature', 'inspect_signature', (['rule_fn'], {}), '(rule_fn)\n', (6812, 6821), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((8444, 8459), 'segar.factors.GaussianNoise', 'GaussianNoise', ([], {}), '()\n', (8457, 8459), False, 'from segar.factors import Charge, 
Magnetism, Mass, Floor, Heat, Friction, GaussianNoise, Label, Position, ID\n'), ((10607, 10629), 'segar.rules.IsEqual', 'IsEqual', (['Label', '"""ball"""'], {}), "(Label, 'ball')\n", (10614, 10629), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((10681, 10704), 'segar.rules.IsEqual', 'IsEqual', (['ID', '"""golfball"""'], {}), "(ID, 'golfball')\n", (10688, 10704), False, 'from segar.rules import SetFactor, IsEqual, conditional_transition, TransitionFunction, Differential, DidNotMatch, DidNotPass, inspect_signature, IsOn, Contains, Prior\n'), ((11278, 11298), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (11286, 11298), True, 'import numpy as np\n'), ((11427, 11447), 'numpy.array', 'np.array', (['[1.5, 1.5]'], {}), '([1.5, 1.5])\n', (11435, 11447), True, 'import numpy as np\n'), ((11552, 11572), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (11560, 11572), True, 'import numpy as np\n'), ((11650, 11670), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (11658, 11670), True, 'import numpy as np\n')] |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common fixtures for the qnn module.
"""
import pytest
import pennylane as qml
import numpy as np
@pytest.fixture
def get_circuit(n_qubits, output_dim, interface):
"""Fixture for getting a sample quantum circuit with a controllable qubit number and output
dimension. Returns both the circuit and the shape of the weights."""
dev = qml.device("default.qubit", wires=n_qubits)
weight_shapes = {
"w1": (3, n_qubits, 3),
"w2": (1,),
"w3": 1,
"w4": [3],
"w5": (2, n_qubits, 3),
"w6": 3,
"w7": 0,
}
@qml.qnode(dev, interface=interface)
def circuit(inputs, w1, w2, w3, w4, w5, w6, w7):
"""A circuit that embeds data using the AngleEmbedding and then performs a variety of
operations. The output is a PauliZ measurement on the first output_dim qubits. One set of
parameters, w5, are specified as non-trainable."""
qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits)))
qml.templates.StronglyEntanglingLayers(w1, wires=list(range(n_qubits)))
qml.RX(w2[0], wires=0 % n_qubits)
qml.RX(w3, wires=1 % n_qubits)
qml.Rot(*w4, wires=2 % n_qubits)
qml.templates.StronglyEntanglingLayers(w5, wires=list(range(n_qubits)))
qml.Rot(*w6, wires=3 % n_qubits)
qml.RX(w7, wires=4 % n_qubits)
return [qml.expval(qml.PauliZ(i)) for i in range(output_dim)]
return circuit, weight_shapes
@pytest.fixture
def get_circuit_dm(n_qubits, output_dim, interface):
"""Fixture for getting a sample quantum circuit with a controllable qubit number and output
dimension for density matrix return type. Returns both the circuit and the shape of the weights."""
dev = qml.device("default.qubit", wires=n_qubits)
weight_shapes = {
"w1": (3, n_qubits, 3),
"w2": (1,),
"w3": 1,
"w4": [3],
"w5": (2, n_qubits, 3),
"w6": 3,
"w7": 0,
}
@qml.qnode(dev, interface=interface)
def circuit(inputs, w1, w2, w3, w4, w5, w6, w7):
"""Sample circuit to be used for testing density_matrix() return type."""
qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits)))
qml.templates.StronglyEntanglingLayers(w1, wires=list(range(n_qubits)))
qml.RX(w2[0], wires=0 % n_qubits)
qml.RX(w3, wires=1 % n_qubits)
qml.Rot(*w4, wires=2 % n_qubits)
qml.templates.StronglyEntanglingLayers(w5, wires=list(range(n_qubits)))
qml.Rot(*w6, wires=3 % n_qubits)
qml.RX(w7, wires=4 % n_qubits)
# Using np.log2() here because output_dim is sampled from varying the number of
# qubits (say, nq) and calculated as (2 ** nq, 2 ** nq)
return qml.density_matrix(wires=[i for i in range(int(np.log2(output_dim[0])))])
return circuit, weight_shapes
| [
"pennylane.Rot",
"pennylane.qnode",
"pennylane.device",
"pennylane.PauliZ",
"pennylane.RX",
"numpy.log2"
] | [((947, 990), 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': 'n_qubits'}), "('default.qubit', wires=n_qubits)\n", (957, 990), True, 'import pennylane as qml\n'), ((1179, 1214), 'pennylane.qnode', 'qml.qnode', (['dev'], {'interface': 'interface'}), '(dev, interface=interface)\n', (1188, 1214), True, 'import pennylane as qml\n'), ((2342, 2385), 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': 'n_qubits'}), "('default.qubit', wires=n_qubits)\n", (2352, 2385), True, 'import pennylane as qml\n'), ((2574, 2609), 'pennylane.qnode', 'qml.qnode', (['dev'], {'interface': 'interface'}), '(dev, interface=interface)\n', (2583, 2609), True, 'import pennylane as qml\n'), ((1681, 1714), 'pennylane.RX', 'qml.RX', (['w2[0]'], {'wires': '(0 % n_qubits)'}), '(w2[0], wires=0 % n_qubits)\n', (1687, 1714), True, 'import pennylane as qml\n'), ((1723, 1753), 'pennylane.RX', 'qml.RX', (['w3'], {'wires': '(1 % n_qubits)'}), '(w3, wires=1 % n_qubits)\n', (1729, 1753), True, 'import pennylane as qml\n'), ((1762, 1794), 'pennylane.Rot', 'qml.Rot', (['*w4'], {'wires': '(2 % n_qubits)'}), '(*w4, wires=2 % n_qubits)\n', (1769, 1794), True, 'import pennylane as qml\n'), ((1883, 1915), 'pennylane.Rot', 'qml.Rot', (['*w6'], {'wires': '(3 % n_qubits)'}), '(*w6, wires=3 % n_qubits)\n', (1890, 1915), True, 'import pennylane as qml\n'), ((1924, 1954), 'pennylane.RX', 'qml.RX', (['w7'], {'wires': '(4 % n_qubits)'}), '(w7, wires=4 % n_qubits)\n', (1930, 1954), True, 'import pennylane as qml\n'), ((2907, 2940), 'pennylane.RX', 'qml.RX', (['w2[0]'], {'wires': '(0 % n_qubits)'}), '(w2[0], wires=0 % n_qubits)\n', (2913, 2940), True, 'import pennylane as qml\n'), ((2949, 2979), 'pennylane.RX', 'qml.RX', (['w3'], {'wires': '(1 % n_qubits)'}), '(w3, wires=1 % n_qubits)\n', (2955, 2979), True, 'import pennylane as qml\n'), ((2988, 3020), 'pennylane.Rot', 'qml.Rot', (['*w4'], {'wires': '(2 % n_qubits)'}), '(*w4, wires=2 % n_qubits)\n', (2995, 3020), True, 'import 
pennylane as qml\n'), ((3109, 3141), 'pennylane.Rot', 'qml.Rot', (['*w6'], {'wires': '(3 % n_qubits)'}), '(*w6, wires=3 % n_qubits)\n', (3116, 3141), True, 'import pennylane as qml\n'), ((3150, 3180), 'pennylane.RX', 'qml.RX', (['w7'], {'wires': '(4 % n_qubits)'}), '(w7, wires=4 % n_qubits)\n', (3156, 3180), True, 'import pennylane as qml\n'), ((1982, 1995), 'pennylane.PauliZ', 'qml.PauliZ', (['i'], {}), '(i)\n', (1992, 1995), True, 'import pennylane as qml\n'), ((3396, 3418), 'numpy.log2', 'np.log2', (['output_dim[0]'], {}), '(output_dim[0])\n', (3403, 3418), True, 'import numpy as np\n')] |
import sys
import time
import numpy as np
from gym.spaces.discrete import Discrete
from tianshou.data import Batch
from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv
if __name__ == '__main__':
from env import MyTestEnv, NXEnv
else: # pytest
from test.base.env import MyTestEnv, NXEnv
def has_ray():
try:
import ray # noqa: F401
return True
except ImportError:
return False
def recurse_comp(a, b):
try:
if isinstance(a, np.ndarray):
if a.dtype == object:
return np.array([recurse_comp(m, n) for m, n in zip(a, b)]).all()
else:
return np.allclose(a, b)
elif isinstance(a, (list, tuple)):
return np.array([recurse_comp(m, n) for m, n in zip(a, b)]).all()
elif isinstance(a, dict):
return np.array([recurse_comp(a[k], b[k]) for k in a.keys()]).all()
except (Exception):
return False
def test_async_env(size=10000, num=8, sleep=0.1):
# simplify the test case, just keep stepping
env_fns = [
lambda i=i: MyTestEnv(size=i, sleep=sleep, random_sleep=True)
for i in range(size, size + num)
]
test_cls = [SubprocVectorEnv, ShmemVectorEnv]
if has_ray():
test_cls += [RayVectorEnv]
for cls in test_cls:
v = cls(env_fns, wait_num=num // 2, timeout=1e-3)
v.seed(None)
v.reset()
# for a random variable u ~ U[0, 1], let v = max{u1, u2, ..., un}
# P(v <= x) = x^n (0 <= x <= 1), pdf of v is nx^{n-1}
# expectation of v is n / (n + 1)
# for a synchronous environment, the following actions should take
# about 7 * sleep * num / (num + 1) seconds
# for async simulation, the analysis is complicated, but the time cost
# should be smaller
action_list = [1] * num + [0] * (num * 2) + [1] * (num * 4)
current_idx_start = 0
action = action_list[:num]
env_ids = list(range(num))
o = []
spent_time = time.time()
while current_idx_start < len(action_list):
A, B, C, D = v.step(action=action, id=env_ids)
b = Batch({'obs': A, 'rew': B, 'done': C, 'info': D})
env_ids = b.info.env_id
o.append(b)
current_idx_start += len(action)
# len of action may be smaller than len(A) in the end
action = action_list[current_idx_start:current_idx_start + len(A)]
# truncate env_ids with the first terms
# typically len(env_ids) == len(A) == len(action), except for the
# last batch when actions are not enough
env_ids = env_ids[:len(action)]
spent_time = time.time() - spent_time
Batch.cat(o)
v.close()
# assure 1/7 improvement
if sys.platform != "darwin": # macOS cannot pass this check
assert spent_time < 6.0 * sleep * num / (num + 1)
def test_async_check_id(size=100, num=4, sleep=.2, timeout=.7):
env_fns = [
lambda: MyTestEnv(size=size, sleep=sleep * 2),
lambda: MyTestEnv(size=size, sleep=sleep * 3),
lambda: MyTestEnv(size=size, sleep=sleep * 5),
lambda: MyTestEnv(size=size, sleep=sleep * 7)
]
test_cls = [SubprocVectorEnv, ShmemVectorEnv]
if has_ray():
test_cls += [RayVectorEnv]
total_pass = 0
for cls in test_cls:
pass_check = 1
v = cls(env_fns, wait_num=num - 1, timeout=timeout)
v.reset()
expect_result = [
[0, 1],
[0, 1, 2],
[0, 1, 3],
[0, 1, 2],
[0, 1],
[0, 2, 3],
[0, 1],
]
ids = np.arange(num)
for res in expect_result:
t = time.time()
_, _, _, info = v.step([1] * len(ids), ids)
t = time.time() - t
ids = Batch(info).env_id
print(ids, t)
if not (
len(ids) == len(res) and np.allclose(sorted(ids), res) and
(t < timeout) == (len(res) == num - 1)
):
pass_check = 0
break
total_pass += pass_check
if sys.platform == "linux": # Windows/macOS may not pass this check
assert total_pass >= 2
def test_vecenv(size=10, num=8, sleep=0.001):
env_fns = [
lambda i=i: MyTestEnv(size=i, sleep=sleep, recurse_state=True)
for i in range(size, size + num)
]
venv = [
DummyVectorEnv(env_fns),
SubprocVectorEnv(env_fns),
ShmemVectorEnv(env_fns),
]
if has_ray():
venv += [RayVectorEnv(env_fns)]
for v in venv:
v.seed(0)
action_list = [1] * 5 + [0] * 10 + [1] * 20
o = [v.reset() for v in venv]
for a in action_list:
o = []
for v in venv:
A, B, C, D = v.step([a] * num)
if sum(C):
A = v.reset(np.where(C)[0])
o.append([A, B, C, D])
for index, infos in enumerate(zip(*o)):
if index == 3: # do not check info here
continue
for info in infos:
assert recurse_comp(infos[0], info)
if __name__ == '__main__':
t = [0] * len(venv)
for i, e in enumerate(venv):
t[i] = time.time()
e.reset()
for a in action_list:
done = e.step([a] * num)[2]
if sum(done) > 0:
e.reset(np.where(done)[0])
t[i] = time.time() - t[i]
for i, v in enumerate(venv):
print(f'{type(v)}: {t[i]:.6f}s')
for v in venv:
assert v.size == list(range(size, size + num))
assert v.env_num == num
assert v.action_space == [Discrete(2)] * num
for v in venv:
v.close()
def test_env_obs():
for obs_type in ["array", "object"]:
envs = SubprocVectorEnv(
[lambda i=x: NXEnv(i, obs_type) for x in [5, 10, 15, 20]]
)
obs = envs.reset()
assert obs.dtype == object
obs = envs.step([1, 1, 1, 1])[0]
assert obs.dtype == object
if __name__ == '__main__':
test_env_obs()
test_vecenv()
test_async_env()
test_async_check_id()
| [
"tianshou.env.ShmemVectorEnv",
"numpy.allclose",
"tianshou.env.DummyVectorEnv",
"tianshou.data.Batch.cat",
"numpy.where",
"test.base.env.MyTestEnv",
"tianshou.env.RayVectorEnv",
"gym.spaces.discrete.Discrete",
"tianshou.data.Batch",
"tianshou.env.SubprocVectorEnv",
"test.base.env.NXEnv",
"time... | [((2060, 2071), 'time.time', 'time.time', ([], {}), '()\n', (2069, 2071), False, 'import time\n'), ((2780, 2792), 'tianshou.data.Batch.cat', 'Batch.cat', (['o'], {}), '(o)\n', (2789, 2792), False, 'from tianshou.data import Batch\n'), ((3732, 3746), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (3741, 3746), True, 'import numpy as np\n'), ((4519, 4542), 'tianshou.env.DummyVectorEnv', 'DummyVectorEnv', (['env_fns'], {}), '(env_fns)\n', (4533, 4542), False, 'from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv\n'), ((4552, 4577), 'tianshou.env.SubprocVectorEnv', 'SubprocVectorEnv', (['env_fns'], {}), '(env_fns)\n', (4568, 4577), False, 'from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv\n'), ((4587, 4610), 'tianshou.env.ShmemVectorEnv', 'ShmemVectorEnv', (['env_fns'], {}), '(env_fns)\n', (4601, 4610), False, 'from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv\n'), ((1122, 1171), 'test.base.env.MyTestEnv', 'MyTestEnv', ([], {'size': 'i', 'sleep': 'sleep', 'random_sleep': '(True)'}), '(size=i, sleep=sleep, random_sleep=True)\n', (1131, 1171), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((2199, 2248), 'tianshou.data.Batch', 'Batch', (["{'obs': A, 'rew': B, 'done': C, 'info': D}"], {}), "({'obs': A, 'rew': B, 'done': C, 'info': D})\n", (2204, 2248), False, 'from tianshou.data import Batch\n'), ((2747, 2758), 'time.time', 'time.time', ([], {}), '()\n', (2756, 2758), False, 'import time\n'), ((3073, 3110), 'test.base.env.MyTestEnv', 'MyTestEnv', ([], {'size': 'size', 'sleep': '(sleep * 2)'}), '(size=size, sleep=sleep * 2)\n', (3082, 3110), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((3128, 3165), 'test.base.env.MyTestEnv', 'MyTestEnv', ([], {'size': 'size', 'sleep': '(sleep * 3)'}), '(size=size, sleep=sleep * 3)\n', (3137, 3165), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((3183, 3220), 
'test.base.env.MyTestEnv', 'MyTestEnv', ([], {'size': 'size', 'sleep': '(sleep * 5)'}), '(size=size, sleep=sleep * 5)\n', (3192, 3220), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((3238, 3275), 'test.base.env.MyTestEnv', 'MyTestEnv', ([], {'size': 'size', 'sleep': '(sleep * 7)'}), '(size=size, sleep=sleep * 7)\n', (3247, 3275), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((3797, 3808), 'time.time', 'time.time', ([], {}), '()\n', (3806, 3808), False, 'import time\n'), ((4400, 4450), 'test.base.env.MyTestEnv', 'MyTestEnv', ([], {'size': 'i', 'sleep': 'sleep', 'recurse_state': '(True)'}), '(size=i, sleep=sleep, recurse_state=True)\n', (4409, 4450), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((4653, 4674), 'tianshou.env.RayVectorEnv', 'RayVectorEnv', (['env_fns'], {}), '(env_fns)\n', (4665, 4674), False, 'from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv\n'), ((5329, 5340), 'time.time', 'time.time', ([], {}), '()\n', (5338, 5340), False, 'import time\n'), ((687, 704), 'numpy.allclose', 'np.allclose', (['a', 'b'], {}), '(a, b)\n', (698, 704), True, 'import numpy as np\n'), ((3881, 3892), 'time.time', 'time.time', ([], {}), '()\n', (3890, 3892), False, 'import time\n'), ((3915, 3926), 'tianshou.data.Batch', 'Batch', (['info'], {}), '(info)\n', (3920, 3926), False, 'from tianshou.data import Batch\n'), ((5541, 5552), 'time.time', 'time.time', ([], {}), '()\n', (5550, 5552), False, 'import time\n'), ((5783, 5794), 'gym.spaces.discrete.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (5791, 5794), False, 'from gym.spaces.discrete import Discrete\n'), ((5960, 5978), 'test.base.env.NXEnv', 'NXEnv', (['i', 'obs_type'], {}), '(i, obs_type)\n', (5965, 5978), False, 'from test.base.env import MyTestEnv, NXEnv\n'), ((4953, 4964), 'numpy.where', 'np.where', (['C'], {}), '(C)\n', (4961, 4964), True, 'import numpy as np\n'), ((5503, 5517), 'numpy.where', 'np.where', (['done'], {}), '(done)\n', (5511, 5517), 
True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0,"../../src")
sys.path.insert(0,"../credibleinterval")
import math
import functools
import time
import torch
import numpy as np
from scipy.special import gamma
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
import pymc3
from credible_estimator import credible_ball_estimator
#from src.variational_boosting_bmc import VariationalBoosting
#from src import vb_utils
#from src import sampling
def sigmoid(x,a=0,b=1):
return (b-a)*1.0/(1.0+np.exp(-x)) + a
def exp(x):
return np.exp(x)
np.random.seed(100)
torch.manual_seed(100)
tag = 1
data = np.load("testheat1b/tracking.npz",allow_pickle=True)
elbo = data["elbo"]
steps = data["steps"]
time = data["time"]
#fig4,ax4 = plt.subplots()
#cumtime = np.cumsum(time)
#ax4.plot(steps,cumtime,'ro')
#ax4.set_xlabel("iteration")
#ax4.set_ylabel("time (s)")
#ax4.set_title("Running time for algorithm")
distrib = torch.load("testheat1b/mvn%i"%100)
samples = distrib.sample(5000).numpy()
samples[:,0] = sigmoid(samples[:,0])
samples[:,1] = sigmoid(samples[:,1],b=0.4)
samples[:,2] = exp(samples[:,2])
samples[:,3] = exp(samples[:,3])
for i in range(4):
print("Data %i"%i)
mean = samples[:,i].mean()
print(pymc3.stats.hpd(samples[:,i],0.3))
#names = [r"$x_0$",r"$t_s$",r"$q_0$",r"$\rho$"]
#datadict = dict([(names[i],samples[:,i]) for i in range(4)])
#dataframe = pd.DataFrame(datadict)
#
#lims = [(-0.2,1.2),
# (-0.2,0.5),
# (-0.1,21.0),
# (-0.1,2.1)]
#g = sns.PairGrid(dataframe)
#def set_lims_pairgrid(g,lims):
# shape = g.axes.shape
# for i in range(shape[0]):
# for j in range(shape[1]):
# if i == j:
# g.axes[i,j].set_xlim(lims[i])
# elif i > j:
# g.axes[i,j].set_xlim(lims[j])
# g.axes[i,j].set_ylim(lims[i])
## else:
## g.axes[i,j].set_xlim(lims[i])
## g.axes[i,j].set_ylim(lims[j])
#g.map_diag(sns.kdeplot)
#g.map_lower(sns.kdeplot, n_levels=10);
#set_lims_pairgrid(g,lims)
#for i, j in zip(*np.triu_indices_from(g.axes, 1)):
# g.axes[i, j].set_visible(False)
#g.savefig("../../tex/figs/sourceproblemhistogramsvb") | [
"torch.manual_seed",
"sys.path.insert",
"torch.load",
"numpy.exp",
"numpy.random.seed",
"pymc3.stats.hpd",
"numpy.load"
] | [((35, 66), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../src"""'], {}), "(0, '../../src')\n", (50, 66), False, 'import sys\n'), ((66, 107), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../credibleinterval"""'], {}), "(0, '../credibleinterval')\n", (81, 107), False, 'import sys\n'), ((611, 630), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (625, 630), True, 'import numpy as np\n'), ((631, 653), 'torch.manual_seed', 'torch.manual_seed', (['(100)'], {}), '(100)\n', (648, 653), False, 'import torch\n'), ((670, 723), 'numpy.load', 'np.load', (['"""testheat1b/tracking.npz"""'], {'allow_pickle': '(True)'}), "('testheat1b/tracking.npz', allow_pickle=True)\n", (677, 723), True, 'import numpy as np\n'), ((983, 1019), 'torch.load', 'torch.load', (["('testheat1b/mvn%i' % 100)"], {}), "('testheat1b/mvn%i' % 100)\n", (993, 1019), False, 'import torch\n'), ((600, 609), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (606, 609), True, 'import numpy as np\n'), ((1287, 1322), 'pymc3.stats.hpd', 'pymc3.stats.hpd', (['samples[:, i]', '(0.3)'], {}), '(samples[:, i], 0.3)\n', (1302, 1322), False, 'import pymc3\n'), ((561, 571), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (567, 571), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from sklearn.datasets import load_iris, load_boston
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import SVR
from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure
from alibi.confidence.model_linearity import _linear_superposition, _sample_grid, _sample_knn
from functools import reduce
@pytest.mark.parametrize('input_shape', ((3,), (4, 4, 1)))
@pytest.mark.parametrize('nb_instances', (1, 10))
def test_linear_superposition(input_shape, nb_instances):
alphas = np.array([0.5, 0.5])
vecs_list = []
for i in range(nb_instances):
v0 = np.zeros((1,) + input_shape)
v1 = np.ones((1,) + input_shape)
vec = np.stack((v0, v1), axis=1)
vecs_list.append(vec)
vecs = reduce(lambda x, y: np.vstack((x, y)), vecs_list)
summ = _linear_superposition(alphas, vecs, input_shape)
assert summ.shape[0] == nb_instances
assert summ.shape[1:] == input_shape
assert (summ == 0.5).all()
@pytest.mark.parametrize('nb_instances', (1, 5))
@pytest.mark.parametrize('nb_samples', (2, 10))
def test_sample_knn(nb_instances, nb_samples):
iris = load_iris()
X_train = iris.data
input_shape = X_train.shape[1:]
x = np.ones((nb_instances, ) + input_shape)
X_samples = _sample_knn(x=x, X_train=X_train, nb_samples=nb_samples)
assert X_samples.shape[0] == nb_instances
assert X_samples.shape[1] == nb_samples
@pytest.mark.parametrize('nb_instances', (5, ))
@pytest.mark.parametrize('nb_samples', (3, ))
@pytest.mark.parametrize('input_shape', ((3,), (4, 4, 1)))
def test_sample_grid(nb_instances, nb_samples, input_shape):
x = np.ones((nb_instances, ) + input_shape)
nb_features = x.reshape(x.shape[0], -1).shape[1]
feature_range = np.array([[0, 1] for _ in range(nb_features)])
X_samples = _sample_grid(x, feature_range, nb_samples=nb_samples)
assert X_samples.shape[0] == nb_instances
assert X_samples.shape[1] == nb_samples
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_linearity_measure_class(method, epsilon, res, nb_instances, agg):
iris = load_iris()
X_train = iris.data
y_train = iris.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LogisticRegression()
lg.fit(X_train, y_train)
def predict_fn(x):
return lg.predict_proba(x)
lin = linearity_measure(predict_fn, x, method=method, epsilon=epsilon, X_train=X_train, res=res,
model_type='classifier', agg=agg)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
feature_range = [[0, 1] for _ in range(X_train.shape[1])]
lin_2 = linearity_measure(predict_fn, x, method='grid', epsilon=epsilon, feature_range=feature_range,
res=res, model_type='classifier', agg=agg)
assert lin_2.shape[0] == nb_instances, 'Nb of linearity values returned different from number of instances'
assert (lin_2 >= 0).all(), 'Linearity measure must be >= 0'
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_linearity_measure_reg(method, epsilon, res, nb_instances, agg):
boston = load_boston()
X_train, y_train = boston.data, boston.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LinearRegression()
lg.fit(X_train, y_train)
svr = SVR(kernel='linear')
svr.fit(X_train, y_train)
def predict_fn_svr(x):
return svr.predict(x)
def predict_fn(x):
return lg.predict(x)
lin = linearity_measure(predict_fn, x, method=method, epsilon=epsilon, X_train=X_train, res=res,
model_type='regressor', agg=agg)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin, np.zeros(lin.shape))
lin_svr = linearity_measure(predict_fn_svr, x, method=method, epsilon=epsilon, X_train=X_train,
res=res, model_type='regressor', agg=agg)
assert lin_svr.shape[0] == nb_instances, 'Checking shapes'
assert (lin_svr >= 0).all(), 'Linearity measure must be >= 0'
feature_range = [[0, 1] for _ in range(X_train.shape[1])]
lin_2 = linearity_measure(predict_fn, x, method='grid', epsilon=epsilon, feature_range=feature_range,
res=res, model_type='regressor', agg=agg)
assert lin_2.shape[0] == nb_instances, 'Checking shapes'
assert (lin_2 >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin_2, np.zeros(lin_2.shape))
feature_range = [[0, 1] for _ in range(X_train.shape[1])]
lin_2_svr = linearity_measure(predict_fn_svr, x, method='grid', epsilon=epsilon,
feature_range=feature_range, res=res, model_type='regressor', agg=agg)
assert lin_2_svr.shape[0] == nb_instances, 'Checking shapes'
assert (lin_2_svr >= 0).all(), 'Linearity measure must be >= 0'
y_train_multi = np.stack((y_train, y_train), axis=1)
lg_multi = LinearRegression()
lg_multi.fit(X_train, y_train_multi)
def predict_fn_multi(x):
return lg_multi.predict(x)
lm_multi = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg)
lm_multi.fit(X_train)
lin_multi = lm_multi.score(predict_fn_multi, x)
assert lin_multi.shape[0] == nb_instances, 'Checking shapes'
assert (lin_multi >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin_multi, np.zeros(lin_multi.shape))
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_LinearityMeasure_class(method, epsilon, res, nb_instances, agg):
iris = load_iris()
X_train = iris.data
y_train = iris.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LogisticRegression()
lg.fit(X_train, y_train)
def predict_fn(x):
return lg.predict_proba(x)
lm = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='classifier', agg=agg)
lm.fit(X_train)
lin = lm.score(predict_fn, x)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
@pytest.mark.parametrize('method', ('knn', 'grid'))
@pytest.mark.parametrize('epsilon', (0.04,))
@pytest.mark.parametrize('res', (100,))
@pytest.mark.parametrize('nb_instances', (1, 10))
@pytest.mark.parametrize('agg', ('global', 'pairwise'))
def test_LinearityMeasure_reg(method, epsilon, res, nb_instances, agg):
boston = load_boston()
X_train, y_train = boston.data, boston.target
x = X_train[0: nb_instances].reshape(nb_instances, -1)
lg = LinearRegression()
lg.fit(X_train, y_train)
def predict_fn(x):
return lg.predict(x)
y_train_multi = np.stack((y_train, y_train), axis=1)
lg_multi = LinearRegression()
lg_multi.fit(X_train, y_train_multi)
def predict_fn_multi(x):
return lg_multi.predict(x)
lm = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg)
lm.fit(X_train)
lin = lm.score(predict_fn, x)
assert lin.shape[0] == nb_instances, 'Checking shapes'
assert (lin >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin, np.zeros(lin.shape))
lm_multi = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg)
lm_multi.fit(X_train)
lin_multi = lm_multi.score(predict_fn_multi, x)
assert lin_multi.shape[0] == nb_instances, 'Checking shapes'
assert (lin_multi >= 0).all(), 'Linearity measure must be >= 0'
assert np.allclose(lin_multi, np.zeros(lin_multi.shape))
| [
"sklearn.datasets.load_iris",
"alibi.confidence.model_linearity.LinearityMeasure",
"numpy.ones",
"sklearn.datasets.load_boston",
"alibi.confidence.model_linearity._linear_superposition",
"alibi.confidence.model_linearity._sample_knn",
"sklearn.linear_model.LogisticRegression",
"pytest.mark.parametrize... | [((390, 447), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_shape"""', '((3,), (4, 4, 1))'], {}), "('input_shape', ((3,), (4, 4, 1)))\n", (413, 447), False, 'import pytest\n'), ((449, 497), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(1, 10)'], {}), "('nb_instances', (1, 10))\n", (472, 497), False, 'import pytest\n'), ((1037, 1084), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(1, 5)'], {}), "('nb_instances', (1, 5))\n", (1060, 1084), False, 'import pytest\n'), ((1086, 1132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_samples"""', '(2, 10)'], {}), "('nb_samples', (2, 10))\n", (1109, 1132), False, 'import pytest\n'), ((1480, 1525), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(5,)'], {}), "('nb_instances', (5,))\n", (1503, 1525), False, 'import pytest\n'), ((1528, 1571), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_samples"""', '(3,)'], {}), "('nb_samples', (3,))\n", (1551, 1571), False, 'import pytest\n'), ((1574, 1631), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_shape"""', '((3,), (4, 4, 1))'], {}), "('input_shape', ((3,), (4, 4, 1)))\n", (1597, 1631), False, 'import pytest\n'), ((2027, 2077), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('knn', 'grid')"], {}), "('method', ('knn', 'grid'))\n", (2050, 2077), False, 'import pytest\n'), ((2079, 2122), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""epsilon"""', '(0.04,)'], {}), "('epsilon', (0.04,))\n", (2102, 2122), False, 'import pytest\n'), ((2124, 2162), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""res"""', '(100,)'], {}), "('res', (100,))\n", (2147, 2162), False, 'import pytest\n'), ((2164, 2212), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(1, 10)'], {}), "('nb_instances', (1, 10))\n", (2187, 2212), False, 
'import pytest\n'), ((2214, 2268), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "('global', 'pairwise')"], {}), "('agg', ('global', 'pairwise'))\n", (2237, 2268), False, 'import pytest\n'), ((3302, 3352), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('knn', 'grid')"], {}), "('method', ('knn', 'grid'))\n", (3325, 3352), False, 'import pytest\n'), ((3354, 3397), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""epsilon"""', '(0.04,)'], {}), "('epsilon', (0.04,))\n", (3377, 3397), False, 'import pytest\n'), ((3399, 3437), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""res"""', '(100,)'], {}), "('res', (100,))\n", (3422, 3437), False, 'import pytest\n'), ((3439, 3487), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(1, 10)'], {}), "('nb_instances', (1, 10))\n", (3462, 3487), False, 'import pytest\n'), ((3489, 3543), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "('global', 'pairwise')"], {}), "('agg', ('global', 'pairwise'))\n", (3512, 3543), False, 'import pytest\n'), ((6006, 6056), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('knn', 'grid')"], {}), "('method', ('knn', 'grid'))\n", (6029, 6056), False, 'import pytest\n'), ((6058, 6101), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""epsilon"""', '(0.04,)'], {}), "('epsilon', (0.04,))\n", (6081, 6101), False, 'import pytest\n'), ((6103, 6141), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""res"""', '(100,)'], {}), "('res', (100,))\n", (6126, 6141), False, 'import pytest\n'), ((6143, 6191), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(1, 10)'], {}), "('nb_instances', (1, 10))\n", (6166, 6191), False, 'import pytest\n'), ((6193, 6247), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "('global', 'pairwise')"], {}), "('agg', ('global', 'pairwise'))\n", (6216, 6247), False, 'import 
pytest\n'), ((6854, 6904), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('knn', 'grid')"], {}), "('method', ('knn', 'grid'))\n", (6877, 6904), False, 'import pytest\n'), ((6906, 6949), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""epsilon"""', '(0.04,)'], {}), "('epsilon', (0.04,))\n", (6929, 6949), False, 'import pytest\n'), ((6951, 6989), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""res"""', '(100,)'], {}), "('res', (100,))\n", (6974, 6989), False, 'import pytest\n'), ((6991, 7039), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nb_instances"""', '(1, 10)'], {}), "('nb_instances', (1, 10))\n", (7014, 7039), False, 'import pytest\n'), ((7041, 7095), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""agg"""', "('global', 'pairwise')"], {}), "('agg', ('global', 'pairwise'))\n", (7064, 7095), False, 'import pytest\n'), ((569, 589), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (577, 589), True, 'import numpy as np\n'), ((871, 919), 'alibi.confidence.model_linearity._linear_superposition', '_linear_superposition', (['alphas', 'vecs', 'input_shape'], {}), '(alphas, vecs, input_shape)\n', (892, 919), False, 'from alibi.confidence.model_linearity import _linear_superposition, _sample_grid, _sample_knn\n'), ((1192, 1203), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (1201, 1203), False, 'from sklearn.datasets import load_iris, load_boston\n'), ((1272, 1310), 'numpy.ones', 'np.ones', (['((nb_instances,) + input_shape)'], {}), '((nb_instances,) + input_shape)\n', (1279, 1310), True, 'import numpy as np\n'), ((1329, 1385), 'alibi.confidence.model_linearity._sample_knn', '_sample_knn', ([], {'x': 'x', 'X_train': 'X_train', 'nb_samples': 'nb_samples'}), '(x=x, X_train=X_train, nb_samples=nb_samples)\n', (1340, 1385), False, 'from alibi.confidence.model_linearity import _linear_superposition, _sample_grid, _sample_knn\n'), ((1702, 1740), 'numpy.ones', 'np.ones', 
(['((nb_instances,) + input_shape)'], {}), '((nb_instances,) + input_shape)\n', (1709, 1740), True, 'import numpy as np\n'), ((1879, 1932), 'alibi.confidence.model_linearity._sample_grid', '_sample_grid', (['x', 'feature_range'], {'nb_samples': 'nb_samples'}), '(x, feature_range, nb_samples=nb_samples)\n', (1891, 1932), False, 'from alibi.confidence.model_linearity import _linear_superposition, _sample_grid, _sample_knn\n'), ((2356, 2367), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (2365, 2367), False, 'from sklearn.datasets import load_iris, load_boston\n'), ((2487, 2507), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2505, 2507), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((2607, 2736), 'alibi.confidence.model_linearity.linearity_measure', 'linearity_measure', (['predict_fn', 'x'], {'method': 'method', 'epsilon': 'epsilon', 'X_train': 'X_train', 'res': 'res', 'model_type': '"""classifier"""', 'agg': 'agg'}), "(predict_fn, x, method=method, epsilon=epsilon, X_train=\n X_train, res=res, model_type='classifier', agg=agg)\n", (2624, 2736), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((2956, 3096), 'alibi.confidence.model_linearity.linearity_measure', 'linearity_measure', (['predict_fn', 'x'], {'method': '"""grid"""', 'epsilon': 'epsilon', 'feature_range': 'feature_range', 'res': 'res', 'model_type': '"""classifier"""', 'agg': 'agg'}), "(predict_fn, x, method='grid', epsilon=epsilon,\n feature_range=feature_range, res=res, model_type='classifier', agg=agg)\n", (2973, 3096), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((3631, 3644), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (3642, 3644), False, 'from sklearn.datasets import load_iris, load_boston\n'), ((3764, 3782), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3780, 
3782), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((3822, 3842), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (3825, 3842), False, 'from sklearn.svm import SVR\n'), ((3995, 4123), 'alibi.confidence.model_linearity.linearity_measure', 'linearity_measure', (['predict_fn', 'x'], {'method': 'method', 'epsilon': 'epsilon', 'X_train': 'X_train', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(predict_fn, x, method=method, epsilon=epsilon, X_train=\n X_train, res=res, model_type='regressor', agg=agg)\n", (4012, 4123), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((4332, 4463), 'alibi.confidence.model_linearity.linearity_measure', 'linearity_measure', (['predict_fn_svr', 'x'], {'method': 'method', 'epsilon': 'epsilon', 'X_train': 'X_train', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(predict_fn_svr, x, method=method, epsilon=epsilon,\n X_train=X_train, res=res, model_type='regressor', agg=agg)\n", (4349, 4463), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((4696, 4835), 'alibi.confidence.model_linearity.linearity_measure', 'linearity_measure', (['predict_fn', 'x'], {'method': '"""grid"""', 'epsilon': 'epsilon', 'feature_range': 'feature_range', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(predict_fn, x, method='grid', epsilon=epsilon,\n feature_range=feature_range, res=res, model_type='regressor', agg=agg)\n", (4713, 4835), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((5119, 5262), 'alibi.confidence.model_linearity.linearity_measure', 'linearity_measure', (['predict_fn_svr', 'x'], {'method': '"""grid"""', 'epsilon': 'epsilon', 'feature_range': 'feature_range', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(predict_fn_svr, x, method='grid', epsilon=epsilon,\n 
feature_range=feature_range, res=res, model_type='regressor', agg=agg)\n", (5136, 5262), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((5447, 5483), 'numpy.stack', 'np.stack', (['(y_train, y_train)'], {'axis': '(1)'}), '((y_train, y_train), axis=1)\n', (5455, 5483), True, 'import numpy as np\n'), ((5499, 5517), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5515, 5517), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((5640, 5735), 'alibi.confidence.model_linearity.LinearityMeasure', 'LinearityMeasure', ([], {'method': 'method', 'epsilon': 'epsilon', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(method=method, epsilon=epsilon, res=res, model_type=\n 'regressor', agg=agg)\n", (5656, 5735), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((6334, 6345), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (6343, 6345), False, 'from sklearn.datasets import load_iris, load_boston\n'), ((6465, 6485), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6483, 6485), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((6584, 6680), 'alibi.confidence.model_linearity.LinearityMeasure', 'LinearityMeasure', ([], {'method': 'method', 'epsilon': 'epsilon', 'res': 'res', 'model_type': '"""classifier"""', 'agg': 'agg'}), "(method=method, epsilon=epsilon, res=res, model_type=\n 'classifier', agg=agg)\n", (6600, 6680), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((7182, 7195), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (7193, 7195), False, 'from sklearn.datasets import load_iris, load_boston\n'), ((7315, 7333), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7331, 7333), False, 'from sklearn.linear_model import LogisticRegression, 
LinearRegression\n'), ((7437, 7473), 'numpy.stack', 'np.stack', (['(y_train, y_train)'], {'axis': '(1)'}), '((y_train, y_train), axis=1)\n', (7445, 7473), True, 'import numpy as np\n'), ((7489, 7507), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7505, 7507), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression\n'), ((7624, 7719), 'alibi.confidence.model_linearity.LinearityMeasure', 'LinearityMeasure', ([], {'method': 'method', 'epsilon': 'epsilon', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(method=method, epsilon=epsilon, res=res, model_type=\n 'regressor', agg=agg)\n", (7640, 7719), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((7955, 8050), 'alibi.confidence.model_linearity.LinearityMeasure', 'LinearityMeasure', ([], {'method': 'method', 'epsilon': 'epsilon', 'res': 'res', 'model_type': '"""regressor"""', 'agg': 'agg'}), "(method=method, epsilon=epsilon, res=res, model_type=\n 'regressor', agg=agg)\n", (7971, 8050), False, 'from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure\n'), ((657, 685), 'numpy.zeros', 'np.zeros', (['((1,) + input_shape)'], {}), '((1,) + input_shape)\n', (665, 685), True, 'import numpy as np\n'), ((699, 726), 'numpy.ones', 'np.ones', (['((1,) + input_shape)'], {}), '((1,) + input_shape)\n', (706, 726), True, 'import numpy as np\n'), ((741, 767), 'numpy.stack', 'np.stack', (['(v0, v1)'], {'axis': '(1)'}), '((v0, v1), axis=1)\n', (749, 767), True, 'import numpy as np\n'), ((4296, 4315), 'numpy.zeros', 'np.zeros', (['lin.shape'], {}), '(lin.shape)\n', (4304, 4315), True, 'import numpy as np\n'), ((5017, 5038), 'numpy.zeros', 'np.zeros', (['lin_2.shape'], {}), '(lin_2.shape)\n', (5025, 5038), True, 'import numpy as np\n'), ((5976, 6001), 'numpy.zeros', 'np.zeros', (['lin_multi.shape'], {}), '(lin_multi.shape)\n', (5984, 6001), True, 'import numpy as np\n'), ((7918, 7937), 'numpy.zeros', 
'np.zeros', (['lin.shape'], {}), '(lin.shape)\n', (7926, 7937), True, 'import numpy as np\n'), ((8291, 8316), 'numpy.zeros', 'np.zeros', (['lin_multi.shape'], {}), '(lin_multi.shape)\n', (8299, 8316), True, 'import numpy as np\n'), ((829, 846), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (838, 846), True, 'import numpy as np\n')] |
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
import numpy as np
import json
import os
import sys
import cv2
import copy
import paddlex.utils.logging as logging
# fix linspace problem for pycocotools while numpy > 1.17.2
backup_linspace = np.linspace
def fixed_linspace(start,
stop,
num=50,
endpoint=True,
retstep=False,
dtype=None,
axis=0):
num = int(num)
return backup_linspace(start, stop, num, endpoint, retstep, dtype, axis)
def eval_results(results,
metric,
coco_gt,
with_background=True,
resolution=None,
is_bbox_normalized=False,
map_type='11point'):
"""Evaluation for evaluation program results"""
box_ap_stats = []
coco_gt_data = copy.deepcopy(coco_gt)
eval_details = {'gt': copy.deepcopy(coco_gt.dataset)}
if metric == 'COCO':
np.linspace = fixed_linspace
if 'proposal' in results[0]:
proposal_eval(results, coco_gt_data)
if 'bbox' in results[0]:
box_ap_stats, xywh_results = coco_bbox_eval(
results,
coco_gt_data,
with_background,
is_bbox_normalized=is_bbox_normalized)
if 'mask' in results[0]:
mask_ap_stats, segm_results = mask_eval(results, coco_gt_data,
resolution)
ap_stats = [box_ap_stats, mask_ap_stats]
eval_details['bbox'] = xywh_results
eval_details['mask'] = segm_results
return ap_stats, eval_details
np.linspace = backup_linspace
else:
if 'accum_map' in results[-1]:
res = np.mean(results[-1]['accum_map'][0])
logging.debug('mAP: {:.2f}'.format(res * 100.))
box_ap_stats.append(res * 100.)
elif 'bbox' in results[0]:
box_ap, xywh_results = voc_bbox_eval(
results,
coco_gt_data,
with_background,
is_bbox_normalized=is_bbox_normalized,
map_type=map_type)
box_ap_stats.append(box_ap)
eval_details['bbox'] = xywh_results
return box_ap_stats, eval_details
def proposal_eval(results, coco_gt, outputfile, max_dets=(100, 300, 1000)):
assert 'proposal' in results[0]
assert outfile.endswith('.json')
xywh_results = proposal2out(results)
assert len(
xywh_results) > 0, "The number of valid proposal detected is zero.\n \
Please use reasonable model and check input data."
with open(outfile, 'w') as f:
json.dump(xywh_results, f)
cocoapi_eval(xywh_results, 'proposal', coco_gt=coco_gt, max_dets=max_dets)
# flush coco evaluation result
sys.stdout.flush()
def coco_bbox_eval(results,
coco_gt,
with_background=True,
is_bbox_normalized=False):
assert 'bbox' in results[0]
from pycocotools.coco import COCO
cat_ids = coco_gt.getCatIds()
# when with_background = True, mapping category to classid, like:
# background:0, first_class:1, second_class:2, ...
clsid2catid = dict(
{i + int(with_background): catid
for i, catid in enumerate(cat_ids)})
xywh_results = bbox2out(
results, clsid2catid, is_bbox_normalized=is_bbox_normalized)
results = copy.deepcopy(xywh_results)
if len(xywh_results) == 0:
logging.warning(
"The number of valid bbox detected is zero.\n Please use reasonable model and check input data.\n stop eval!"
)
return [0.0], results
map_stats = cocoapi_eval(xywh_results, 'bbox', coco_gt=coco_gt)
# flush coco evaluation result
sys.stdout.flush()
return map_stats, results
def loadRes(coco_obj, anns):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
from pycocotools.coco import COCO
import pycocotools.mask as maskUtils
import time
res = COCO()
res.dataset['images'] = [img for img in coco_obj.dataset['images']]
tic = time.time()
assert type(anns) == list, 'results in not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(coco_obj.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set(
[ann['image_id'] for ann in anns])
res.dataset['images'] = [
img for img in res.dataset['images'] if img['id'] in imgIds
]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(
coco_obj.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(
coco_obj.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(
coco_obj.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1 - x0) * (y1 - y0)
ann['id'] = id + 1
ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
res.dataset['annotations'] = anns
res.createIndex()
return res
def mask_eval(results, coco_gt, resolution, thresh_binarize=0.5):
assert 'mask' in results[0]
from pycocotools.coco import COCO
clsid2catid = {i + 1: v for i, v in enumerate(coco_gt.getCatIds())}
segm_results = mask2out(results, clsid2catid, resolution, thresh_binarize)
results = copy.deepcopy(segm_results)
if len(segm_results) == 0:
logging.warning(
"The number of valid mask detected is zero.\n Please use reasonable model and check input data."
)
return None, results
map_stats = cocoapi_eval(segm_results, 'segm', coco_gt=coco_gt)
return map_stats, results
def cocoapi_eval(anns,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000)):
"""
Args:
anns: Evaluation result.
style: COCOeval style, can be `bbox` , `segm` and `proposal`.
coco_gt: Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file: COCO annotations file.
max_dets: COCO evaluation maxDets.
"""
assert coco_gt != None or anno_file != None
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
if coco_gt == None:
coco_gt = COCO(anno_file)
logging.debug("Start evaluate...")
coco_dt = loadRes(coco_gt, anns)
if style == 'proposal':
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.useCats = 0
coco_eval.params.maxDets = list(max_dets)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
def proposal2out(results, is_bbox_normalized=False):
xywh_res = []
for t in results:
bboxes = t['proposal'][0]
lengths = t['proposal'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
assert len(lengths) == im_ids.size
if bboxes.shape == (1, 1) or bboxes is None:
continue
k = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i])
for j in range(num):
dt = bboxes[k]
xmin, ymin, xmax, ymax = dt.tolist()
if is_bbox_normalized:
xmin, ymin, xmax, ymax = \
clip_bbox([xmin, ymin, xmax, ymax])
w = xmax - xmin
h = ymax - ymin
else:
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': 1,
'bbox': bbox,
'score': 1.0
}
xywh_res.append(coco_res)
k += 1
return xywh_res
def bbox2out(results, clsid2catid, is_bbox_normalized=False):
"""
Args:
results: request a dict, should include: `bbox`, `im_id`,
if is_bbox_normalized=True, also need `im_shape`.
clsid2catid: class id to category id map of COCO2017 dataset.
is_bbox_normalized: whether or not bbox is normalized.
"""
xywh_res = []
for t in results:
bboxes = t['bbox'][0]
lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
if bboxes.shape == (1, 1) or bboxes is None:
continue
k = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i])
for j in range(num):
dt = bboxes[k]
clsid, score, xmin, ymin, xmax, ymax = dt.tolist()
catid = (clsid2catid[int(clsid)])
if is_bbox_normalized:
xmin, ymin, xmax, ymax = \
clip_bbox([xmin, ymin, xmax, ymax])
w = xmax - xmin
h = ymax - ymin
im_shape = t['im_shape'][0][i].tolist()
im_height, im_width = int(im_shape[0]), int(im_shape[1])
xmin *= im_width
ymin *= im_height
w *= im_width
h *= im_height
else:
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': catid,
'bbox': bbox,
'score': score
}
xywh_res.append(coco_res)
k += 1
return xywh_res
def mask2out(results, clsid2catid, resolution, thresh_binarize=0.5):
import pycocotools.mask as mask_util
scale = (resolution + 2.0) / resolution
segm_res = []
# for each batch
for t in results:
bboxes = t['bbox'][0]
lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0])
if bboxes.shape == (1, 1) or bboxes is None:
continue
if len(bboxes.tolist()) == 0:
continue
masks = t['mask'][0]
s = 0
# for each sample
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i][0])
im_shape = t['im_shape'][0][i]
bbox = bboxes[s:s + num][:, 2:]
clsid_scores = bboxes[s:s + num][:, 0:2]
mask = masks[s:s + num]
s += num
im_h = int(im_shape[0])
im_w = int(im_shape[1])
expand_bbox = expand_boxes(bbox, scale)
expand_bbox = expand_bbox.astype(np.int32)
padded_mask = np.zeros((resolution + 2, resolution + 2),
dtype=np.float32)
for j in range(num):
xmin, ymin, xmax, ymax = expand_bbox[j].tolist()
clsid, score = clsid_scores[j].tolist()
clsid = int(clsid)
padded_mask[1:-1, 1:-1] = mask[j, clsid, :, :]
catid = clsid2catid[clsid]
w = xmax - xmin + 1
h = ymax - ymin + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
resized_mask = cv2.resize(padded_mask, (w, h))
resized_mask = np.array(
resized_mask > thresh_binarize, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x0 = min(max(xmin, 0), im_w)
x1 = min(max(xmax + 1, 0), im_w)
y0 = min(max(ymin, 0), im_h)
y1 = min(max(ymax + 1, 0), im_h)
im_mask[y0:y1, x0:x1] = resized_mask[(y0 - ymin):(y1 - ymin), (
x0 - xmin):(x1 - xmin)]
segm = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F'))[0]
catid = clsid2catid[clsid]
segm['counts'] = segm['counts'].decode('utf8')
coco_res = {
'image_id': im_id,
'category_id': catid,
'segmentation': segm,
'score': score
}
segm_res.append(coco_res)
return segm_res
def expand_boxes(boxes, scale):
"""
Expand an array of boxes by a given scale.
"""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def voc_bbox_eval(results,
coco_gt,
with_background=False,
overlap_thresh=0.5,
map_type='11point',
is_bbox_normalized=False,
evaluate_difficult=False):
"""
Bounding box evaluation for VOC dataset
Args:
results (list): prediction bounding box results.
class_num (int): evaluation class number.
overlap_thresh (float): the postive threshold of
bbox overlap
map_type (string): method for mAP calcualtion,
can only be '11point' or 'integral'
is_bbox_normalized (bool): whether bbox is normalized
to range [0, 1].
evaluate_difficult (bool): whether to evaluate
difficult gt bbox.
"""
assert 'bbox' in results[0]
logging.debug("Start evaluate...")
from pycocotools.coco import COCO
cat_ids = coco_gt.getCatIds()
# when with_background = True, mapping category to classid, like:
# background:0, first_class:1, second_class:2, ...
clsid2catid = dict(
{i + int(with_background): catid
for i, catid in enumerate(cat_ids)})
class_num = len(clsid2catid) + int(with_background)
detection_map = DetectionMAP(
class_num=class_num,
overlap_thresh=overlap_thresh,
map_type=map_type,
is_bbox_normalized=is_bbox_normalized,
evaluate_difficult=evaluate_difficult)
xywh_res = []
det_nums = 0
gt_nums = 0
for t in results:
bboxes = t['bbox'][0]
bbox_lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
if bboxes.shape == (1, 1) or bboxes is None:
continue
gt_boxes = t['gt_box'][0]
gt_labels = t['gt_label'][0]
difficults = t['is_difficult'][0] if not evaluate_difficult \
else None
if len(t['gt_box'][1]) == 0:
# gt_box, gt_label, difficult read as zero padded Tensor
bbox_idx = 0
for i in range(len(gt_boxes)):
gt_box = gt_boxes[i]
gt_label = gt_labels[i]
difficult = None if difficults is None \
else difficults[i]
bbox_num = bbox_lengths[i]
bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
gt_box, gt_label, difficult = prune_zero_padding(
gt_box, gt_label, difficult)
detection_map.update(bbox, gt_box, gt_label, difficult)
bbox_idx += bbox_num
det_nums += bbox_num
gt_nums += gt_box.shape[0]
im_id = int(im_ids[i])
for b in bbox:
clsid, score, xmin, ymin, xmax, ymax = b.tolist()
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': clsid2catid[clsid],
'bbox': bbox,
'score': score
}
xywh_res.append(coco_res)
else:
# gt_box, gt_label, difficult read as LoDTensor
gt_box_lengths = t['gt_box'][1][0]
bbox_idx = 0
gt_box_idx = 0
for i in range(len(bbox_lengths)):
bbox_num = bbox_lengths[i]
gt_box_num = gt_box_lengths[i]
bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
gt_box = gt_boxes[gt_box_idx:gt_box_idx + gt_box_num]
gt_label = gt_labels[gt_box_idx:gt_box_idx + gt_box_num]
difficult = None if difficults is None else \
difficults[gt_box_idx: gt_box_idx + gt_box_num]
detection_map.update(bbox, gt_box, gt_label, difficult)
bbox_idx += bbox_num
gt_box_idx += gt_box_num
im_id = int(im_ids[i])
for b in bbox:
clsid, score, xmin, ymin, xmax, ymax = b.tolist()
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': clsid2catid[clsid],
'bbox': bbox,
'score': score
}
xywh_res.append(coco_res)
logging.debug("Accumulating evaluatation results...")
detection_map.accumulate()
map_stat = 100. * detection_map.get_map()
logging.debug("mAP({:.2f}, {}) = {:.2f}".format(overlap_thresh, map_type,
map_stat))
return map_stat, xywh_res
def prune_zero_padding(gt_box, gt_label, difficult=None):
valid_cnt = 0
for i in range(len(gt_box)):
if gt_box[i, 0] == 0 and gt_box[i, 1] == 0 and \
gt_box[i, 2] == 0 and gt_box[i, 3] == 0:
break
valid_cnt += 1
return (gt_box[:valid_cnt], gt_label[:valid_cnt],
difficult[:valid_cnt] if difficult is not None else None)
def bbox_area(bbox, is_bbox_normalized):
"""
Calculate area of a bounding box
"""
norm = 1. - float(is_bbox_normalized)
width = bbox[2] - bbox[0] + norm
height = bbox[3] - bbox[1] + norm
return width * height
def jaccard_overlap(pred, gt, is_bbox_normalized=False):
"""
Calculate jaccard overlap ratio between two bounding box
"""
if pred[0] >= gt[2] or pred[2] <= gt[0] or \
pred[1] >= gt[3] or pred[3] <= gt[1]:
return 0.
inter_xmin = max(pred[0], gt[0])
inter_ymin = max(pred[1], gt[1])
inter_xmax = min(pred[2], gt[2])
inter_ymax = min(pred[3], gt[3])
inter_size = bbox_area([inter_xmin, inter_ymin, inter_xmax, inter_ymax],
is_bbox_normalized)
pred_size = bbox_area(pred, is_bbox_normalized)
gt_size = bbox_area(gt, is_bbox_normalized)
overlap = float(inter_size) / (pred_size + gt_size - inter_size)
return overlap
class DetectionMAP(object):
"""
Calculate detection mean average precision.
Currently support two types: 11point and integral
Args:
class_num (int): the class number.
overlap_thresh (float): The threshold of overlap
ratio between prediction bounding box and
ground truth bounding box for deciding
true/false positive. Default 0.5.
map_type (str): calculation method of mean average
precision, currently support '11point' and
'integral'. Default '11point'.
is_bbox_normalized (bool): whther bounding boxes
is normalized to range[0, 1]. Default False.
evaluate_difficult (bool): whether to evaluate
difficult bounding boxes. Default False.
"""
def __init__(self,
class_num,
overlap_thresh=0.5,
map_type='11point',
is_bbox_normalized=False,
evaluate_difficult=False):
self.class_num = class_num
self.overlap_thresh = overlap_thresh
assert map_type in ['11point', 'integral'], \
"map_type currently only support '11point' "\
"and 'integral'"
self.map_type = map_type
self.is_bbox_normalized = is_bbox_normalized
self.evaluate_difficult = evaluate_difficult
self.reset()
def update(self, bbox, gt_box, gt_label, difficult=None):
"""
Update metric statics from given prediction and ground
truth infomations.
"""
if difficult is None:
difficult = np.zeros_like(gt_label)
# record class gt count
for gtl, diff in zip(gt_label, difficult):
if self.evaluate_difficult or int(diff) == 0:
self.class_gt_counts[int(np.array(gtl))] += 1
# record class score positive
visited = [False] * len(gt_label)
for b in bbox:
label, score, xmin, ymin, xmax, ymax = b.tolist()
pred = [xmin, ymin, xmax, ymax]
max_idx = -1
max_overlap = -1.0
for i, gl in enumerate(gt_label):
if int(gl) == int(label):
overlap = jaccard_overlap(pred, gt_box[i],
self.is_bbox_normalized)
if overlap > max_overlap:
max_overlap = overlap
max_idx = i
if max_overlap > self.overlap_thresh:
if self.evaluate_difficult or \
int(np.array(difficult[max_idx])) == 0:
if not visited[max_idx]:
self.class_score_poss[int(label)].append([score, 1.0])
visited[max_idx] = True
else:
self.class_score_poss[int(label)].append([score, 0.0])
else:
self.class_score_poss[int(label)].append([score, 0.0])
def reset(self):
"""
Reset metric statics
"""
self.class_score_poss = [[] for _ in range(self.class_num)]
self.class_gt_counts = [0] * self.class_num
self.mAP = None
self.APs = [None] * self.class_num
def accumulate(self):
"""
Accumulate metric results and calculate mAP
"""
mAP = 0.
valid_cnt = 0
for id, (score_pos, count) in enumerate(
zip(self.class_score_poss, self.class_gt_counts)):
if count == 0: continue
if len(score_pos) == 0:
valid_cnt += 1
continue
accum_tp_list, accum_fp_list = \
self._get_tp_fp_accum(score_pos)
precision = []
recall = []
for ac_tp, ac_fp in zip(accum_tp_list, accum_fp_list):
precision.append(float(ac_tp) / (ac_tp + ac_fp))
recall.append(float(ac_tp) / count)
if self.map_type == '11point':
max_precisions = [0.] * 11
start_idx = len(precision) - 1
for j in range(10, -1, -1):
for i in range(start_idx, -1, -1):
if recall[i] < float(j) / 10.:
start_idx = i
if j > 0:
max_precisions[j - 1] = max_precisions[j]
break
else:
if max_precisions[j] < precision[i]:
max_precisions[j] = precision[i]
mAP += sum(max_precisions) / 11.
self.APs[id] = sum(max_precisions) / 11.
valid_cnt += 1
elif self.map_type == 'integral':
import math
ap = 0.
prev_recall = 0.
for i in range(len(precision)):
recall_gap = math.fabs(recall[i] - prev_recall)
if recall_gap > 1e-6:
ap += precision[i] * recall_gap
prev_recall = recall[i]
mAP += ap
self.APs[id] = sum(max_precisions) / 11.
valid_cnt += 1
else:
raise Exception("Unspported mAP type {}".format(self.map_type))
self.mAP = mAP / float(valid_cnt) if valid_cnt > 0 else mAP
def get_map(self):
"""
Get mAP result
"""
if self.mAP is None:
raise Exception("mAP is not calculated.")
return self.mAP
def _get_tp_fp_accum(self, score_pos_list):
"""
Calculate accumulating true/false positive results from
[score, pos] records
"""
sorted_list = sorted(score_pos_list, key=lambda s: s[0], reverse=True)
accum_tp = 0
accum_fp = 0
accum_tp_list = []
accum_fp_list = []
for (score, pos) in sorted_list:
accum_tp += int(pos)
accum_tp_list.append(accum_tp)
accum_fp += 1 - int(pos)
accum_fp_list.append(accum_fp)
return accum_tp_list, accum_fp_list
| [
"pycocotools.mask.toBbox",
"pycocotools.cocoeval.COCOeval",
"numpy.array",
"copy.deepcopy",
"paddlex.utils.logging.debug",
"numpy.mean",
"pycocotools.coco.COCO",
"numpy.max",
"math.fabs",
"numpy.min",
"numpy.maximum",
"sys.stdout.flush",
"pycocotools.mask.area",
"cv2.resize",
"time.time"... | [((1475, 1497), 'copy.deepcopy', 'copy.deepcopy', (['coco_gt'], {}), '(coco_gt)\n', (1488, 1497), False, 'import copy\n'), ((3464, 3482), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3480, 3482), False, 'import sys\n'), ((4086, 4113), 'copy.deepcopy', 'copy.deepcopy', (['xywh_results'], {}), '(xywh_results)\n', (4099, 4113), False, 'import copy\n'), ((4440, 4458), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4456, 4458), False, 'import sys\n'), ((4803, 4809), 'pycocotools.coco.COCO', 'COCO', ([], {}), '()\n', (4807, 4809), False, 'from pycocotools.coco import COCO\n'), ((4893, 4904), 'time.time', 'time.time', ([], {}), '()\n', (4902, 4904), False, 'import time\n'), ((7320, 7347), 'copy.deepcopy', 'copy.deepcopy', (['segm_results'], {}), '(segm_results)\n', (7333, 7347), False, 'import copy\n'), ((8325, 8359), 'paddlex.utils.logging.debug', 'logging.debug', (['"""Start evaluate..."""'], {}), "('Start evaluate...')\n", (8338, 8359), True, 'import paddlex.utils.logging as logging\n'), ((14713, 14734), 'numpy.zeros', 'np.zeros', (['boxes.shape'], {}), '(boxes.shape)\n', (14721, 14734), True, 'import numpy as np\n'), ((15784, 15818), 'paddlex.utils.logging.debug', 'logging.debug', (['"""Start evaluate..."""'], {}), "('Start evaluate...')\n", (15797, 15818), True, 'import paddlex.utils.logging as logging\n'), ((19525, 19578), 'paddlex.utils.logging.debug', 'logging.debug', (['"""Accumulating evaluatation results..."""'], {}), "('Accumulating evaluatation results...')\n", (19538, 19578), True, 'import paddlex.utils.logging as logging\n'), ((1524, 1554), 'copy.deepcopy', 'copy.deepcopy', (['coco_gt.dataset'], {}), '(coco_gt.dataset)\n', (1537, 1554), False, 'import copy\n'), ((3318, 3344), 'json.dump', 'json.dump', (['xywh_results', 'f'], {}), '(xywh_results, f)\n', (3327, 3344), False, 'import json\n'), ((4153, 4291), 'paddlex.utils.logging.warning', 'logging.warning', (['"""The number of valid bbox detected is zero.\n Please 
use reasonable model and check input data.\n stop eval!"""'], {}), '(\n """The number of valid bbox detected is zero.\n Please use reasonable model and check input data.\n stop eval!"""\n )\n', (4168, 4291), True, 'import paddlex.utils.logging as logging\n'), ((7387, 7513), 'paddlex.utils.logging.warning', 'logging.warning', (['"""The number of valid mask detected is zero.\n Please use reasonable model and check input data."""'], {}), '(\n """The number of valid mask detected is zero.\n Please use reasonable model and check input data."""\n )\n', (7402, 7513), True, 'import paddlex.utils.logging as logging\n'), ((8305, 8320), 'pycocotools.coco.COCO', 'COCO', (['anno_file'], {}), '(anno_file)\n', (8309, 8320), False, 'from pycocotools.coco import COCO\n'), ((8445, 8479), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco_gt', 'coco_dt', '"""bbox"""'], {}), "(coco_gt, coco_dt, 'bbox')\n", (8453, 8479), False, 'from pycocotools.cocoeval import COCOeval\n'), ((8597, 8630), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco_gt', 'coco_dt', 'style'], {}), '(coco_gt, coco_dt, style)\n', (8605, 8630), False, 'from pycocotools.cocoeval import COCOeval\n'), ((12064, 12087), 'numpy.array', 'np.array', (["t['im_id'][0]"], {}), "(t['im_id'][0])\n", (12072, 12087), True, 'import numpy as np\n'), ((2406, 2442), 'numpy.mean', 'np.mean', (["results[-1]['accum_map'][0]"], {}), "(results[-1]['accum_map'][0])\n", (2413, 2442), True, 'import numpy as np\n'), ((5593, 5638), 'copy.deepcopy', 'copy.deepcopy', (["coco_obj.dataset['categories']"], {}), "(coco_obj.dataset['categories'])\n", (5606, 5638), False, 'import copy\n'), ((12803, 12863), 'numpy.zeros', 'np.zeros', (['(resolution + 2, resolution + 2)'], {'dtype': 'np.float32'}), '((resolution + 2, resolution + 2), dtype=np.float32)\n', (12811, 12863), True, 'import numpy as np\n'), ((22787, 22810), 'numpy.zeros_like', 'np.zeros_like', (['gt_label'], {}), '(gt_label)\n', (22800, 22810), True, 'import numpy as np\n'), ((6084, 6129), 
'copy.deepcopy', 'copy.deepcopy', (["coco_obj.dataset['categories']"], {}), "(coco_obj.dataset['categories'])\n", (6097, 6129), False, 'import copy\n'), ((8920, 8943), 'numpy.array', 'np.array', (["t['im_id'][0]"], {}), "(t['im_id'][0])\n", (8928, 8943), True, 'import numpy as np\n'), ((10426, 10449), 'numpy.array', 'np.array', (["t['im_id'][0]"], {}), "(t['im_id'][0])\n", (10434, 10449), True, 'import numpy as np\n'), ((13289, 13305), 'numpy.maximum', 'np.maximum', (['w', '(1)'], {}), '(w, 1)\n', (13299, 13305), True, 'import numpy as np\n'), ((13326, 13342), 'numpy.maximum', 'np.maximum', (['h', '(1)'], {}), '(h, 1)\n', (13336, 13342), True, 'import numpy as np\n'), ((13375, 13406), 'cv2.resize', 'cv2.resize', (['padded_mask', '(w, h)'], {}), '(padded_mask, (w, h))\n', (13385, 13406), False, 'import cv2\n'), ((13438, 13494), 'numpy.array', 'np.array', (['(resized_mask > thresh_binarize)'], {'dtype': 'np.uint8'}), '(resized_mask > thresh_binarize, dtype=np.uint8)\n', (13446, 13494), True, 'import numpy as np\n'), ((13542, 13580), 'numpy.zeros', 'np.zeros', (['(im_h, im_w)'], {'dtype': 'np.uint8'}), '((im_h, im_w), dtype=np.uint8)\n', (13550, 13580), True, 'import numpy as np\n'), ((16570, 16593), 'numpy.array', 'np.array', (["t['im_id'][0]"], {}), "(t['im_id'][0])\n", (16578, 16593), True, 'import numpy as np\n'), ((6286, 6321), 'pycocotools.mask.area', 'maskUtils.area', (["ann['segmentation']"], {}), "(ann['segmentation'])\n", (6300, 6321), True, 'import pycocotools.mask as maskUtils\n'), ((6555, 6600), 'copy.deepcopy', 'copy.deepcopy', (["coco_obj.dataset['categories']"], {}), "(coco_obj.dataset['categories'])\n", (6568, 6600), False, 'import copy\n'), ((6386, 6423), 'pycocotools.mask.toBbox', 'maskUtils.toBbox', (["ann['segmentation']"], {}), "(ann['segmentation'])\n", (6402, 6423), True, 'import pycocotools.mask as maskUtils\n'), ((13956, 14002), 'numpy.array', 'np.array', (['im_mask[:, :, np.newaxis]'], {'order': '"""F"""'}), "(im_mask[:, :, np.newaxis], 
order='F')\n", (13964, 14002), True, 'import numpy as np\n'), ((22994, 23007), 'numpy.array', 'np.array', (['gtl'], {}), '(gtl)\n', (23002, 23007), True, 'import numpy as np\n'), ((26131, 26165), 'math.fabs', 'math.fabs', (['(recall[i] - prev_recall)'], {}), '(recall[i] - prev_recall)\n', (26140, 26165), False, 'import math\n'), ((6764, 6773), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (6770, 6773), True, 'import numpy as np\n'), ((6775, 6784), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6781, 6784), True, 'import numpy as np\n'), ((6786, 6795), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (6792, 6795), True, 'import numpy as np\n'), ((6797, 6806), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (6803, 6806), True, 'import numpy as np\n'), ((23758, 23786), 'numpy.array', 'np.array', (['difficult[max_idx]'], {}), '(difficult[max_idx])\n', (23766, 23786), True, 'import numpy as np\n')] |
import numpy as np
class HMC():
    """Plain Hamiltonian Monte Carlo sampler.

    The target is specified by ``log_prob`` and its gradient; internally the
    sampler works with the potential V = -log_prob.  An optional diagonal
    inverse metric rescales the momenta.  The counters ``leapcount``,
    ``Vgcount`` and ``Hcount`` record how many leapfrog trajectories,
    potential-gradient evaluations and Hamiltonian evaluations were made.
    """

    def __init__(self, log_prob, grad_log_prob, invmetric_diag=None):
        self.log_prob, self.grad_log_prob = log_prob, grad_log_prob
        # Potential energy is the negative log-probability.
        self.V = lambda x: self.log_prob(x) * -1.
        self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0

        # Diagonal inverse metric (mass matrix); identity by default.
        if invmetric_diag is None:
            self.invmetric_diag = 1.
        else:
            self.invmetric_diag = invmetric_diag
        self.metricstd = self.invmetric_diag**-0.5

        # Kinetic energy and its gradient under the diagonal metric.
        self.KE = lambda p: 0.5 * (p**2 * self.invmetric_diag).sum()
        self.KE_g = lambda p: p * self.invmetric_diag

    def V_g(self, x):
        """Gradient of the potential; each call is counted in ``Vgcount``."""
        self.Vgcount += 1
        return self.grad_log_prob(x) * -1.

    def unit_norm_KE(self, p):
        """Kinetic energy under a unit (identity) metric."""
        return 0.5 * (p**2).sum()

    def unit_norm_KE_g(self, p):
        """Gradient of the unit-metric kinetic energy."""
        return p

    def H(self, q, p):
        """Total Hamiltonian V(q) + KE(p); each call is counted in ``Hcount``."""
        self.Hcount += 1
        return self.V(q) + self.KE(p)

    def leapfrog(self, q, p, N, step_size):
        """Integrate Hamilton's equations for ``N`` leapfrog steps.

        On any exception the starting (q, p) is returned unchanged so the
        caller can treat the proposal as a rejection.
        """
        self.leapcount += 1
        q_start, p_start = q, p
        try:
            p = p - 0.5 * step_size * self.V_g(q)
            for _ in range(N - 1):
                q = q + step_size * self.KE_g(p)
                p = p - step_size * self.V_g(q)
            q = q + step_size * self.KE_g(p)
            p = p - 0.5 * step_size * self.V_g(q)
            return q, p
        except Exception as err:
            print(err)
            return q_start, p_start

    def leapfrog1(self, q, p, step_size, Vgq=None):
        """Single leapfrog step; optionally reuses a precomputed gradient.

        ``Vgq`` is the potential gradient at ``q``; passing it saves one
        gradient evaluation.  Returns (q, p, Vgq) so callers can recycle it.
        """
        self.leapcount += 1
        q_start, p_start = q, p
        try:
            if Vgq is None:
                Vgq = self.V_g(q)
            p = p - 0.5 * step_size * Vgq
            q = q + step_size * self.KE_g(p)
            p = p - 0.5 * step_size * self.V_g(q)
            return q, p, Vgq
        except Exception as err:
            print(err)
            return q_start, p_start, Vgq

    def metropolis(self, qp0, qp1):
        """Metropolis accept/reject between state ``qp0`` and proposal ``qp1``.

        Returns (q, p, flag, [H0, H1]) where flag is 1. on accept, 0. on
        reject, and 2. when the proposal is unusable (NaN/inf acceptance
        ratio or an unchanged position).
        """
        q0, p0 = qp0
        q1, p1 = qp1
        H0 = self.H(q0, p0)
        H1 = self.H(q1, p1)
        prob = np.exp(H0 - H1)
        # Degenerate proposal: keep the current state, flag with 2.
        # (No random draw here, so RNG state is untouched on this path.)
        if np.isnan(prob) or np.isinf(prob) or (q0 - q1).sum() == 0:
            return q0, p0, 2., [H0, H1]
        if np.random.uniform(0., 1., size=1) > min(1., prob):
            return q0, p0, 0., [H0, H1]
        return q1, p1, 1., [H0, H1]

    def hmc_step(self, q, N, step_size):
        '''Single hmc iteration

        Parameters:
        ----------
        q: initial position
        N: number of leapfrog steps
        step_size: step size for leapfrog iteration

        Returns:
        --------
        A tuple of-
        q
        p
        accepted (0/1/2)
        acceptance probability
        list of [Hcounts, Vcounts, nleapfrogs]
        '''
        # Counters are reset so the returned diagnostics cover this step only.
        self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
        p = np.random.normal(size=q.size).reshape(q.shape) * self.metricstd
        q_prop, p_prop = self.leapfrog(q, p, N, step_size)
        q, p, accepted, prob = self.metropolis([q, p], [q_prop, p_prop])
        return q, p, accepted, prob, [self.Hcount, self.Vgcount, self.leapcount]
######################
class AdHMC_eps0(HMC):
    """HMC with a stochastically drawn leapfrog step size.

    ``hmc_step`` samples the step size from a discrete distribution built by
    ``get_stepsize`` and corrects the Metropolis acceptance probability with
    the corresponding Hastings factor (reverse / forward draw probability).
    """

    def __init__(self, log_prob, grad_log_prob, invmetric_diag=None):
        super().__init__(log_prob, grad_log_prob, invmetric_diag)

    def get_stepsize(self, q0, p0, smin=0.01, smax=1.0, ntry=20, logspace=True, nsteps=1, eps=None):
        """Score ``ntry`` candidate step sizes in [smin, smax] and either
        sample one (when ``eps`` is None) or return the probability the
        distribution assigns near a given step size ``eps``.

        Each candidate is weighted by exp(H0 - H1) after ``nsteps`` leapfrog
        steps, tilted by sqrt(step) to mildly favor larger steps; NaN/inf
        weights are zeroed before normalizing.

        Returns:
            (sampled_step, bin_probability) when ``eps`` is None, otherwise
            the probability of the first candidate larger than ``eps``.
        """
        H0 = self.H(q0, p0)
        Hs = np.zeros(ntry)
        if logspace: steps = np.logspace(np.log10(smin), np.log10(smax), ntry)
        else: steps = np.linspace(smin, smax, ntry)
        pwts = steps.copy()**0.5 #np.linspace(0.9, 1.1, steps.size)
        for iss, ss in enumerate(steps):
            #nsteps = int(steps.max()/ss)+1
            q1, p1 = self.leapfrog(q0, p0, nsteps, ss)
            Hs[iss] = self.H(q1, p1)
        pp = np.exp(H0 - Hs) * pwts
        # Zero out numerically broken weights before normalization.
        pp[np.isnan(pp)] = 0
        pp[np.isinf(pp)] = 0
        pp /= pp.sum()
        cdf = np.cumsum(pp)
        if eps is None:
            # Inverse-CDF sampling of a bin, then uniform within the bin.
            sx = np.random.uniform(low=cdf.min())
            isx = np.where(sx > cdf)[0][-1]
            sx2 = np.random.uniform(steps[isx], steps[isx+1])
            prob = pp[isx+1] # * 1/(steps[isx+1]-steps[isx+1])
            return sx2, pp[isx+1]
        else:
            prob = pp[np.where(steps > eps)[0][0]]
            return prob

    def hmc_step(self, q0, Nleap, smin=0.01, smax=1.0, Tint=0, ntry=10, nsteps=1):
        '''Single hmc iteration

        Parameters:
        ----------
        q0: initial position
        Nleap: number of leapfrog steps (used when Tint == 0)
        smin: Minimum allowed step size
        smax: Maximum allowed step size
        Tint: Time of integration (used instead of Nleap when nonzero)
        ntry: Number of points to try for estimating first step size
        nsteps: Number of steps per try for estimating first step size

        Returns:
        --------
        A tuple of-
        q
        p
        accepted (0/1/2)
        list of probabilities [acc_prob, acc_prob/hastings_factor, hastings_factor]
        array of [pfactor denominator, pfactor numerator, stepsize]
        list of [Hcounts, Vcounts, nleapfrogs]
        '''
        self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0

        p0 = np.random.normal(size=q0.size).reshape(q0.shape) * self.metricstd
        H0 = self.H(q0, p0)

        # Exactly one of Tint / Nleap must be supplied.
        if (Tint == 0) and (Nleap == 0):
            print("Tint and Nleap cannot be both zeros")
            import sys
            sys.exit()
        elif (Tint != 0) and (Nleap != 0):
            print("Tint and Nleap both given and are inconsistent")
            import sys
            sys.exit()

        #First step is drawn from a distribution
        ss, pf_den = self.get_stepsize(q0, p0, smin, smax, ntry=ntry, nsteps=nsteps)
        eps = ss
        if Tint == 0: N = Nleap
        else: N = int(Tint/eps) + 1
        #print("Steps size is %0.2f, and number of steps is %d"%(eps, N))

        q1, p1 = self.leapfrog(q0, p0, N, ss)
        H1 = self.H(q1, p1)
        # Probability of drawing the same step size for the reversed path,
        # needed for the Hastings correction.
        pb_num = self.get_stepsize(q1, -p1, smin=smin, smax=smax, eps=ss, ntry=ntry, nsteps=nsteps)

        hastings_factor = pb_num/pf_den
        prob = np.exp(H0 - H1) * hastings_factor
        #print("prb, fac, metrop : ", prob, adfac, prob/adfac, pb_num, pf_den)

        toret = [[prob, prob/hastings_factor, hastings_factor], np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]]
        # Flag 2. marks an unusable proposal; no random draw on that path.
        if np.isnan(prob) or np.isinf(prob) or (q0-q1).sum()==0:
            return q0, p0, 2., *toret
        elif np.random.uniform(0., 1., size=1) > min(1., prob):
            return q0, p0, 0., *toret
        else: return q1, p1, 1., *toret
##
######################
class AdHMC(HMC):
    """Adaptive-step-size HMC.

    The trajectory is split into ``Nleap // nleap`` segments; after each
    segment the step size is multiplied by one of ``ratios``, chosen with
    probability proportional to how well energy is conserved.  Detailed
    balance is restored by accumulating the forward/backward selection
    probabilities into a Hastings factor applied at the final Metropolis
    test.
    """

    def __init__(self, log_prob, grad_log_prob, invmetric_diag=None):
        super().__init__(log_prob, grad_log_prob, invmetric_diag)

    def get_stepsize(self, q0, p0, smin=0.01, smax=1.0, ntry=10, logspace=True, nsteps=1, eps=None):
        """Score ``ntry`` candidate step sizes in [smin, smax] and either
        sample one (``eps`` is None) or return the probability of the bin
        above a given ``eps``.

        NOTE(review): identical to ``AdHMC_eps0.get_stepsize`` apart from
        the default ``ntry``; could be hoisted into the shared base class.
        """
        H0 = self.H(q0, p0)
        Hs = np.zeros(ntry)
        if logspace: steps = np.logspace(np.log10(smin), np.log10(smax), ntry)
        else: steps = np.linspace(smin, smax, ntry)
        pwts = steps.copy()**0.5 #np.linspace(0.9, 1.1, steps.size)
        for iss, ss in enumerate(steps):
            #nsteps = int(steps.max()/ss)+1
            q1, p1 = self.leapfrog(q0, p0, nsteps, ss)
            Hs[iss] = self.H(q1, p1)
        pp = np.exp(H0 - Hs) * pwts
        # Zero out numerically broken weights before normalization.
        pp[np.isnan(pp)] = 0
        pp[np.isinf(pp)] = 0
        pp /= pp.sum()
        cdf = np.cumsum(pp)
        if eps is None:
            # Inverse-CDF sampling of a bin, then uniform within the bin.
            sx = np.random.uniform(low=cdf.min())
            isx = np.where(sx > cdf)[0][-1]
            sx2 = np.random.uniform(steps[isx], steps[isx+1])
            prob = pp[isx+1] # * 1/(steps[isx+1]-steps[isx+1])
            return sx2, pp[isx+1]
        else:
            prob = pp[np.where(steps > eps)[0][0]]
            return prob

    def hmc_step(self, q0, Nleap=100, nleap=10, ratios= [1/np.sqrt(2), np.sqrt(2)], pwts0 = [1., 1.], smin=0.01, smax=1.0, ntry_eps0=10, nsteps_eps0=1, logeps=True, verbose=False):
        '''
        Parameters:
        ----------
        q0: initial position
        Nleap: number of leapfrog steps
        nleap: number of leapfrog steps to adapt step size
        smin: Minimum allowed step size
        smax: Maximum allowed step size
        ratios: ratio to change step size with after nleap steps- expected in INCREASING order
        ntry_eps0: Number of points to try for estimating first step size
        nsteps_eps0: Number of steps per try for estimating first step size

        Returns:
        --------
        A tuple of-
        q
        p
        accepted (0/1/2)
        list of probabiliies [acc_prob, acc_prob/hastings_factor, hastings_factor]
        array of checks [pfactor denominator, pfactor numberator, stepsize]
        list of counts [Hcounts, Vcounts, nleapfrogs]
        '''
        #normprob is not implemented
        self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0

        p0 = np.random.normal(size=q0.size).reshape(q0.shape) * self.metricstd
        # One adaptation decision per segment of nleap leapfrog steps.
        N = int(Nleap//nleap)
        #First step is drawn from a distribution
        eps, pf_den, pb_num = np.zeros(N), np.zeros(N), np.zeros(N)
        #pwts0 = 1. #np.array([0.9, 1.0, 1.1])
        nr = len(ratios)
        H0 = self.H(q0, p0)

        #First step is drawn from a distribution
        ss, pf_den[0] = self.get_stepsize(q0, p0, smin, smax, ntry=ntry_eps0, nsteps=nsteps_eps0, logspace=logeps)
        eps[0] = ss
        q1, p1 = self.leapfrog(q0, p0, nleap, ss)
        H1 = self.H(q1, p1)
        Hprev = H0
        # sigma only feeds commented-out Gaussian weighting experiments.
        sigma = np.log(0.5)/2.

        for i in range(N - 1):
            #q1, p1, H1 is the current position.
            #ss is the current step size i.e. the last taken
            ##Forward
            # Try each candidate ratio for the next segment and weight it by
            # how well the Hamiltonian is conserved.
            pf, pb = np.zeros(nr), np.zeros(nr)
            qs, ps, Hs = [], [], []
            for j in range(nr):
                ss2 = ss*ratios[j]
                q, p= self.leapfrog(q1, p1, nleap, ss2)
                qs.append(q)
                ps.append(p)
                Hs.append(self.H(q, p))
                pH = np.exp(H1 - Hs[-1])
                if np.isnan(pH) or np.isinf(pH): pH = 0
                pf[j] = pH
            # Zero weights that would push the step size outside [smin, smax].
            pwts = np.ones(nr) * pwts0
            if smin > ss*ratios[0]: pwts[0] = 0
            if smax < ss*ratios[-1]: pwts[-1] = 0
            pf *= pwts
            pfraw = pf.copy()
            pf /= pf.sum()
            if np.isnan(pf.sum()) or np.isinf(pf.sum()):
                if verbose:
                    print("Something blew up so returning initial position")
                    print(pfraw, pwts, pf, Hs, ss)
                # Flag 100+i marks a numerical failure at segment i.
                return q0, p0, 100+i, [np.NaN, np.NaN, np.NaN], np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]

            #Select a step size to carry forth
            pind = np.random.choice(nr, p=pf)
            ssnew = ss*ratios[pind]
            q2, p2, H2 = qs[pind], ps[pind], Hs[pind]
            pf_den[i+1] = pf[pind]

            ##Reverse
            #step from q1, p1 if we arrive here with ssnew step size in reverse direction
            Hsb = []
            for j in range(nr):
                if np.allclose(ssnew*ratios[j] , ss):
                    # Reversing the chosen ratio recovers the previous step
                    # size, so the previous Hamiltonian can be reused.
                    Hsb.append(Hprev)
                    pbind = j
                else:
                    ss2 = ssnew*ratios[j]
                    q, p = self.leapfrog(q1, -p1, nleap, ss2)
                    Hsb.append(self.H(q, p))
                pH = np.exp(H1 - Hsb[-1])
                if np.isnan(pH) or np.isinf(pH): pH = 0
                pb[j] = pH
            pwts = np.ones(nr) *pwts0
            if smin > ssnew*ratios[0]: pwts[0] = 0
            if smax < ssnew*ratios[-1]: pwts[-1] = 0
            pb *= pwts
            pb /= pb.sum()
            pb_num[i] = pb[pbind]

            #setup for next step
            eps[i+1] = ssnew
            ss = ssnew
            Hprev = H1
            q1, p1, H1 = q2, p2, H2
            #print(pf, pb, pf[pind], pb[pbind])
            #print(ss)

        #print('started and ended with step sizes ', eps[0], eps[i+1])
        #print('Number of steps taken is %d out of %d'%(i, N))
        if (ssnew > smin) and (ssnew < smax):
            #pb_num[-1] = self.get_stepsize(q2, -p2, smin=smin, smax=smax, eps=ssnew, ntry=ntry, nsteps=nsteps)
            pb_num[i+1] = self.get_stepsize(q2, -p2, smin=smin, smax=smax, eps=ssnew, ntry=ntry_eps0, nsteps=nsteps_eps0, logspace=logeps)

        # Hastings factor: product of backward over forward selection probs.
        hastings_factor = np.prod(pb_num[:i+2])/np.prod(pf_den[:i+2])
        prob = np.exp(H0 - H2) * hastings_factor
        if verbose: print("prb, fac, metrop : ", prob, hastings_factor, prob/hastings_factor, pb_num[-1], pf_den[0])

        #Return
        toret = [[prob, prob/hastings_factor, hastings_factor], np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]]
        if np.isnan(prob) or np.isinf(prob) or (q0-q1).sum()==0:
            return q0, p0, 2., *toret
        elif np.random.uniform(0., 1., size=1) > min(1., prob):
            return q0, p0, 0., *toret
        else:
            return q2, p2, 1., *toret
    ##
##
######################
######################
class AdHMC_May(HMC):
    ##DO NOT DELETE. HAS normprob and other methods implemented that need to be moved above
    """Earlier adaptive-step-size HMC variant (kept for its ``normprob``
    weighting and single-leapfrog adaptation, which are not yet ported to
    ``AdHMC``)."""

    def __init__(self, log_prob, grad_log_prob, invmetric_diag=None):
        # Bug fix: the original called ``super.__init__(...)`` (no call
        # parentheses), which raises a TypeError at instantiation; the
        # intent, as in the sibling classes, is ``super().__init__``.
        super().__init__(log_prob, grad_log_prob, invmetric_diag)

    def get_stepsize(self, q0, p0, smin=0.01, smax=1.0, ntry=20, logspace=True, nsteps=1, eps=None):
        """Score ``ntry`` candidate step sizes in [smin, smax] and either
        sample one (``eps`` is None) or return the probability of the bin
        above a given ``eps``.

        NOTE(review): identical to ``AdHMC_eps0.get_stepsize``; could be
        hoisted into the shared base class.
        """
        H0 = self.H(q0, p0)
        Hs = np.zeros(ntry)
        if logspace: steps = np.logspace(np.log10(smin), np.log10(smax), ntry)
        else: steps = np.linspace(smin, smax, ntry)
        pwts = steps.copy()**0.5 #np.linspace(0.9, 1.1, steps.size)
        for iss, ss in enumerate(steps):
            #nsteps = int(steps.max()/ss)+1
            q1, p1 = self.leapfrog(q0, p0, nsteps, ss)
            Hs[iss] = self.H(q1, p1)
        pp = np.exp(H0 - Hs) * pwts
        # Zero out numerically broken weights before normalization.
        pp[np.isnan(pp)] = 0
        pp[np.isinf(pp)] = 0
        pp /= pp.sum()
        cdf = np.cumsum(pp)
        if eps is None:
            # Inverse-CDF sampling of a bin, then uniform within the bin.
            sx = np.random.uniform(low=cdf.min())
            isx = np.where(sx > cdf)[0][-1]
            sx2 = np.random.uniform(steps[isx], steps[isx+1])
            prob = pp[isx+1] # * 1/(steps[isx+1]-steps[isx+1])
            return sx2, pp[isx+1]
        else:
            prob = pp[np.where(steps > eps)[0][0]]
            return prob

    def hmc_step(self, q0, Nleap, smin=0.01, smax=1.0, ratios= [0.75, 1.0, 1/0.75], Tint=0, ntry=20, nsteps=1, normprob=True):
        """Single adaptive-HMC iteration: one single-step leapfrog per
        segment, with the step size re-chosen among ``ratios`` after every
        step and a Hastings correction restoring detailed balance.

        Returns (q, p, flag, acc_prob, stacked [pf_den, pb_num, eps],
        [Hcount, Vgcount, leapcount]); flag is 1. accept, 0. reject,
        2. unusable proposal, 100+i numerical failure at segment i.
        """
        self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0

        p0 = np.random.normal(size=q0.size).reshape(q0.shape) * self.metricstd
        # Exactly one of Tint / Nleap must be supplied.
        if (Tint == 0) and (Nleap == 0):
            print("Tint and Nleap cannot be both zeros")
            import sys
            sys.exit()
        elif (Tint != 0) and (Nleap != 0):
            print("Tint and Nleap both given and are inconsistent")
            import sys
            sys.exit()

        if Tint == 0: N = Nleap
        else: N = int(Tint/smin)
        #First step is drawn from a distribution
        eps, pf_den, pb_num = np.zeros(N), np.zeros(N), np.zeros(N)
        pwts0 = 1. #np.array([0.9, 1.0, 1.1])
        nr = len(ratios)
        H0 = self.H(q0, p0)

        # Half-step integrator experiment; only referenced from the
        # commented-out call below.
        def halfleap(q, p, step_size, Vgq=None): #This needs to be optimized to not estimate V_g again and again
            self.leapcount += 1
            q0, p0 = q, p
            if Vgq is None: Vgq = self.V_g(q)
            p = p - 0.5*step_size * Vgq
            q = q + step_size * self.KE_g(p)
            return q, p, Vgq

        #First step is drawn from a distribution
        ss, pf_den[0] = self.get_stepsize(q0, p0, smin, smax, ntry=ntry, nsteps=nsteps)
        eps[0] = ss
        q1, p1, _ = self.leapfrog1(q0, p0, ss)
        H1 = self.H(q1, p1)
        Hprev = H0
        # sigma only feeds commented-out Gaussian weighting experiments.
        sigma = np.log(0.5)/2.
        #print('Doing half step to estimate goodness')

        for i in range(N-1):
            #q1, p1, H1 is the current position.
            #ss is the current step size i.e. the last taken
            #Forward
            pf, pb = np.zeros(nr), np.zeros(nr)
            qs, ps, Hs = [], [], []
            Vgq = None
            for j in range(nr):
                ss2 = ss*ratios[j]
                q, p, Vgq = self.leapfrog1(q1, p1, ss2, Vgq)
                #q, p, Vgq = halfleap(q1, p1, ss2, Vgq)
                qs.append(q)
                ps.append(p)
                Hs.append(self.H(q, p))
                #if normprob: pH = np.exp(-0.5 * (Hs[-1] - Hprev)**2 / sigma**2)
                #if normprob: pH = np.exp(- abs(Hs[-1] - Hprev))
                if normprob: pH = np.exp(- abs(Hs[-1] - H1))
                else: pH = np.exp(H1 - Hs[-1])
                if np.isnan(pH) or np.isinf(pH): pH = 0
                pf[j] = pH
            # Zero weights that would push the step size outside [smin, smax].
            pwts = np.ones(nr) * pwts0
            if smin > ss*ratios[0]: pwts[0] = 0
            if smax < ss*ratios[-1]: pwts[-1] = 0
            pf *= pwts
            pf /= pf.sum()
            if np.isnan(pf.sum()) or np.isinf(pf.sum()):
                return q0, p0, 100+i, 0, np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]

            pind = np.random.choice(nr, p=pf)
            ssnew = ss*ratios[pind]
            q2, p2, _ = self.leapfrog1(q1, p1, ssnew, Vgq)
            H2 = self.H(q2, p2)
            #q2, p2, H2 = qs[pind], ps[pind], Hs[pind]
            pf_den[i+1] = pf[pind]

            #step from q1, p1 if we arrive here with ssnew step size in reverse direction
            Hsb = []
            for j in range(nr):
                if np.allclose(ssnew*ratios[j] , ss):
                    # Reversing the chosen ratio recovers the previous step
                    # size, so the previous Hamiltonian can be reused.
                    Hsb.append(Hprev)
                    pbind = j
                else:
                    ss2 = ssnew*ratios[j]
                    q, p, Vgq = self.leapfrog1(q1, -p1, ss2, Vgq)
                    Hsb.append(self.H(q, p))
                #if normprob: pH = np.exp(-0.5 * (Hsb[-1] - H2)**2 / sigma**2)
                #if normprob: pH = np.exp(- abs(Hsb[-1] - H2))
                if normprob: pH = np.exp(- abs(Hsb[-1] - H1))
                else: pH = np.exp(H1 - Hsb[-1])
                if np.isnan(pH) or np.isinf(pH): pH = 0
                pb[j] = pH
            pwts = np.ones(nr) *pwts0
            if smin > ssnew*ratios[0]: pwts[0] = 0
            if smax < ssnew*ratios[-1]: pwts[-1] = 0
            pb *= pwts
            pb /= pb.sum()
            pb_num[i] = pb[pbind]

            #setup for next step
            eps[i+1] =ssnew # ratios[pind]
            ss = ssnew
            Hprev = H1
            q1, p1, H1 = q2, p2, H2
            #print(pf, pb, pf[pind], pb[pbind])
            #print(ss)
            ##THIS VIOLATES DB BUT LETS SEE HOW BAD THIS IS
            ##THIS MIGHT BE RELATED TO NUTS GOING IN BOTH DIRECTIONS
            ##CONSIDER SEQ OF STEPSIZES 0.5,1,2,3,6 with TINT=10, AND CASE WHEN WE START FROM 1 OR 3
            #if Tint > 0 :
            #    if eps[:i+1].sum() > Tint : break

        #print('started and ended with step sizes ', eps[0], eps[i+1])
        #print('Number of steps taken is %d out of %d'%(i, N))
        if (ssnew > smin) and (ssnew < smax):
            #pb_num[-1] = self.get_stepsize(q2, -p2, smin=smin, smax=smax, eps=ssnew, ntry=ntry, nsteps=nsteps)
            pb_num[i+1] = self.get_stepsize(q2, -p2, smin=smin, smax=smax, eps=ssnew, ntry=ntry, nsteps=nsteps)

        # Hastings factor: product of backward over forward selection probs.
        adfac = np.prod(pb_num[:i+2])/np.prod(pf_den[:i+2])
        prob = np.exp(H0 - H2) * adfac
        #print("prb, fac, metrop : ", prob, adfac, prob/adfac, pb_num[-1], pf_den[0])

        if np.isnan(prob) or np.isinf(prob) or (q0-q1).sum()==0:
            return q0, p0, 2., prob, np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]
        elif np.random.uniform(0., 1., size=1) > min(1., prob):
            return q0, p0, 0., prob, np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]
        else: return q2, p2, 1., prob, np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]
    ##
##
| [
"numpy.random.normal",
"numpy.prod",
"numpy.log10",
"sys.exit",
"numpy.sqrt",
"numpy.ones",
"numpy.allclose",
"numpy.random.choice",
"numpy.where",
"numpy.log",
"numpy.exp",
"numpy.stack",
"numpy.zeros",
"numpy.linspace",
"numpy.isnan",
"numpy.random.uniform",
"numpy.cumsum",
"nump... | [((2043, 2058), 'numpy.exp', 'np.exp', (['(H0 - H1)'], {}), '(H0 - H1)\n', (2049, 2058), True, 'import numpy as np\n'), ((3450, 3464), 'numpy.zeros', 'np.zeros', (['ntry'], {}), '(ntry)\n', (3458, 3464), True, 'import numpy as np\n'), ((3975, 3988), 'numpy.cumsum', 'np.cumsum', (['pp'], {}), '(pp)\n', (3984, 3988), True, 'import numpy as np\n'), ((6994, 7008), 'numpy.zeros', 'np.zeros', (['ntry'], {}), '(ntry)\n', (7002, 7008), True, 'import numpy as np\n'), ((7519, 7532), 'numpy.cumsum', 'np.cumsum', (['pp'], {}), '(pp)\n', (7528, 7532), True, 'import numpy as np\n'), ((13606, 13620), 'numpy.zeros', 'np.zeros', (['ntry'], {}), '(ntry)\n', (13614, 13620), True, 'import numpy as np\n'), ((14131, 14144), 'numpy.cumsum', 'np.cumsum', (['pp'], {}), '(pp)\n', (14140, 14144), True, 'import numpy as np\n'), ((2111, 2125), 'numpy.isnan', 'np.isnan', (['prob'], {}), '(prob)\n', (2119, 2125), True, 'import numpy as np\n'), ((2129, 2143), 'numpy.isinf', 'np.isinf', (['prob'], {}), '(prob)\n', (2137, 2143), True, 'import numpy as np\n'), ((3567, 3596), 'numpy.linspace', 'np.linspace', (['smin', 'smax', 'ntry'], {}), '(smin, smax, ntry)\n', (3578, 3596), True, 'import numpy as np\n'), ((3855, 3870), 'numpy.exp', 'np.exp', (['(H0 - Hs)'], {}), '(H0 - Hs)\n', (3861, 3870), True, 'import numpy as np\n'), ((3889, 3901), 'numpy.isnan', 'np.isnan', (['pp'], {}), '(pp)\n', (3897, 3901), True, 'import numpy as np\n'), ((3919, 3931), 'numpy.isinf', 'np.isinf', (['pp'], {}), '(pp)\n', (3927, 3931), True, 'import numpy as np\n'), ((4126, 4171), 'numpy.random.uniform', 'np.random.uniform', (['steps[isx]', 'steps[isx + 1]'], {}), '(steps[isx], steps[isx + 1])\n', (4143, 4171), True, 'import numpy as np\n'), ((5462, 5472), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5470, 5472), False, 'import sys\n'), ((6161, 6176), 'numpy.exp', 'np.exp', (['(H0 - H1)'], {}), '(H0 - H1)\n', (6167, 6176), True, 'import numpy as np\n'), ((6338, 6369), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, 
eps]'], {}), '([pf_den, pb_num, eps])\n', (6346, 6369), True, 'import numpy as np\n'), ((6427, 6441), 'numpy.isnan', 'np.isnan', (['prob'], {}), '(prob)\n', (6435, 6441), True, 'import numpy as np\n'), ((6445, 6459), 'numpy.isinf', 'np.isinf', (['prob'], {}), '(prob)\n', (6453, 6459), True, 'import numpy as np\n'), ((7111, 7140), 'numpy.linspace', 'np.linspace', (['smin', 'smax', 'ntry'], {}), '(smin, smax, ntry)\n', (7122, 7140), True, 'import numpy as np\n'), ((7399, 7414), 'numpy.exp', 'np.exp', (['(H0 - Hs)'], {}), '(H0 - Hs)\n', (7405, 7414), True, 'import numpy as np\n'), ((7433, 7445), 'numpy.isnan', 'np.isnan', (['pp'], {}), '(pp)\n', (7441, 7445), True, 'import numpy as np\n'), ((7463, 7475), 'numpy.isinf', 'np.isinf', (['pp'], {}), '(pp)\n', (7471, 7475), True, 'import numpy as np\n'), ((7670, 7715), 'numpy.random.uniform', 'np.random.uniform', (['steps[isx]', 'steps[isx + 1]'], {}), '(steps[isx], steps[isx + 1])\n', (7687, 7715), True, 'import numpy as np\n'), ((7974, 7984), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7981, 7984), True, 'import numpy as np\n'), ((9199, 9210), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (9207, 9210), True, 'import numpy as np\n'), ((9212, 9223), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (9220, 9223), True, 'import numpy as np\n'), ((9225, 9236), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (9233, 9236), True, 'import numpy as np\n'), ((9644, 9655), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (9650, 9655), True, 'import numpy as np\n'), ((10893, 10919), 'numpy.random.choice', 'np.random.choice', (['nr'], {'p': 'pf'}), '(nr, p=pf)\n', (10909, 10919), True, 'import numpy as np\n'), ((12533, 12556), 'numpy.prod', 'np.prod', (['pb_num[:i + 2]'], {}), '(pb_num[:i + 2])\n', (12540, 12556), True, 'import numpy as np\n'), ((12555, 12578), 'numpy.prod', 'np.prod', (['pf_den[:i + 2]'], {}), '(pf_den[:i + 2])\n', (12562, 12578), True, 'import numpy as np\n'), ((12592, 12607), 'numpy.exp', 'np.exp', (['(H0 
- H2)'], {}), '(H0 - H2)\n', (12598, 12607), True, 'import numpy as np\n'), ((12823, 12854), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, eps]'], {}), '([pf_den, pb_num, eps])\n', (12831, 12854), True, 'import numpy as np\n'), ((12912, 12926), 'numpy.isnan', 'np.isnan', (['prob'], {}), '(prob)\n', (12920, 12926), True, 'import numpy as np\n'), ((12930, 12944), 'numpy.isinf', 'np.isinf', (['prob'], {}), '(prob)\n', (12938, 12944), True, 'import numpy as np\n'), ((13723, 13752), 'numpy.linspace', 'np.linspace', (['smin', 'smax', 'ntry'], {}), '(smin, smax, ntry)\n', (13734, 13752), True, 'import numpy as np\n'), ((14011, 14026), 'numpy.exp', 'np.exp', (['(H0 - Hs)'], {}), '(H0 - Hs)\n', (14017, 14026), True, 'import numpy as np\n'), ((14045, 14057), 'numpy.isnan', 'np.isnan', (['pp'], {}), '(pp)\n', (14053, 14057), True, 'import numpy as np\n'), ((14075, 14087), 'numpy.isinf', 'np.isinf', (['pp'], {}), '(pp)\n', (14083, 14087), True, 'import numpy as np\n'), ((14282, 14327), 'numpy.random.uniform', 'np.random.uniform', (['steps[isx]', 'steps[isx + 1]'], {}), '(steps[isx], steps[isx + 1])\n', (14299, 14327), True, 'import numpy as np\n'), ((14932, 14942), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14940, 14942), False, 'import sys\n'), ((15245, 15256), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (15253, 15256), True, 'import numpy as np\n'), ((15258, 15269), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (15266, 15269), True, 'import numpy as np\n'), ((15271, 15282), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (15279, 15282), True, 'import numpy as np\n'), ((15994, 16005), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (16000, 16005), True, 'import numpy as np\n'), ((17336, 17362), 'numpy.random.choice', 'np.random.choice', (['nr'], {'p': 'pf'}), '(nr, p=pf)\n', (17352, 17362), True, 'import numpy as np\n'), ((19549, 19572), 'numpy.prod', 'np.prod', (['pb_num[:i + 2]'], {}), '(pb_num[:i + 2])\n', (19556, 19572), True, 'import numpy as np\n'), 
((19571, 19594), 'numpy.prod', 'np.prod', (['pf_den[:i + 2]'], {}), '(pf_den[:i + 2])\n', (19578, 19594), True, 'import numpy as np\n'), ((19608, 19623), 'numpy.exp', 'np.exp', (['(H0 - H2)'], {}), '(H0 - H2)\n', (19614, 19623), True, 'import numpy as np\n'), ((19729, 19743), 'numpy.isnan', 'np.isnan', (['prob'], {}), '(prob)\n', (19737, 19743), True, 'import numpy as np\n'), ((19747, 19761), 'numpy.isinf', 'np.isinf', (['prob'], {}), '(prob)\n', (19755, 19761), True, 'import numpy as np\n'), ((2219, 2254), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '(1)'}), '(0.0, 1.0, size=1)\n', (2236, 2254), True, 'import numpy as np\n'), ((3506, 3520), 'numpy.log10', 'np.log10', (['smin'], {}), '(smin)\n', (3514, 3520), True, 'import numpy as np\n'), ((3522, 3536), 'numpy.log10', 'np.log10', (['smax'], {}), '(smax)\n', (3530, 3536), True, 'import numpy as np\n'), ((5619, 5629), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5627, 5629), False, 'import sys\n'), ((6533, 6568), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '(1)'}), '(0.0, 1.0, size=1)\n', (6550, 6568), True, 'import numpy as np\n'), ((7050, 7064), 'numpy.log10', 'np.log10', (['smin'], {}), '(smin)\n', (7058, 7064), True, 'import numpy as np\n'), ((7066, 7080), 'numpy.log10', 'np.log10', (['smax'], {}), '(smax)\n', (7074, 7080), True, 'import numpy as np\n'), ((7962, 7972), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7969, 7972), True, 'import numpy as np\n'), ((9845, 9857), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (9853, 9857), True, 'import numpy as np\n'), ((9859, 9871), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (9867, 9871), True, 'import numpy as np\n'), ((10150, 10169), 'numpy.exp', 'np.exp', (['(H1 - Hs[-1])'], {}), '(H1 - Hs[-1])\n', (10156, 10169), True, 'import numpy as np\n'), ((10273, 10284), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (10280, 10284), True, 'import numpy as np\n'), ((11232, 11266), 'numpy.allclose', 
'np.allclose', (['(ssnew * ratios[j])', 'ss'], {}), '(ssnew * ratios[j], ss)\n', (11243, 11266), True, 'import numpy as np\n'), ((11528, 11548), 'numpy.exp', 'np.exp', (['(H1 - Hsb[-1])'], {}), '(H1 - Hsb[-1])\n', (11534, 11548), True, 'import numpy as np\n'), ((11651, 11662), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (11658, 11662), True, 'import numpy as np\n'), ((13018, 13053), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '(1)'}), '(0.0, 1.0, size=1)\n', (13035, 13053), True, 'import numpy as np\n'), ((13662, 13676), 'numpy.log10', 'np.log10', (['smin'], {}), '(smin)\n', (13670, 13676), True, 'import numpy as np\n'), ((13678, 13692), 'numpy.log10', 'np.log10', (['smax'], {}), '(smax)\n', (13686, 13692), True, 'import numpy as np\n'), ((15089, 15099), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15097, 15099), False, 'import sys\n'), ((16246, 16258), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (16254, 16258), True, 'import numpy as np\n'), ((16260, 16272), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (16268, 16272), True, 'import numpy as np\n'), ((16973, 16984), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (16980, 16984), True, 'import numpy as np\n'), ((17744, 17778), 'numpy.allclose', 'np.allclose', (['(ssnew * ratios[j])', 'ss'], {}), '(ssnew * ratios[j], ss)\n', (17755, 17778), True, 'import numpy as np\n'), ((18380, 18391), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (18387, 18391), True, 'import numpy as np\n'), ((19821, 19852), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, eps]'], {}), '([pf_den, pb_num, eps])\n', (19829, 19852), True, 'import numpy as np\n'), ((19911, 19946), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '(1)'}), '(0.0, 1.0, size=1)\n', (19928, 19946), True, 'import numpy as np\n'), ((2854, 2883), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'q.size'}), '(size=q.size)\n', (2870, 2883), True, 'import numpy as np\n'), ((4082, 4100), 
'numpy.where', 'np.where', (['(sx > cdf)'], {}), '(sx > cdf)\n', (4090, 4100), True, 'import numpy as np\n'), ((5235, 5265), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'q0.size'}), '(size=q0.size)\n', (5251, 5265), True, 'import numpy as np\n'), ((7626, 7644), 'numpy.where', 'np.where', (['(sx > cdf)'], {}), '(sx > cdf)\n', (7634, 7644), True, 'import numpy as np\n'), ((9024, 9054), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'q0.size'}), '(size=q0.size)\n', (9040, 9054), True, 'import numpy as np\n'), ((10189, 10201), 'numpy.isnan', 'np.isnan', (['pH'], {}), '(pH)\n', (10197, 10201), True, 'import numpy as np\n'), ((10205, 10217), 'numpy.isinf', 'np.isinf', (['pH'], {}), '(pH)\n', (10213, 10217), True, 'import numpy as np\n'), ((10750, 10781), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, eps]'], {}), '([pf_den, pb_num, eps])\n', (10758, 10781), True, 'import numpy as np\n'), ((11568, 11580), 'numpy.isnan', 'np.isnan', (['pH'], {}), '(pH)\n', (11576, 11580), True, 'import numpy as np\n'), ((11584, 11596), 'numpy.isinf', 'np.isinf', (['pH'], {}), '(pH)\n', (11592, 11596), True, 'import numpy as np\n'), ((14238, 14256), 'numpy.where', 'np.where', (['(sx > cdf)'], {}), '(sx > cdf)\n', (14246, 14256), True, 'import numpy as np\n'), ((14724, 14754), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'q0.size'}), '(size=q0.size)\n', (14740, 14754), True, 'import numpy as np\n'), ((16851, 16870), 'numpy.exp', 'np.exp', (['(H1 - Hs[-1])'], {}), '(H1 - Hs[-1])\n', (16857, 16870), True, 'import numpy as np\n'), ((16890, 16902), 'numpy.isnan', 'np.isnan', (['pH'], {}), '(pH)\n', (16898, 16902), True, 'import numpy as np\n'), ((16906, 16918), 'numpy.isinf', 'np.isinf', (['pH'], {}), '(pH)\n', (16914, 16918), True, 'import numpy as np\n'), ((17240, 17271), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, eps]'], {}), '([pf_den, pb_num, eps])\n', (17248, 17271), True, 'import numpy as np\n'), ((18257, 18277), 'numpy.exp', 'np.exp', (['(H1 - 
Hsb[-1])'], {}), '(H1 - Hsb[-1])\n', (18263, 18277), True, 'import numpy as np\n'), ((18297, 18309), 'numpy.isnan', 'np.isnan', (['pH'], {}), '(pH)\n', (18305, 18309), True, 'import numpy as np\n'), ((18313, 18325), 'numpy.isinf', 'np.isinf', (['pH'], {}), '(pH)\n', (18321, 18325), True, 'import numpy as np\n'), ((19999, 20030), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, eps]'], {}), '([pf_den, pb_num, eps])\n', (20007, 20030), True, 'import numpy as np\n'), ((20115, 20146), 'numpy.stack', 'np.stack', (['[pf_den, pb_num, eps]'], {}), '([pf_den, pb_num, eps])\n', (20123, 20146), True, 'import numpy as np\n'), ((4304, 4325), 'numpy.where', 'np.where', (['(steps > eps)'], {}), '(steps > eps)\n', (4312, 4325), True, 'import numpy as np\n'), ((7848, 7869), 'numpy.where', 'np.where', (['(steps > eps)'], {}), '(steps > eps)\n', (7856, 7869), True, 'import numpy as np\n'), ((14460, 14481), 'numpy.where', 'np.where', (['(steps > eps)'], {}), '(steps > eps)\n', (14468, 14481), True, 'import numpy as np\n')] |
import logging
import multiprocessing
import os
import sys
import time
import cv2
import h5py
import numpy as np
import caiman
from caiman.motion_correction import high_pass_filter_space, motion_correct_iteration_fast, sliding_window, tile_and_correct
from caiman.source_extraction.cnmf import online_cnmf, pre_processing, initialization
from scipy.sparse import csc_matrix, coo_matrix
from modules.fp_detector.model import FpDetector
from modules.laser_handler import LaserHandler
from modules.video_handler import CV2VideoHandler, H5VideoHandler, TIFFVideoHandler
from modules.utils import zscore, HeatMap
class MiniscopeOnACID(online_cnmf.OnACID):
def __init__(self, caiman_params,
seed_file=None, sync_pattern_file=None, fp_detect_method=None,
estimates=None, path=None, dview=None):
"""
caiman_params (CMNFParamas object): Please see https://caiman.readthedocs.io/en/master/core_functions.html?highlight=params#caiman.source_extraction.cnmf.params.CNMFParams for the details.
seed_file (str, optional): Seed file path for seeded initialization.
'init_method' param in 'caiman_params' will automaticaly set to 'seeded'.
sync_pattern_file (str, optional): Sync pattern file path for real-time syncronized detection.
'min_num_trial' param in 'caiman_params' will automaticaly set to 0.
fp_detect_method (str, optional): Select from 'askl', 'tpot', or 'deep'.
Please see https://github.com/jf-lab/cnmfe-reviewer/ for the details.
estimates, path, dview (objects, optional): Params for online_cnmf.OnACID object.
Please see https://caiman.readthedocs.io/en/master/core_functions.html?highlight=OnACID#online-cnmf-onacid for the details.
"""
if seed_file is not None:
caiman_params.change_params({'init_method': 'seeded'})
if sync_pattern_file is not None:
caiman_params.change_params({'min_num_trial': 0})
super().__init__(params=caiman_params, estimates=estimates, path=path, dview=dview)
self.seed_file = seed_file
if sync_pattern_file is None:
self.sync_patterns = None
else:
with h5py.File(sync_pattern_file, 'r') as f:
self.sync_patterns = zscore(f['W'][()], axis=0)
self.laser = LaserHandler()
self.time_frame = 0
self.checked_comps = 0
self.accept_comp_num = 0
self.reject_comp_num = 0
self.fp_detector = FpDetector(fp_detect_method)
def __init_window_status(self, frame_shape, video_bit='uint8'):
self.window_name = 'microscope CNMF-E'
# seekbar texts
self.gain_text = 'gain'
self.fps_text = '0: 05fps\n1: 10fps\n2: 15fps\n3: 30fps\n4: 60fps'
self.x0_text, self.x1_text = 'set_x0', 'set_x1'
self.y0_text, self.y1_text = 'set_y0', 'set_y1'
self.dr_max_text = 'dynamic range: max'
self.dr_min_text = 'dynamic range: min'
self.start_text = 'start analysis!'
self.demixed_bias_text = 'demixed color bias'
# params
h, w = frame_shape
self.fps = 30
self.demixed_bias = 1
self.x0 = 0
self.x1 = w
self.y0 = 0
self.y1 = h
self.dr_min = 0
if video_bit == 'uint8':
self.dr_max = 255
else:
self.dr_max = 65535
# self.dr_max = 65535
def __set_gain(self, x):
gain = [16, 32, 64]
cap.set(14, gain[x])
time.sleep(0.01)
logging.info(f'camera gain was set to {self.cap.get(14)}')
def __set_fps(self, x):
self.fps = [5, 10, 15, 30, 60][x]
logging.info(f'fps was set to {self.fps}')
def __set_plot(self, t, frame_shape, with_demixed=True):
h, w = frame_shape
half_w, half_h = w//2, h//2
even_w, even_h = half_w*2, half_h*2
bg = self.estimates.b0.reshape(self.estimates.dims, order='f')
bg = self.__compress_to_uint8(bg)
if bg.shape[:2] != (half_h, half_w):
bg = cv2.resize(bg, (half_w, half_h), interpolation=cv2.INTER_AREA)
frame_cor = self.__compress_to_uint8(self.current_frame_cor)
if frame_cor.shape[:2] != (half_h, half_w):
frame_cor = cv2.resize(self.current_frame_cor, (half_w, half_h), interpolation=cv2.INTER_AREA)
diff = frame_cor - bg
diff[frame_cor < bg] = 0
plots = np.zeros((h, w), dtype='uint8')
plots[:half_h, :half_w] = frame_cor
plots[:half_h, half_w:even_w] = bg
plots[half_h:even_h, :half_w] = diff
plots = cv2.applyColorMap(plots, cv2.COLORMAP_VIRIDIS)
plots[half_h:, half_w:] = 0
if with_demixed:
A_f = self.estimates.Ab[:, self.params.get('init', 'nb'):] # (size, N)
C_f = self.estimates.C_on[self.params.get('init', 'nb'):self.M, t-1] # (N)
color_map = [[1,0,0],[0,1,0],[0,0,1],[1,1,0],[1,0,1],[0,1,1]]
colored = np.zeros((self.estimates.dims[0], self.estimates.dims[1], 3), dtype='uint8')
for i, c in enumerate(color_map):
tmp = A_f[:, i::len(color_map)] * C_f[i::len(color_map)] * self.demixed_bias
tmp = tmp.astype('uint8')
tmp = tmp.reshape((self.estimates.dims[0], self.estimates.dims[1]), order='f')
for j in range(3):
if c[j] == 1:
colored[:, :, j] += tmp
if colored.shape[:2] != (half_h, half_w):
colored = cv2.resize(colored, (half_w, half_h), interpolation=cv2.INTER_AREA)
plots[half_h:even_h, half_w:even_w] = colored
if self.sync_patterns is not None:
latest = zscore(self.estimates.C_on[self.params.get('init', 'nb'):self.M, self.time_frame-1:self.time_frame])
heatmap_tensor = self.heatmap.get_heatmap(np.concatenate([latest, self.sync_patterns], axis=1))
plots = np.hstack([plots, heatmap_tensor])
self.plot = plots
    def __set_results(self, t):
        """Slice the online ring buffers at frame *t* into final-style estimates.

        Populates ``estimates.A/b/C/f/YrA/S/bl`` from the running buffers
        (``Ab``, ``C_on``, ``noisyC``) so the periodic save sees the same layout
        CaImAn produces after a full fit. Only the last ``t - t // epochs``..``t``
        window of the traces is kept.

        Args:
            t (int): Current frame index into the ring buffers.
        """
        epochs = self.params.get('online', 'epochs')
        if self.params.get('online', 'normalize'):
            # Undo the per-pixel FOV normalisation applied during fitting.
            self.estimates.Ab = csc_matrix(self.estimates.Ab.multiply(
                self.img_norm.reshape(-1, order='F')[:, np.newaxis]))
        # First `nb` columns of Ab are background; the rest are neuron footprints.
        self.estimates.A, self.estimates.b = self.estimates.Ab[:, self.params.get('init', 'nb'):], self.estimates.Ab[:, :self.params.get('init', 'nb')].toarray()
        self.estimates.C, self.estimates.f = self.estimates.C_on[self.params.get('init', 'nb'):self.M, t - t //
                    epochs:t], self.estimates.C_on[:self.params.get('init', 'nb'), t - t // epochs:t]
        noisyC = self.estimates.noisyC[self.params.get('init', 'nb'):self.M, t - t // epochs:t]
        # Residual traces = raw projections minus the denoised traces.
        self.estimates.YrA = noisyC - self.estimates.C
        if self.estimates.OASISinstances is not None:
            # Deconvolution ran: collect baselines and spike estimates per unit.
            self.estimates.bl = [osi.b for osi in self.estimates.OASISinstances]
            self.estimates.S = np.stack([osi.s for osi in self.estimates.OASISinstances])
            self.estimates.S = self.estimates.S[:, t - t // epochs:t]
        else:
            self.estimates.bl = [0] * self.estimates.C.shape[0]
            self.estimates.S = np.zeros_like(self.estimates.C)
def __save_results(self, frame):
if self.params.get('online', 'ds_factor') > 1:
neuron_num = self.estimates.A.shape[-1]
A = np.hstack([cv2.resize(self.estimates.A[:, i].reshape(self.estimates.dims, order='F').toarray(),
frame.shape[::-1]).reshape(-1, order='F')[:,None] for i in range(neuron_num)])
with h5py.File(self.out_mat_file, 'a') as f:
f['A'].resize(A.shape)
f['A'][()] = A
f['C'].resize(self.estimates.C.shape)
f['C'][()] = self.estimates.C
f['S'].resize(self.estimates.S.shape)
f['S'][()] = self.estimates.S
def __compress_to_uint8(self, frame):
frame = frame.astype('float')
frame[frame < self.dr_min] = self.dr_min
frame[frame > self.dr_max] = self.dr_max
frame -= self.dr_min
frame *= 255 / (self.dr_max - self.dr_min)
frame = frame.astype('uint8')
return frame
def __prepare_window(self, mode, frame_shape, video_bit='uint8'):
h, w = frame_shape
cv2.destroyAllWindows()
cv2.namedWindow(self.window_name)
if mode == 'prepare':
cv2.createTrackbar(self.gain_text, self.window_name, 0, 2, self.__set_gain)
cv2.createTrackbar(self.fps_text, self.window_name, 1, 4, self.__set_fps)
cv2.createTrackbar(self.x0_text, self.window_name, 0, w, lambda x:x)
cv2.createTrackbar(self.x1_text, self.window_name, w, w, lambda x:x)
cv2.createTrackbar(self.y0_text, self.window_name, 0, h, lambda x:x)
cv2.createTrackbar(self.y1_text, self.window_name, h, h, lambda x:x)
cv2.createTrackbar(self.start_text, self.window_name, 0, 1, lambda x: True if x == 0 else False)
elif mode == 'analyze':
cv2.createTrackbar(self.demixed_bias_text, self.window_name, 1, 10, lambda x:x)
if mode != 'initialize':
if video_bit == 'uint8':
cv2.createTrackbar(self.dr_min_text, self.window_name, 0, 255, lambda x:x)
cv2.createTrackbar(self.dr_max_text, self.window_name, 255, 255, lambda x:x)
else:
cv2.createTrackbar(self.dr_min_text, self.window_name, 0, 65535, lambda x:x)
cv2.createTrackbar(self.dr_max_text, self.window_name, 65535, 65535, lambda x:x)
# cv2.createTrackbar(self.dr_min_text, self.window_name, 0, 65535, lambda x:x)
# cv2.createTrackbar(self.dr_max_text, self.window_name, 65535, 65535, lambda x:x)
def __show_next_frame(self, lines, mode, text_color=(255, 255, 255), avi_out=None):
_, frame = self.cap.read()
if mode == 'prepare':
cv2.getTrackbarPos(self.gain_text, self.window_name)
self.x0 = cv2.getTrackbarPos(self.x0_text, self.window_name)
self.x1 = cv2.getTrackbarPos(self.x1_text, self.window_name)
self.y0 = cv2.getTrackbarPos(self.y0_text, self.window_name)
self.y1 = cv2.getTrackbarPos(self.y1_text, self.window_name)
cv2.getTrackbarPos(self.fps_text, self.window_name)
elif mode == 'analyze':
self.demixed_bias = cv2.getTrackbarPos(self.demixed_bias_text, self.window_name)
out_frame = frame[self.y0:self.y1, self.x0:self.x1]
if avi_out != None and self.cap.video_bit == 'uint8':
avi_out.write(out_frame)
v_frame = out_frame
if mode != 'initialize':
self.dr_min = cv2.getTrackbarPos(self.dr_min_text, self.window_name)
self.dr_max = cv2.getTrackbarPos(self.dr_max_text, self.window_name)
v_frame = self.__compress_to_uint8(v_frame)
if mode == 'analyze':
v_frame = np.dstack([v_frame, v_frame, v_frame])
if self.with_plot:
v_frame = np.hstack([v_frame, self.plot])
for i, line in enumerate(lines):
cv2.putText(v_frame, line, (5, (i+1)*20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, text_color)
cv2.imshow(self.window_name, v_frame)
return out_frame
    def __get_model_LN(self):
        """Build (or load) the Ring-CNN background model, if enabled in params.

        Returns:
            The trained Keras background model, or None when the 'ring_CNN'
            option is off.
        """
        if self.params.get('online', 'ring_CNN'):
            logging.info('Using Ring CNN model')
            from caiman.utils.nn_models import (fit_NL_model, create_LN_model, quantile_loss, rate_scheduler)
            gSig = self.params.get('init', 'gSig')[0]
            width = self.params.get('ring_CNN', 'width')
            nch = self.params.get('ring_CNN', 'n_channels')
            if self.params.get('ring_CNN', 'loss_fn') == 'pct':
                loss_fn = quantile_loss(self.params.get('ring_CNN', 'pct'))
            else:
                loss_fn = self.params.get('ring_CNN', 'loss_fn')
            if self.params.get('ring_CNN', 'lr_scheduler') is None:
                sch = None
            else:
                sch = rate_scheduler(*self.params.get('ring_CNN', 'lr_scheduler'))
            # NOTE(review): `fls` is not defined anywhere in this scope — this
            # looks copied from CaImAn's demo script and would raise NameError
            # if ring_CNN is ever enabled. Confirm and wire in the real source.
            Y = caiman.base.movies.load(fls[0],
                                        subindices=slice(self.params.get('online', 'init_batch')),
                                        var_name_hdf5=self.params.get('data', 'var_name_hdf5'))
            shape = Y.shape[1:] + (1,)
            logging.info('Starting background model training.')
            model_LN = create_LN_model(Y, shape=shape, n_channels=nch,
                                       lr=self.params.get('ring_CNN', 'lr'), gSig=gSig,
                                       loss=loss_fn, width=width,
                                       use_add=self.params.get('ring_CNN', 'use_add'),
                                       use_bias=self.params.get('ring_CNN', 'use_bias'))
            if self.params.get('ring_CNN', 'reuse_model'):
                # Reuse previously trained weights instead of retraining.
                logging.info('Using existing model from {}'.format(self.params.get('ring_CNN', 'path_to_model')))
                model_LN.load_weights(self.params.get('ring_CNN', 'path_to_model'))
            else:
                logging.info('Estimating model from scratch, starting training.')
                model_LN, history, path_to_model = fit_NL_model(model_LN, Y,
                                                              epochs=self.params.get('ring_CNN', 'max_epochs'),
                                                              patience=self.params.get('ring_CNN', 'patience'),
                                                              schedule=sch)
                logging.info('Training complete. Model saved in {}.'.format(path_to_model))
                # Remember where the model landed so later runs can reuse it.
                self.params.set('ring_CNN', {'path_to_model': path_to_model})
        else:
            model_LN = None
        return model_LN
    def __initialize_online(self, model_LN, Y):
        """Run CNMF(-E) initialization on the buffered batch *Y* and prime OnACID.

        Performs (optionally) Ring-CNN background removal, downsampling, motion
        correction and FOV normalization, then either 'bare' or 'seeded'
        initialization, and finally calls ``_prepare_object`` so that
        ``fit_next`` can be used frame-by-frame.

        Args:
            model_LN: Ring-CNN background model or None.
            Y (caiman.movie): (frames, height, width) initialization batch.

        Returns:
            self
        """
        _, original_d1, original_d2 = Y.shape
        opts = self.params.get_group('online')
        init_batch = opts['init_batch']
        if model_LN is not None:
            # Subtract the CNN-predicted background, clamping at zero.
            Y = Y - caiman.movie(np.squeeze(model_LN.predict(np.expand_dims(Y, -1))))
            Y = np.maximum(Y, 0)
        # Downsample if needed
        ds_factor = np.maximum(opts['ds_factor'], 1)
        if ds_factor > 1:
            Y = Y.resize(1./ds_factor, 1./ds_factor)
        self.estimates.shifts = []  # store motion shifts here
        self.estimates.time_new_comp = []
        if self.params.get('online', 'motion_correct'):
            max_shifts_online = self.params.get('online', 'max_shifts_online')
            if self.params.get('motion', 'gSig_filt') is None:
                mc = Y.motion_correct(max_shifts_online, max_shifts_online)
                Y = mc[0].astype(np.float32)
            else:
                # 1p data: estimate shifts on a high-pass-filtered copy, then
                # apply them to the unfiltered movie.
                Y_filt = np.stack([high_pass_filter_space(yf, self.params.motion['gSig_filt']) for yf in Y], axis=0)
                Y_filt = caiman.movie(Y_filt)
                mc = Y_filt.motion_correct(max_shifts_online, max_shifts_online)
                Y = Y.apply_shifts(mc[1])
            if self.params.get('motion', 'pw_rigid'):
                # Replicate each rigid shift per patch for piecewise-rigid bookkeeping.
                n_p = len([(it[0], it[1])
                       for it in sliding_window(Y[0], self.params.get('motion', 'overlaps'), self.params.get('motion', 'strides'))])
                for sh in mc[1]:
                    self.estimates.shifts.append([tuple(sh) for i in range(n_p)])
            else:
                self.estimates.shifts.extend(mc[1])
        self.img_min = Y.min()
        self.current_frame_cor = Y[-1]
        if self.params.get('online', 'normalize'):
            Y -= self.img_min
        img_norm = np.std(Y, axis=0)
        img_norm += np.median(img_norm)  # normalize data to equalize the FOV
        logging.info('Frame size:' + str(img_norm.shape))
        if self.params.get('online', 'normalize'):
            Y = Y/img_norm[None, :, :]
        total_frame, d1, d2 = Y.shape
        Yr = Y.to_2D().T  # convert data into 2D array
        self.img_norm = img_norm
        if self.params.get('online', 'init_method') == 'bare':
            logging.info('Using bare init')
            init = self.params.get_group('init').copy()
            is1p = (init['method_init'] == 'corr_pnr' and init['ring_size_factor'] is not None)
            if is1p:
                # 1p path needs a noise estimate for the ring background model.
                self.estimates.sn, psx = pre_processing.get_noise_fft(
                    Yr, noise_range=self.params.get('preprocess', 'noise_range'),
                    noise_method=self.params.get('preprocess', 'noise_method'),
                    max_num_samples_fft=self.params.get('preprocess', 'max_num_samples_fft'))
            # These are passed explicitly below; drop them from the kwargs dict.
            for key in ('K', 'nb', 'gSig', 'method_init'):
                init.pop(key, None)
            tmp = online_cnmf.bare_initialization(
                Y.transpose(1, 2, 0), init_batch=self.params.get('online', 'init_batch'),
                k=self.params.get('init', 'K'), gnb=self.params.get('init', 'nb'),
                method_init=self.params.get('init', 'method_init'), sn=self.estimates.sn,
                gSig=self.params.get('init', 'gSig'), return_object=False,
                options_total=self.params.to_dict(), **init)
            if is1p:
                (self.estimates.A, self.estimates.b, self.estimates.C, self.estimates.f,
                 self.estimates.YrA, self.estimates.W, self.estimates.b0) = tmp
            else:
                (self.estimates.A, self.estimates.b, self.estimates.C, self.estimates.f,
                 self.estimates.YrA) = tmp
            if self.fp_detector.method is not None:
                self.__reject_fp_comps(Y.shape[1:], max_bright=Y.max())
            # Initialise deconvolution-related per-unit arrays.
            self.estimates.S = np.zeros_like(self.estimates.C)
            nr = self.estimates.C.shape[0]
            self.estimates.g = np.array([-np.poly([0.9] * max(self.params.get('preprocess', 'p'), 1))[1:]
                                       for gg in np.ones(nr)])
            self.estimates.bl = np.zeros(nr)
            self.estimates.c1 = np.zeros(nr)
            self.estimates.neurons_sn = np.std(self.estimates.YrA, axis=-1)
            self.estimates.lam = np.zeros(nr)
        elif self.params.get('online', 'init_method') == 'seeded':
            init = self.params.get_group('init').copy()
            is1p = (init['method_init'] == 'corr_pnr' and init['ring_size_factor'] is not None)
            if self.seed_file is None:
                raise ValueError('Please input analyzed mat file path as seed_file.')
            # Load previously analysed footprints to seed the initialization.
            with h5py.File(self.seed_file, 'r') as f:
                Ain = f['A'][()]
            try:
                Ain = Ain.reshape((original_d1, original_d2, -1))
            except:
                raise ValueError('The shape of A does not match the video source!')
            # Show the summed seed footprints for a quick visual sanity check;
            # blocks until any key is pressed.
            window_name = 'please check A_seed'
            Ain_gray = Ain.sum(axis=2)
            cv2.imshow(window_name, np.dstack([Ain_gray, Ain_gray, Ain_gray]))
            cv2.waitKey(0)
            cv2.destroyWindow(window_name)
            # Resize seeds to the (possibly downsampled) working resolution and binarise.
            Ain = cv2.resize(Ain, (d2, d1))
            Ain = Ain.reshape((-1, Ain.shape[-1]), order='F')
            Ain_norm = (Ain - Ain.min(0)[None, :]) / (Ain.max(0) - Ain.min(0))
            A_seed = Ain_norm > 0.5
            tmp = online_cnmf.seeded_initialization(
                Y.transpose(1, 2, 0), A_seed, k=self.params.get('init', 'K'),
                gSig=self.params.get('init', 'gSig'), return_object=False)
            self.estimates.A, self.estimates.b, self.estimates.C, self.estimates.f, self.estimates.YrA = tmp
            if is1p:
                # Fit the ring background model W, b0 for the 1p pipeline.
                ssub_B = self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub')
                ring_size_factor = self.params.get('init', 'ring_size_factor')
                gSiz = 2 * np.array(self.params.get('init', 'gSiz')) // 2 + 1
                W, b0 = initialization.compute_W(
                    Y.transpose(1, 2, 0).reshape((-1, total_frame), order='F'),
                    self.estimates.A, self.estimates.C, (d1, d2), ring_size_factor * gSiz[0], ssub=ssub_B)
                self.estimates.W, self.estimates.b0 = W, b0
            # Initialise deconvolution-related per-unit arrays.
            self.estimates.S = np.zeros_like(self.estimates.C)
            nr = self.estimates.C.shape[0]
            self.estimates.g = np.array([-np.poly([0.9] * max(self.params.get('preprocess', 'p'), 1))[1:]
                                       for gg in np.ones(nr)])
            self.estimates.bl = np.zeros(nr)
            self.estimates.c1 = np.zeros(nr)
            self.estimates.neurons_sn = np.std(self.estimates.YrA, axis=-1)
            self.estimates.lam = np.zeros(nr)
        else:
            raise Exception('Unknown initialization method!')
        T1 = init_batch * self.params.get('online', 'epochs')
        self.params.set('data', {'dims': Y.shape[1:]})
        self._prepare_object(Yr, T1)
        return self
    def __fit_next_frame(self, frame, t, model_LN=None, out=None):
        """Preprocess one frame (background removal, motion correction) and feed it to OnACID.

        Args:
            frame (ndarray): Raw cropped camera frame.
            t (int): Current frame index into the ring buffers.
            model_LN: Optional Ring-CNN background model.
            out: Unused here — kept for interface compatibility with the
                upstream CaImAn implementation.
        """
        ssub_B = self.params.get('init', 'ssub_B') * self.params.get('init', 'ssub')
        d1, d2 = self.params.get('data', 'dims')
        max_shifts_online = self.params.get('online', 'max_shifts_online')
        if model_LN is not None:
            if self.params.get('ring_CNN', 'remove_activity'):
                # Reconstruct last frame's neural activity so only true
                # background is fed to the CNN predictor.
                activity = self.estimates.Ab[:,:self.N].dot(self.estimates.C_on[:self.N, t-1]).reshape(self.params.get('data', 'dims'), order='F')
                if self.params.get('online', 'normalize'):
                    activity *= self.img_norm
            else:
                activity = 0.
            # frame = frame.astype(np.float32) - activity
            frame = frame - np.squeeze(model_LN.predict(np.expand_dims(np.expand_dims(frame.astype(np.float32) - activity, 0), -1)))
            frame = np.maximum(frame, 0)
        t_frame_start = time.time()
        if np.isnan(np.sum(frame)):
            raise Exception('Current frame contains NaN')
        frame_ = frame.copy().astype(np.float32)
        if self.params.get('online', 'ds_factor') > 1:
            frame_ = cv2.resize(frame_, self.img_norm.shape[::-1])
        if self.params.get('online', 'normalize'):
            frame_ -= self.img_min     # make data non-negative
        if self.params.get('online', 'motion_correct'):
            # Template = median reconstruction over the last ~50 frames.
            templ = self.estimates.Ab.dot(
                np.median(self.estimates.C_on[:self.M, t-51:t-1], 1)).reshape(self.params.get('data', 'dims'), order='F')#*self.img_norm
            if self.is1p and self.estimates.W is not None:
                # Add the ring-model background B to the template.
                if ssub_B == 1:
                    B = self.estimates.W.dot((frame_ - templ).flatten(order='F') - self.estimates.b0) + self.estimates.b0
                    B = B.reshape(self.params.get('data', 'dims'), order='F')
                else:
                    # Compute B on the spatially-downscaled grid, then upsample.
                    b0 = self.estimates.b0.reshape((d1, d2), order='F')#*self.img_norm
                    bc2 = initialization.downscale(frame_ - templ - b0, (ssub_B, ssub_B)).flatten(order='F')
                    Wb = self.estimates.W.dot(bc2).reshape(((d1 - 1) // ssub_B + 1, (d2 - 1) // ssub_B + 1), order='F')
                    B = b0 + np.repeat(np.repeat(Wb, ssub_B, 0), ssub_B, 1)[:d1, :d2]
                templ += B
            if self.params.get('online', 'normalize'):
                templ *= self.img_norm
            if self.is1p:
                templ = high_pass_filter_space(templ, self.params.motion['gSig_filt'])
            if self.params.get('motion', 'pw_rigid'):
                frame_cor, shift, _, xy_grid = tile_and_correct(frame_, templ, self.params.motion['strides'], self.params.motion['overlaps'],
                                                                 self.params.motion['max_shifts'], newoverlaps=None, newstrides=None, upsample_factor_grid=4,
                                                                 upsample_factor_fft=10, show_movie=False, max_deviation_rigid=self.params.motion['max_deviation_rigid'],
                                                                 add_to_movie=0, shifts_opencv=True, gSig_filt=None,
                                                                 use_cuda=False, border_nan='copy')
            else:
                if self.is1p:
                    # Estimate shifts on a filtered copy, then re-apply them to
                    # the unfiltered frame below.
                    frame_orig = frame_.copy()
                    frame_ = high_pass_filter_space(frame_, self.params.motion['gSig_filt'])
                frame_cor, shift = motion_correct_iteration_fast(
                    frame_, templ, max_shifts_online, max_shifts_online)
                if self.is1p:
                    M = np.float32([[1, 0, shift[1]], [0, 1, shift[0]]])
                    frame_cor = cv2.warpAffine(
                        frame_orig, M, frame_.shape[::-1], flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT)
            self.estimates.shifts.append(shift)
        else:
            templ = None
            frame_cor = frame_
        self.current_frame_cor = frame_cor
        if self.params.get('online', 'normalize'):
            frame_cor = frame_cor/self.img_norm
        # Hand the flattened, corrected frame to OnACID's incremental update.
        self.fit_next(t, frame_cor.reshape(-1, order='F'))
        if self.fp_detector.method is not None:
            self.__reject_fp_comps((d1, d2), max_bright=frame_.max())
    def __reject_fp_comps(self, dims, max_bright):
        """Drop newly added components the false-positive detector rejects.

        Only components past ``self.checked_comps`` are scored; accepted ones
        are kept and the checked-count advances. Returns False on the early
        exits; the fall-through return is None.

        Args:
            dims (tuple): (d1, d2) spatial dimensions used to reshape footprints.
            max_bright: NOTE(review): unused in this implementation —
                confirm whether it was meant to normalise the footprints.
        """
        # Skip until enough history has accumulated (500 trace samples).
        if self.estimates.C.shape[1] < 500:
            return False
        # Nothing new since the last check.
        if self.estimates.A.shape[1] == self.checked_comps:
            return False
        logging.info('fp detector effected')
        checked_A = self.estimates.A.toarray()[:, :self.checked_comps]
        checked_C = self.estimates.C[:self.checked_comps]
        checked_YrA = self.estimates.YrA[:self.checked_comps]
        unchecked_A = self.estimates.A.toarray()[:, self.checked_comps:]
        unchecked_C = self.estimates.C[self.checked_comps:]
        unchecked_YrA = self.estimates.YrA[self.checked_comps:]
        # (d1, d2, N) -> (N, d1, d2) for the classifier.
        reshaped_unchecked_A = unchecked_A.reshape(dims + (-1,), order='F').transpose(2, 0, 1)
        pred, thred = self.fp_detector.predict(
            reshaped_unchecked_A / reshaped_unchecked_A.max(),
            unchecked_C / unchecked_C.max())
        # Keep only components scoring at or above the threshold.
        unchecked_A = unchecked_A[:, pred >= thred]
        unchecked_C = unchecked_C[pred >= thred]
        unchecked_YrA = unchecked_YrA[pred >= thred]
        self.accept_comp_num += (pred >= thred).sum()
        self.reject_comp_num += (pred < thred).sum()
        # NOTE(review): only A is guarded by this shape check while C/YrA are
        # always rebuilt below — confirm A can ever be non-2D here.
        if len(unchecked_A.shape) == 2:
            self.estimates.A = coo_matrix(np.concatenate([checked_A, unchecked_A], axis=1))
        self.estimates.C = np.concatenate([checked_C, unchecked_C], axis=0)
        self.estimates.YrA = np.concatenate([checked_YrA, unchecked_YrA], axis=0)
        self.checked_comps = self.estimates.C.shape[0] # update checked_comps
    def __fit(self, cap, output_dir='data/out/sample/', with_plot=True):
        """Main interactive loop: prepare the view, buffer an init batch, then run OnACID live.

        Three phases, all driven by the OpenCV window:
        1. 'prepare'   — user adjusts crop/gain/fps until the start slider moves.
        2. 'initialize'— buffer `init_batch` frames and run CNMF-E initialization.
        3. 'analyze'   — per-frame OnACID fitting with periodic saving (every
           100 frames) and optional pattern-triggered laser output.

        Args:
            cap: Video source exposing ``read()`` and ``video_bit``.
            output_dir (str): Directory for 'neurons.mat' and 'rec.avi'.
            with_plot (bool): Render the diagnostic panel next to the live view.
        """
        self.cap = cap
        self.with_plot = with_plot
        self.out_mat_file = os.path.join(output_dir, 'neurons.mat')
        self.out_avi_file = os.path.join(output_dir, 'rec.avi')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_LN = self.__get_model_LN()
        ret, frame = self.cap.read()
        if not ret:
            raise Exception('frame cannot read.')
        max_h, max_w = frame.shape
        self.__init_window_status((max_h, max_w), video_bit=self.cap.video_bit)
        self.__prepare_window(mode='prepare', frame_shape=(max_h, max_w), video_bit=self.cap.video_bit)
        prev_time = time.time()
        # Phase 1: preview until 'q' or the start slider is moved.
        # NOTE(review): the inner `while ... pass` loops are busy-waits pacing
        # the loop to self.fps — they burn a full core by design here.
        while True:
            time_d = 1 / self.fps
            while time.time() - prev_time < time_d:
                pass
            prev_time = time.time()
            frame = self.__show_next_frame(['prepareing...'], mode='prepare')
            if cv2.waitKey(1) & 0xFF == ord('q') or cv2.getTrackbarPos(self.start_text, self.window_name):
                break
        h, w = frame.shape
        self.heatmap = HeatMap(figsize=(500, h))
        avi_out = cv2.VideoWriter(self.out_avi_file, cv2.VideoWriter_fourcc(*'XVID'), 10.0, (w, h))
        self.__prepare_window(mode='initialize', frame_shape=(h, w), video_bit=self.cap.video_bit)
        # Phase 2: buffer the initialization batch, timestamping start/end.
        init_Y = np.empty((self.params.get('online', 'init_batch'),) + frame.shape)
        with h5py.File(self.out_mat_file, 'w') as f:
            f['initialize_first_frame_t'] = time.time()
        prev_time = time.time()
        time_d = 1 / self.fps
        for i in range(self.params.get('online', 'init_batch')):
            while time.time() - prev_time < time_d:
                pass
            prev_time = time.time()
            frame = self.__show_next_frame(['initialize'], mode='initialize', avi_out=avi_out)
            init_Y[i] = frame.copy()
            cv2.waitKey(1)
        self.time_frame += self.params.get('online', 'init_batch')
        with h5py.File(self.out_mat_file, 'a') as f:
            f['initialize_last_frame_t'] = time.time()
        self.__initialize_online(
            model_LN=model_LN,
            Y=caiman.base.movies.movie(init_Y.astype(np.float32)))
        self.__prepare_window(mode='analyze', frame_shape=(h, w), video_bit=self.cap.video_bit)
        # Resizable datasets: component count grows as OnACID detects neurons.
        with h5py.File(self.out_mat_file, 'a') as f:
            f['cnmfe_first_frame_t'] = time.time()
            A_size = frame.shape[0] * frame.shape[1]
            f.create_dataset('A', (A_size, self.N), maxshape=(A_size, None))
            f.create_dataset('C', (self.N, 100), maxshape=(None, None))
            f.create_dataset('S', (self.N, 100), maxshape=(None, None))
        online_start_frame = self.time_frame
        online_start_time = prev_time = time.time()
        # Phase 3: per-frame fitting until 'q'.
        while True:
            try:
                while time.time() - prev_time < time_d:
                    pass
                fps = 1 / (time.time() - prev_time)
                prev_time = time.time()
                if self.with_plot:
                    self.__set_plot(self.time_frame, frame_shape=(h, w), with_demixed=True)
                if self.time_frame % 100 == 0:
                    # Periodically snapshot estimates, extend the ring buffers,
                    # and save from a separate process so the loop keeps pace.
                    self.__set_results(self.time_frame)
                    self.estimates.noisyC = np.hstack(
                        (self.estimates.noisyC, np.zeros((self.estimates.noisyC.shape[0], 100))))
                    self.estimates.C_on = np.hstack(
                        (self.estimates.C_on, np.zeros((self.estimates.C_on.shape[0], 100))))
                    p = multiprocessing.Process(target=self.__save_results, args=[frame])
                    p.start()
                comp_num = self.M - self.params.get('init', 'nb')
                lines = [f'FPS: {fps:.4f}', f'neurons: {comp_num}', f'{self.time_frame} frame']
                if self.sync_patterns is not None and self.laser.is_shooting.value == 1:
                    lines.append('now shooting')
                    frame = self.__show_next_frame(lines, mode='analyze', avi_out=avi_out, text_color=(0, 0, 255))
                else:
                    frame = self.__show_next_frame(lines, mode='analyze', avi_out=avi_out)
                self.__fit_next_frame(frame, self.time_frame, model_LN=model_LN)
                if not self.sync_patterns is None:
                    # Fire the laser when every unit of some target pattern is
                    # exceeded by the latest z-scored activity.
                    latest = self.estimates.C_on[self.params.get('init', 'nb'):self.M, self.time_frame-1:self.time_frame]
                    if np.any(np.all(self.sync_patterns < zscore(latest), axis=0)):
                        self.laser.shoot_laser()
                self.time_frame += 1
            except:
                # NOTE(review): bare except silently swallows *all* per-frame
                # errors (including bugs) to keep the acquisition alive —
                # consider at least logging the exception.
                pass
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        print('Overall FPS:', (self.time_frame - online_start_frame) / (time.time() - online_start_time))
        print('accept num, reject num:', self.accept_comp_num, self.reject_comp_num)
        print('accept rate:', self.accept_comp_num / (self.accept_comp_num + self.reject_comp_num))
        print('reject rate:', self.reject_comp_num / (self.accept_comp_num + self.reject_comp_num))
        with h5py.File(self.out_mat_file, 'a') as f:
            f['cnmfe_last_frame_t'] = time.time()
            f.create_dataset('b0', data=self.estimates.b0)
            f.create_dataset('W', data=self.estimates.W.toarray())
        avi_out.release()
        # Best-effort close of the laser serial port (may never have opened).
        try:
            self.laser.ser.close()
        except:
            pass
def fit_from_scope(self, input_camera_id, output_dir=None):
self.__fit(
CV2VideoHandler(input_camera_id),
output_dir=output_dir)
def fit_from_file(self, input_video_path, mov_key=None, output_dir=None, with_plot=True):
_, ext = os.path.splitext(input_video_path)
if ext == '.avi':
cap = CV2VideoHandler(input_video_path)
elif ext in ['.h5', '.hdf5', '.mat']:
if mov_key is not None:
cap = H5VideoHandler(input_video_path, mov_key=mov_key)
else:
raise ValueError('`mov_key` is needed if use .h5 or .mat video file.')
elif input_video_path[-1] == '/':
cap = TIFFVideoHandler(input_video_path)
else:
raise ValueError('We only supports .avi, .h5, .hdf5, or .mat video file.')
self.__fit(cap, output_dir=output_dir, with_plot=with_plot)
| [
"modules.laser_handler.LaserHandler",
"numpy.hstack",
"multiprocessing.Process",
"modules.video_handler.CV2VideoHandler",
"time.sleep",
"cv2.imshow",
"cv2.destroyAllWindows",
"logging.info",
"os.path.exists",
"caiman.source_extraction.cnmf.initialization.downscale",
"numpy.repeat",
"modules.fp... | [((2647, 2675), 'modules.fp_detector.model.FpDetector', 'FpDetector', (['fp_detect_method'], {}), '(fp_detect_method)\n', (2657, 2675), False, 'from modules.fp_detector.model import FpDetector\n'), ((3669, 3685), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3679, 3685), False, 'import time\n'), ((3832, 3874), 'logging.info', 'logging.info', (['f"""fps was set to {self.fps}"""'], {}), "(f'fps was set to {self.fps}')\n", (3844, 3874), False, 'import logging\n'), ((4593, 4624), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': '"""uint8"""'}), "((h, w), dtype='uint8')\n", (4601, 4624), True, 'import numpy as np\n'), ((4773, 4819), 'cv2.applyColorMap', 'cv2.applyColorMap', (['plots', 'cv2.COLORMAP_VIRIDIS'], {}), '(plots, cv2.COLORMAP_VIRIDIS)\n', (4790, 4819), False, 'import cv2\n'), ((8521, 8544), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8542, 8544), False, 'import cv2\n'), ((8553, 8586), 'cv2.namedWindow', 'cv2.namedWindow', (['self.window_name'], {}), '(self.window_name)\n', (8568, 8586), False, 'import cv2\n'), ((11470, 11507), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'v_frame'], {}), '(self.window_name, v_frame)\n', (11480, 11507), False, 'import cv2\n'), ((14472, 14504), 'numpy.maximum', 'np.maximum', (["opts['ds_factor']", '(1)'], {}), "(opts['ds_factor'], 1)\n", (14482, 14504), True, 'import numpy as np\n'), ((15911, 15928), 'numpy.std', 'np.std', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (15917, 15928), True, 'import numpy as np\n'), ((15949, 15968), 'numpy.median', 'np.median', (['img_norm'], {}), '(img_norm)\n', (15958, 15968), True, 'import numpy as np\n'), ((22010, 22021), 'time.time', 'time.time', ([], {}), '()\n', (22019, 22021), False, 'import time\n'), ((25581, 25617), 'logging.info', 'logging.info', (['"""fp detector effected"""'], {}), "('fp detector effected')\n", (25593, 25617), False, 'import logging\n'), ((27058, 27097), 'os.path.join', 'os.path.join', (['output_dir', 
'"""neurons.mat"""'], {}), "(output_dir, 'neurons.mat')\n", (27070, 27097), False, 'import os\n'), ((27126, 27161), 'os.path.join', 'os.path.join', (['output_dir', '"""rec.avi"""'], {}), "(output_dir, 'rec.avi')\n", (27138, 27161), False, 'import os\n'), ((27630, 27641), 'time.time', 'time.time', ([], {}), '()\n', (27639, 27641), False, 'import time\n'), ((28063, 28088), 'modules.utils.HeatMap', 'HeatMap', ([], {'figsize': '(500, h)'}), '(figsize=(500, h))\n', (28070, 28088), False, 'from modules.utils import zscore, HeatMap\n'), ((28504, 28515), 'time.time', 'time.time', ([], {}), '()\n', (28513, 28515), False, 'import time\n'), ((29749, 29760), 'time.time', 'time.time', ([], {}), '()\n', (29758, 29760), False, 'import time\n'), ((32698, 32732), 'os.path.splitext', 'os.path.splitext', (['input_video_path'], {}), '(input_video_path)\n', (32714, 32732), False, 'import os\n'), ((2480, 2494), 'modules.laser_handler.LaserHandler', 'LaserHandler', ([], {}), '()\n', (2492, 2494), False, 'from modules.laser_handler import LaserHandler\n'), ((4220, 4282), 'cv2.resize', 'cv2.resize', (['bg', '(half_w, half_h)'], {'interpolation': 'cv2.INTER_AREA'}), '(bg, (half_w, half_h), interpolation=cv2.INTER_AREA)\n', (4230, 4282), False, 'import cv2\n'), ((4429, 4516), 'cv2.resize', 'cv2.resize', (['self.current_frame_cor', '(half_w, half_h)'], {'interpolation': 'cv2.INTER_AREA'}), '(self.current_frame_cor, (half_w, half_h), interpolation=cv2.\n INTER_AREA)\n', (4439, 4516), False, 'import cv2\n'), ((5148, 5224), 'numpy.zeros', 'np.zeros', (['(self.estimates.dims[0], self.estimates.dims[1], 3)'], {'dtype': '"""uint8"""'}), "((self.estimates.dims[0], self.estimates.dims[1], 3), dtype='uint8')\n", (5156, 5224), True, 'import numpy as np\n'), ((6118, 6152), 'numpy.hstack', 'np.hstack', (['[plots, heatmap_tensor]'], {}), '([plots, heatmap_tensor])\n', (6127, 6152), True, 'import numpy as np\n'), ((7156, 7214), 'numpy.stack', 'np.stack', (['[osi.s for osi in 
self.estimates.OASISinstances]'], {}), '([osi.s for osi in self.estimates.OASISinstances])\n', (7164, 7214), True, 'import numpy as np\n'), ((7394, 7425), 'numpy.zeros_like', 'np.zeros_like', (['self.estimates.C'], {}), '(self.estimates.C)\n', (7407, 7425), True, 'import numpy as np\n'), ((7811, 7844), 'h5py.File', 'h5py.File', (['self.out_mat_file', '"""a"""'], {}), "(self.out_mat_file, 'a')\n", (7820, 7844), False, 'import h5py\n'), ((8629, 8704), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.gain_text', 'self.window_name', '(0)', '(2)', 'self.__set_gain'], {}), '(self.gain_text, self.window_name, 0, 2, self.__set_gain)\n', (8647, 8704), False, 'import cv2\n'), ((8717, 8790), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.fps_text', 'self.window_name', '(1)', '(4)', 'self.__set_fps'], {}), '(self.fps_text, self.window_name, 1, 4, self.__set_fps)\n', (8735, 8790), False, 'import cv2\n'), ((8803, 8872), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.x0_text', 'self.window_name', '(0)', 'w', '(lambda x: x)'], {}), '(self.x0_text, self.window_name, 0, w, lambda x: x)\n', (8821, 8872), False, 'import cv2\n'), ((8884, 8953), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.x1_text', 'self.window_name', 'w', 'w', '(lambda x: x)'], {}), '(self.x1_text, self.window_name, w, w, lambda x: x)\n', (8902, 8953), False, 'import cv2\n'), ((8965, 9034), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.y0_text', 'self.window_name', '(0)', 'h', '(lambda x: x)'], {}), '(self.y0_text, self.window_name, 0, h, lambda x: x)\n', (8983, 9034), False, 'import cv2\n'), ((9046, 9115), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.y1_text', 'self.window_name', 'h', 'h', '(lambda x: x)'], {}), '(self.y1_text, self.window_name, h, h, lambda x: x)\n', (9064, 9115), False, 'import cv2\n'), ((9127, 9227), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.start_text', 'self.window_name', '(0)', '(1)', '(lambda x: True if x == 0 else False)'], {}), '(self.start_text, 
self.window_name, 0, 1, lambda x: True if\n x == 0 else False)\n', (9145, 9227), False, 'import cv2\n'), ((10172, 10224), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.gain_text', 'self.window_name'], {}), '(self.gain_text, self.window_name)\n', (10190, 10224), False, 'import cv2\n'), ((10247, 10297), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.x0_text', 'self.window_name'], {}), '(self.x0_text, self.window_name)\n', (10265, 10297), False, 'import cv2\n'), ((10320, 10370), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.x1_text', 'self.window_name'], {}), '(self.x1_text, self.window_name)\n', (10338, 10370), False, 'import cv2\n'), ((10393, 10443), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.y0_text', 'self.window_name'], {}), '(self.y0_text, self.window_name)\n', (10411, 10443), False, 'import cv2\n'), ((10466, 10516), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.y1_text', 'self.window_name'], {}), '(self.y1_text, self.window_name)\n', (10484, 10516), False, 'import cv2\n'), ((10529, 10580), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.fps_text', 'self.window_name'], {}), '(self.fps_text, self.window_name)\n', (10547, 10580), False, 'import cv2\n'), ((10954, 11008), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.dr_min_text', 'self.window_name'], {}), '(self.dr_min_text, self.window_name)\n', (10972, 11008), False, 'import cv2\n'), ((11035, 11089), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.dr_max_text', 'self.window_name'], {}), '(self.dr_max_text, self.window_name)\n', (11053, 11089), False, 'import cv2\n'), ((11195, 11233), 'numpy.dstack', 'np.dstack', (['[v_frame, v_frame, v_frame]'], {}), '([v_frame, v_frame, v_frame])\n', (11204, 11233), True, 'import numpy as np\n'), ((11377, 11469), 'cv2.putText', 'cv2.putText', (['v_frame', 'line', '(5, (i + 1) * 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', 'text_color'], {}), '(v_frame, line, (5, (i + 1) * 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n text_color)\n', (11388, 
11469), False, 'import cv2\n'), ((11627, 11663), 'logging.info', 'logging.info', (['"""Using Ring CNN model"""'], {}), "('Using Ring CNN model')\n", (11639, 11663), False, 'import logging\n'), ((12658, 12709), 'logging.info', 'logging.info', (['"""Starting background model training."""'], {}), "('Starting background model training.')\n", (12670, 12709), False, 'import logging\n'), ((14404, 14420), 'numpy.maximum', 'np.maximum', (['Y', '(0)'], {}), '(Y, 0)\n', (14414, 14420), True, 'import numpy as np\n'), ((16362, 16393), 'logging.info', 'logging.info', (['"""Using bare init"""'], {}), "('Using bare init')\n", (16374, 16393), False, 'import logging\n'), ((17936, 17967), 'numpy.zeros_like', 'np.zeros_like', (['self.estimates.C'], {}), '(self.estimates.C)\n', (17949, 17967), True, 'import numpy as np\n'), ((18204, 18216), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (18212, 18216), True, 'import numpy as np\n'), ((18249, 18261), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (18257, 18261), True, 'import numpy as np\n'), ((18302, 18337), 'numpy.std', 'np.std', (['self.estimates.YrA'], {'axis': '(-1)'}), '(self.estimates.YrA, axis=-1)\n', (18308, 18337), True, 'import numpy as np\n'), ((18371, 18383), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (18379, 18383), True, 'import numpy as np\n'), ((21964, 21984), 'numpy.maximum', 'np.maximum', (['frame', '(0)'], {}), '(frame, 0)\n', (21974, 21984), True, 'import numpy as np\n'), ((22042, 22055), 'numpy.sum', 'np.sum', (['frame'], {}), '(frame)\n', (22048, 22055), True, 'import numpy as np\n'), ((22242, 22287), 'cv2.resize', 'cv2.resize', (['frame_', 'self.img_norm.shape[::-1]'], {}), '(frame_, self.img_norm.shape[::-1])\n', (22252, 22287), False, 'import cv2\n'), ((26685, 26733), 'numpy.concatenate', 'np.concatenate', (['[checked_C, unchecked_C]'], {'axis': '(0)'}), '([checked_C, unchecked_C], axis=0)\n', (26699, 26733), True, 'import numpy as np\n'), ((26767, 26819), 'numpy.concatenate', 'np.concatenate', 
(['[checked_YrA, unchecked_YrA]'], {'axis': '(0)'}), '([checked_YrA, unchecked_YrA], axis=0)\n', (26781, 26819), True, 'import numpy as np\n'), ((27177, 27203), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (27191, 27203), False, 'import os\n'), ((27217, 27240), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (27228, 27240), False, 'import os\n'), ((27793, 27804), 'time.time', 'time.time', ([], {}), '()\n', (27802, 27804), False, 'import time\n'), ((28142, 28173), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (28164, 28173), False, 'import cv2\n'), ((28387, 28420), 'h5py.File', 'h5py.File', (['self.out_mat_file', '"""w"""'], {}), "(self.out_mat_file, 'w')\n", (28396, 28420), False, 'import h5py\n'), ((28471, 28482), 'time.time', 'time.time', ([], {}), '()\n', (28480, 28482), False, 'import time\n'), ((28708, 28719), 'time.time', 'time.time', ([], {}), '()\n', (28717, 28719), False, 'import time\n'), ((28864, 28878), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (28875, 28878), False, 'import cv2\n'), ((28960, 28993), 'h5py.File', 'h5py.File', (['self.out_mat_file', '"""a"""'], {}), "(self.out_mat_file, 'a')\n", (28969, 28993), False, 'import h5py\n'), ((29043, 29054), 'time.time', 'time.time', ([], {}), '()\n', (29052, 29054), False, 'import time\n'), ((29298, 29331), 'h5py.File', 'h5py.File', (['self.out_mat_file', '"""a"""'], {}), "(self.out_mat_file, 'a')\n", (29307, 29331), False, 'import h5py\n'), ((29377, 29388), 'time.time', 'time.time', ([], {}), '()\n', (29386, 29388), False, 'import time\n'), ((32097, 32130), 'h5py.File', 'h5py.File', (['self.out_mat_file', '"""a"""'], {}), "(self.out_mat_file, 'a')\n", (32106, 32130), False, 'import h5py\n'), ((32175, 32186), 'time.time', 'time.time', ([], {}), '()\n', (32184, 32186), False, 'import time\n'), ((32517, 32549), 'modules.video_handler.CV2VideoHandler', 'CV2VideoHandler', (['input_camera_id'], {}), 
'(input_camera_id)\n', (32532, 32549), False, 'from modules.video_handler import CV2VideoHandler, H5VideoHandler, TIFFVideoHandler\n'), ((32777, 32810), 'modules.video_handler.CV2VideoHandler', 'CV2VideoHandler', (['input_video_path'], {}), '(input_video_path)\n', (32792, 32810), False, 'from modules.video_handler import CV2VideoHandler, H5VideoHandler, TIFFVideoHandler\n'), ((2351, 2384), 'h5py.File', 'h5py.File', (['sync_pattern_file', '"""r"""'], {}), "(sync_pattern_file, 'r')\n", (2360, 2384), False, 'import h5py\n'), ((2428, 2454), 'modules.utils.zscore', 'zscore', (["f['W'][()]"], {'axis': '(0)'}), "(f['W'][()], axis=0)\n", (2434, 2454), False, 'from modules.utils import zscore, HeatMap\n'), ((5698, 5765), 'cv2.resize', 'cv2.resize', (['colored', '(half_w, half_h)'], {'interpolation': 'cv2.INTER_AREA'}), '(colored, (half_w, half_h), interpolation=cv2.INTER_AREA)\n', (5708, 5765), False, 'import cv2\n'), ((6044, 6096), 'numpy.concatenate', 'np.concatenate', (['[latest, self.sync_patterns]'], {'axis': '(1)'}), '([latest, self.sync_patterns], axis=1)\n', (6058, 6096), True, 'import numpy as np\n'), ((9268, 9353), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.demixed_bias_text', 'self.window_name', '(1)', '(10)', '(lambda x: x)'], {}), '(self.demixed_bias_text, self.window_name, 1, 10, lambda x: x\n )\n', (9286, 9353), False, 'import cv2\n'), ((9435, 9510), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.dr_min_text', 'self.window_name', '(0)', '(255)', '(lambda x: x)'], {}), '(self.dr_min_text, self.window_name, 0, 255, lambda x: x)\n', (9453, 9510), False, 'import cv2\n'), ((9526, 9603), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.dr_max_text', 'self.window_name', '(255)', '(255)', '(lambda x: x)'], {}), '(self.dr_max_text, self.window_name, 255, 255, lambda x: x)\n', (9544, 9603), False, 'import cv2\n'), ((9637, 9714), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.dr_min_text', 'self.window_name', '(0)', '(65535)', '(lambda x: x)'], 
{}), '(self.dr_min_text, self.window_name, 0, 65535, lambda x: x)\n', (9655, 9714), False, 'import cv2\n'), ((9730, 9815), 'cv2.createTrackbar', 'cv2.createTrackbar', (['self.dr_max_text', 'self.window_name', '(65535)', '(65535)', '(lambda x: x)'], {}), '(self.dr_max_text, self.window_name, 65535, 65535, lambda\n x: x)\n', (9748, 9815), False, 'import cv2\n'), ((10645, 10705), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.demixed_bias_text', 'self.window_name'], {}), '(self.demixed_bias_text, self.window_name)\n', (10663, 10705), False, 'import cv2\n'), ((11291, 11322), 'numpy.hstack', 'np.hstack', (['[v_frame, self.plot]'], {}), '([v_frame, self.plot])\n', (11300, 11322), True, 'import numpy as np\n'), ((13402, 13467), 'logging.info', 'logging.info', (['"""Estimating model from scratch, starting training."""'], {}), "('Estimating model from scratch, starting training.')\n", (13414, 13467), False, 'import logging\n'), ((15168, 15188), 'caiman.movie', 'caiman.movie', (['Y_filt'], {}), '(Y_filt)\n', (15180, 15188), False, 'import caiman\n'), ((19182, 19196), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (19193, 19196), False, 'import cv2\n'), ((19209, 19239), 'cv2.destroyWindow', 'cv2.destroyWindow', (['window_name'], {}), '(window_name)\n', (19226, 19239), False, 'import cv2\n'), ((19258, 19283), 'cv2.resize', 'cv2.resize', (['Ain', '(d2, d1)'], {}), '(Ain, (d2, d1))\n', (19268, 19283), False, 'import cv2\n'), ((20377, 20408), 'numpy.zeros_like', 'np.zeros_like', (['self.estimates.C'], {}), '(self.estimates.C)\n', (20390, 20408), True, 'import numpy as np\n'), ((20645, 20657), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (20653, 20657), True, 'import numpy as np\n'), ((20690, 20702), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (20698, 20702), True, 'import numpy as np\n'), ((20743, 20778), 'numpy.std', 'np.std', (['self.estimates.YrA'], {'axis': '(-1)'}), '(self.estimates.YrA, axis=-1)\n', (20749, 20778), True, 'import numpy as np\n'), 
((20812, 20824), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (20820, 20824), True, 'import numpy as np\n'), ((23531, 23593), 'caiman.motion_correction.high_pass_filter_space', 'high_pass_filter_space', (['templ', "self.params.motion['gSig_filt']"], {}), "(templ, self.params.motion['gSig_filt'])\n", (23553, 23593), False, 'from caiman.motion_correction import high_pass_filter_space, motion_correct_iteration_fast, sliding_window, tile_and_correct\n'), ((23695, 24097), 'caiman.motion_correction.tile_and_correct', 'tile_and_correct', (['frame_', 'templ', "self.params.motion['strides']", "self.params.motion['overlaps']", "self.params.motion['max_shifts']"], {'newoverlaps': 'None', 'newstrides': 'None', 'upsample_factor_grid': '(4)', 'upsample_factor_fft': '(10)', 'show_movie': '(False)', 'max_deviation_rigid': "self.params.motion['max_deviation_rigid']", 'add_to_movie': '(0)', 'shifts_opencv': '(True)', 'gSig_filt': 'None', 'use_cuda': '(False)', 'border_nan': '"""copy"""'}), "(frame_, templ, self.params.motion['strides'], self.params.\n motion['overlaps'], self.params.motion['max_shifts'], newoverlaps=None,\n newstrides=None, upsample_factor_grid=4, upsample_factor_fft=10,\n show_movie=False, max_deviation_rigid=self.params.motion[\n 'max_deviation_rigid'], add_to_movie=0, shifts_opencv=True, gSig_filt=\n None, use_cuda=False, border_nan='copy')\n", (23711, 24097), False, 'from caiman.motion_correction import high_pass_filter_space, motion_correct_iteration_fast, sliding_window, tile_and_correct\n'), ((24554, 24640), 'caiman.motion_correction.motion_correct_iteration_fast', 'motion_correct_iteration_fast', (['frame_', 'templ', 'max_shifts_online', 'max_shifts_online'], {}), '(frame_, templ, max_shifts_online,\n max_shifts_online)\n', (24583, 24640), False, 'from caiman.motion_correction import high_pass_filter_space, motion_correct_iteration_fast, sliding_window, tile_and_correct\n'), ((26604, 26652), 'numpy.concatenate', 'np.concatenate', (['[checked_A, 
unchecked_A]'], {'axis': '(1)'}), '([checked_A, unchecked_A], axis=1)\n', (26618, 26652), True, 'import numpy as np\n'), ((27935, 27988), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self.start_text', 'self.window_name'], {}), '(self.start_text, self.window_name)\n', (27953, 27988), False, 'import cv2\n'), ((29959, 29970), 'time.time', 'time.time', ([], {}), '()\n', (29968, 29970), False, 'import time\n'), ((18747, 18777), 'h5py.File', 'h5py.File', (['self.seed_file', '"""r"""'], {}), "(self.seed_file, 'r')\n", (18756, 18777), False, 'import h5py\n'), ((19127, 19168), 'numpy.dstack', 'np.dstack', (['[Ain_gray, Ain_gray, Ain_gray]'], {}), '([Ain_gray, Ain_gray, Ain_gray])\n', (19136, 19168), True, 'import numpy as np\n'), ((24455, 24518), 'caiman.motion_correction.high_pass_filter_space', 'high_pass_filter_space', (['frame_', "self.params.motion['gSig_filt']"], {}), "(frame_, self.params.motion['gSig_filt'])\n", (24477, 24518), False, 'from caiman.motion_correction import high_pass_filter_space, motion_correct_iteration_fast, sliding_window, tile_and_correct\n'), ((24716, 24764), 'numpy.float32', 'np.float32', (['[[1, 0, shift[1]], [0, 1, shift[0]]]'], {}), '([[1, 0, shift[1]], [0, 1, shift[0]]])\n', (24726, 24764), True, 'import numpy as np\n'), ((24797, 24904), 'cv2.warpAffine', 'cv2.warpAffine', (['frame_orig', 'M', 'frame_.shape[::-1]'], {'flags': 'cv2.INTER_CUBIC', 'borderMode': 'cv2.BORDER_REFLECT'}), '(frame_orig, M, frame_.shape[::-1], flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_REFLECT)\n', (24811, 24904), False, 'import cv2\n'), ((27714, 27725), 'time.time', 'time.time', ([], {}), '()\n', (27723, 27725), False, 'import time\n'), ((28629, 28640), 'time.time', 'time.time', ([], {}), '()\n', (28638, 28640), False, 'import time\n'), ((30526, 30591), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'self.__save_results', 'args': '[frame]'}), '(target=self.__save_results, args=[frame])\n', (30549, 30591), False, 'import 
multiprocessing\n'), ((31634, 31648), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (31645, 31648), False, 'import cv2\n'), ((31764, 31775), 'time.time', 'time.time', ([], {}), '()\n', (31773, 31775), False, 'import time\n'), ((32915, 32964), 'modules.video_handler.H5VideoHandler', 'H5VideoHandler', (['input_video_path'], {'mov_key': 'mov_key'}), '(input_video_path, mov_key=mov_key)\n', (32929, 32964), False, 'from modules.video_handler import CV2VideoHandler, H5VideoHandler, TIFFVideoHandler\n'), ((33130, 33164), 'modules.video_handler.TIFFVideoHandler', 'TIFFVideoHandler', (['input_video_path'], {}), '(input_video_path)\n', (33146, 33164), False, 'from modules.video_handler import CV2VideoHandler, H5VideoHandler, TIFFVideoHandler\n'), ((15061, 15120), 'caiman.motion_correction.high_pass_filter_space', 'high_pass_filter_space', (['yf', "self.params.motion['gSig_filt']"], {}), "(yf, self.params.motion['gSig_filt'])\n", (15083, 15120), False, 'from caiman.motion_correction import high_pass_filter_space, motion_correct_iteration_fast, sliding_window, tile_and_correct\n'), ((18158, 18169), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (18165, 18169), True, 'import numpy as np\n'), ((22524, 22580), 'numpy.median', 'np.median', (['self.estimates.C_on[:self.M, t - 51:t - 1]', '(1)'], {}), '(self.estimates.C_on[:self.M, t - 51:t - 1], 1)\n', (22533, 22580), True, 'import numpy as np\n'), ((27898, 27912), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (27909, 27912), False, 'import cv2\n'), ((29820, 29831), 'time.time', 'time.time', ([], {}), '()\n', (29829, 29831), False, 'import time\n'), ((29906, 29917), 'time.time', 'time.time', ([], {}), '()\n', (29915, 29917), False, 'import time\n'), ((14363, 14384), 'numpy.expand_dims', 'np.expand_dims', (['Y', '(-1)'], {}), '(Y, -1)\n', (14377, 14384), True, 'import numpy as np\n'), ((20599, 20610), 'numpy.ones', 'np.ones', (['nr'], {}), '(nr)\n', (20606, 20610), True, 'import numpy as np\n'), ((23071, 23134), 
'caiman.source_extraction.cnmf.initialization.downscale', 'initialization.downscale', (['(frame_ - templ - b0)', '(ssub_B, ssub_B)'], {}), '(frame_ - templ - b0, (ssub_B, ssub_B))\n', (23095, 23134), False, 'from caiman.source_extraction.cnmf import online_cnmf, pre_processing, initialization\n'), ((30305, 30352), 'numpy.zeros', 'np.zeros', (['(self.estimates.noisyC.shape[0], 100)'], {}), '((self.estimates.noisyC.shape[0], 100))\n', (30313, 30352), True, 'import numpy as np\n'), ((30454, 30499), 'numpy.zeros', 'np.zeros', (['(self.estimates.C_on.shape[0], 100)'], {}), '((self.estimates.C_on.shape[0], 100))\n', (30462, 30499), True, 'import numpy as np\n'), ((23313, 23337), 'numpy.repeat', 'np.repeat', (['Wb', 'ssub_B', '(0)'], {}), '(Wb, ssub_B, 0)\n', (23322, 23337), True, 'import numpy as np\n'), ((31464, 31478), 'modules.utils.zscore', 'zscore', (['latest'], {}), '(latest)\n', (31470, 31478), False, 'from modules.utils import zscore, HeatMap\n')] |
import numpy as np
import cv2
from shapely.geometry import Polygon
import pyclipper
class MakeShrinkMap():
    r'''
    Making binary mask from detection data with ICDAR format.
    Typically following the process of class `MakeICDARData`.
    '''

    def __init__(self, min_text_size=8, shrink_ratio=0.4):
        # Text instances whose shorter side is below ``min_text_size`` pixels
        # are masked out (ignored) instead of shrunk.
        self.min_text_size = min_text_size
        # Shrink ratio r from the DB paper; offset D = A * (1 - r^2) / L.
        self.shrink_ratio = shrink_ratio

    def __call__(self, data: dict) -> dict:
        """
        Build the shrunk text-region ground truth and its training mask.

        :param data: {'img':,'text_polys':,'texts':,'ignore_tags':}
        :return: the same dict with 'shrink_map' and 'shrink_mask' added.
        """
        image = data['img']
        text_polys = data['text_polys']
        ignore_tags = data['ignore_tags']

        h, w = image.shape[:2]
        text_polys, ignore_tags = self.validate_polygons(text_polys, ignore_tags, h, w)
        gt = np.zeros((h, w), dtype=np.float32)
        mask = np.ones((h, w), dtype=np.float32)
        for i in range(len(text_polys)):
            polygon = text_polys[i]
            height = max(polygon[:, 1]) - min(polygon[:, 1])
            width = max(polygon[:, 0]) - min(polygon[:, 0])
            if ignore_tags[i] or min(height, width) < self.min_text_size:
                # Too small or already ignored: zero the mask so the loss
                # skips this region entirely.
                cv2.fillPoly(mask, polygon.astype(np.int32)[np.newaxis, :, :], 0)
                ignore_tags[i] = True
            else:
                polygon_shape = Polygon(polygon)
                # Shrink offset D = A * (1 - r^2) / L (DB paper, Eq. 6).
                distance = polygon_shape.area * (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length
                subject = [tuple(l) for l in text_polys[i]]
                padding = pyclipper.PyclipperOffset()
                padding.AddPath(subject, pyclipper.JT_ROUND,
                                pyclipper.ET_CLOSEDPOLYGON)
                shrinked = padding.Execute(-distance)
                if shrinked == []:
                    # Polygon collapsed during shrinking: treat as ignored.
                    cv2.fillPoly(mask, polygon.astype(np.int32)[np.newaxis, :, :], 0)
                    ignore_tags[i] = True
                    continue
                shrinked = np.array(shrinked[0]).reshape(-1, 2)
                cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)

        data['shrink_map'] = gt
        data['shrink_mask'] = mask
        return data

    def validate_polygons(self, polygons, ignore_tags, h, w):
        '''
        Clip polygons to the image, flag degenerate ones, and normalize
        vertex orientation.

        polygons (numpy.array, required): of shape (num_instances, num_points, 2)
        '''
        if len(polygons) == 0:
            return polygons, ignore_tags
        assert len(polygons) == len(ignore_tags)
        for polygon in polygons:
            polygon[:, 0] = np.clip(polygon[:, 0], 0, w - 1)
            polygon[:, 1] = np.clip(polygon[:, 1], 0, h - 1)

        for i in range(len(polygons)):
            area = self.polygon_area(polygons[i])
            if abs(area) < 1:
                # Degenerate (zero-area) polygon: ignore it.
                ignore_tags[i] = True
            if area > 0:
                # Reverse vertex order so all polygons share one orientation.
                polygons[i] = polygons[i][::-1, :]
        return polygons, ignore_tags

    def polygon_area(self, polygon):
        """Signed area of ``polygon`` via the shoelace formula (trapezoid form)."""
        edge = 0
        for i in range(polygon.shape[0]):
            next_index = (i + 1) % polygon.shape[0]
            # BUG FIX: the second factor must be a SUM of the y coordinates
            # (shoelace/trapezoid formula). The original used a difference,
            # which does not compute an area at all, so the orientation check
            # in validate_polygons was effectively random.
            edge += (polygon[next_index, 0] - polygon[i, 0]) * (polygon[next_index, 1] + polygon[i, 1])
        return edge / 2.
| [
"numpy.clip",
"numpy.ones",
"numpy.power",
"numpy.array",
"numpy.zeros",
"shapely.geometry.Polygon",
"pyclipper.PyclipperOffset"
] | [((828, 862), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.float32'}), '((h, w), dtype=np.float32)\n', (836, 862), True, 'import numpy as np\n'), ((878, 911), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.float32'}), '((h, w), dtype=np.float32)\n', (885, 911), True, 'import numpy as np\n'), ((2528, 2560), 'numpy.clip', 'np.clip', (['polygon[:, 0]', '(0)', '(w - 1)'], {}), '(polygon[:, 0], 0, w - 1)\n', (2535, 2560), True, 'import numpy as np\n'), ((2589, 2621), 'numpy.clip', 'np.clip', (['polygon[:, 1]', '(0)', '(h - 1)'], {}), '(polygon[:, 1], 0, h - 1)\n', (2596, 2621), True, 'import numpy as np\n'), ((1354, 1370), 'shapely.geometry.Polygon', 'Polygon', (['polygon'], {}), '(polygon)\n', (1361, 1370), False, 'from shapely.geometry import Polygon\n'), ((1565, 1592), 'pyclipper.PyclipperOffset', 'pyclipper.PyclipperOffset', ([], {}), '()\n', (1590, 1592), False, 'import pyclipper\n'), ((1987, 2008), 'numpy.array', 'np.array', (['shrinked[0]'], {}), '(shrinked[0])\n', (1995, 2008), True, 'import numpy as np\n'), ((1424, 1454), 'numpy.power', 'np.power', (['self.shrink_ratio', '(2)'], {}), '(self.shrink_ratio, 2)\n', (1432, 1454), True, 'import numpy as np\n')] |
# imports
from __future__ import print_function
from IPython.display import display, Image
from six.moves import cPickle as pickle
from six.moves.urllib.request import urlretrieve
from sklearn.linear_model import LogisticRegression
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from constants import *
from commonconstants import NOT_MNIST_ZIPS_DIR, NOT_MNIST_IMAGES_DIR, NOT_MNIST_PICKLES_DIR
from file_helper import get_file_name, join_paths
# Fix the RNG so shuffling/splitting below is reproducible across runs.
np.random.seed(NUMPY_SEED)

# Last download percentage printed by download_progress_hook
# (module-level state shared across calls).
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
    """A hook to report the progress of a download. This is mostly intended for users with
    slow internet connections. Reports every 5% change in download progress.
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)

    # Only emit output when the integer percentage actually moved.
    if percent == last_percent_reported:
        return
    marker = '%s%%' % percent if percent % 5 == 0 else '.'
    sys.stdout.write(marker)
    sys.stdout.flush()
    last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download ``filename`` into the zips directory unless it already exists,
    then verify its size against ``expected_bytes``.

    Raises an Exception when the file on disk has the wrong size.
    """
    dest_filename = os.path.join(NOT_MNIST_ZIPS_DIR, filename)
    if force or not os.path.exists(dest_filename):
        print('Attempting to download:', filename)
        urlretrieve(DATASET_DOWNLOAD_URL + filename, dest_filename,
                    reporthook=download_progress_hook)
        print('\nDownload complete!')

    size_on_disk = os.stat(dest_filename).st_size
    if size_on_disk != expected_bytes:
        raise Exception(
            'Failed to verify ' + dest_filename + '. Get using abrowser.'
        )
    print('Found and verified', dest_filename)
    return dest_filename
def maybe_extract(filename, force=False):
    """Extract the .tar.gz archive into the images directory (skipped when the
    target folder already exists) and return the per-class folder paths.

    Raises an Exception unless exactly NUM_OF_CLASSES class folders are found.
    """
    base = os.path.splitext(os.path.splitext(get_file_name(filename))[0])[0]  # strip .tar.gz
    root = join_paths(NOT_MNIST_IMAGES_DIR, base)
    if force or not os.path.isdir(root):
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall(NOT_MNIST_IMAGES_DIR)
        tar.close()
    else:
        # Already extracted; pass force=True to redo it.
        print('%s already present - Skipping extraction of %s.' %
              (root, filename))

    data_folders = []
    for entry in sorted(os.listdir(root)):
        candidate = os.path.join(root, entry)
        if os.path.isdir(candidate):
            data_folders.append(candidate)
    if len(data_folders) != NUM_OF_CLASSES:
        raise Exception(
            'Expected %d folders, one per class. Found %d instead.' % (
                NUM_OF_CLASSES, len(data_folders)))
    print(data_folders)
    return data_folders
def load_letter(folder, min_num_images):
    """Load and normalize every readable image of one letter class.

    Pixel values are rescaled to roughly [-0.5, 0.5]. Unreadable files are
    skipped; a wrongly shaped image aborts the load. Raises when fewer than
    ``min_num_images`` images survive.
    """
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), IMAGE_SIZE, IMAGE_SIZE),
                         dtype=np.float32)
    print(folder)
    loaded = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            pixels = imageio.imread(image_file).astype(float)
            image_data = (pixels - PIXEl_DEPTh / 2) / PIXEl_DEPTh
            if image_data.shape != (IMAGE_SIZE, IMAGE_SIZE):
                # Not caught below: a bad shape is a hard failure by design.
                raise Exception('Unexpected image shape: %s' %
                                str(image_data.shape))
            dataset[loaded, :, :] = image_data
            loaded += 1
        except (IOError, ValueError) as e:
            print('Could not read:', image_file,
                  ':', e, '- it\'s ok, skipping.')

    dataset = dataset[0:loaded, :, :]
    if loaded < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (loaded, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each class folder's image tensor (once) and return the pickle paths."""
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if not force and os.path.exists(set_filename):
            # Already pickled; pass force=True to redo it.
            print('%s already present - Skipping pickling.' % set_filename)
            continue
        print('Pickling %s.' % set_filename)
        dataset = load_letter(folder, min_num_images_per_class)
        try:
            with open(set_filename, 'wb') as f:
                pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('Unable to save data to', set_filename, ':', e)

    return dataset_names
def make_arrays(nb_rows, img_size):
    """Allocate an uninitialized (nb_rows, img_size, img_size) float32 dataset
    and an int32 label vector; both are None when ``nb_rows`` is falsy."""
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Merge the per-class pickles into balanced train/validation arrays.

    Each class contributes an equal share (integer division) of
    ``valid_size`` and ``train_size`` rows; each class's letters are
    shuffled before the split so the selections are random.

    :param pickle_files: one pickle path per class, each holding a 3-D tensor.
    :param train_size: total number of training rows across all classes.
    :param valid_size: total number of validation rows (0 disables validation).
    :return: (valid_dataset, valid_labels, train_dataset, train_labels);
             the validation pair is None when ``valid_size`` is 0.
    """
    # NOTE(review): this local rebinding shadows the NUM_OF_CLASSES constant
    # imported from `constants` for the rest of this function.
    NUM_OF_CLASSES = len(pickle_files)
    valid_dataset, valid_labels = make_arrays(valid_size, IMAGE_SIZE)
    train_dataset, train_labels = make_arrays(train_size, IMAGE_SIZE)
    vsize_per_class = valid_size // NUM_OF_CLASSES
    tsize_per_class = train_size // NUM_OF_CLASSES

    # Running write windows into the output arrays; each class advances
    # start/end by its per-class quota. Order of updates matters.
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    # Within one class's (shuffled) letter_set: rows [0, vsize_per_class) go
    # to validation, rows [vsize_per_class, end_l) go to training.
    end_l = vsize_per_class+tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                # let's shuffle the letters to have random validation and training set
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class

                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise

    return valid_dataset, valid_labels, train_dataset, train_labels
def get_dataset_filenames():
    """Download/extract both notMNIST archives and return the (train, test)
    lists of per-class pickle paths."""
    train_filename = maybe_download(NOT_MNIST_FILENAME_LARGE, 247336696)
    test_filename = maybe_download(NOT_MNIST_FILENAME_SMALL, 8458043)
    train_folders = maybe_extract(train_filename)
    test_folders = maybe_extract(test_filename)
    return (maybe_pickle(train_folders, MINIMUM_TRAIN_SAMPLES_PER_CLASS),
            maybe_pickle(test_folders, MINIMUM_TEST_SAMPLES_PER_CLASS))
def randomize(dataset, labels):
    """Shuffle ``dataset`` and ``labels`` together with one shared random
    permutation, keeping row i of the data aligned with label i."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
def main(train_size, valid_size, test_size, filename):
    """Build shuffled train/validation/test splits and pickle them together.

    :param train_size: number of training rows to draw across all classes.
    :param valid_size: number of validation rows.
    :param test_size: number of test rows.
    :param filename: output pickle name inside NOT_MNIST_PICKLES_DIR.
    :raises Exception: re-raised after logging when the pickle cannot be saved.
    """
    train_datasets, test_datasets = get_dataset_filenames()
    valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
        train_datasets, train_size, valid_size)
    _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)

    print('Training:', train_dataset.shape, train_labels.shape)
    print('Validation:', valid_dataset.shape, valid_labels.shape)
    print('Testing:', test_dataset.shape, test_labels.shape)

    # Shuffle each split so classes are not stored in contiguous runs.
    train_dataset, train_labels = randomize(train_dataset, train_labels)
    test_dataset, test_labels = randomize(test_dataset, test_labels)
    valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)

    pickle_file = os.path.join(NOT_MNIST_PICKLES_DIR, filename)
    save = {
        'train_dataset': train_dataset,
        'train_labels': train_labels,
        'valid_dataset': valid_dataset,
        'valid_labels': valid_labels,
        'test_dataset': test_dataset,
        'test_labels': test_labels,
    }
    try:
        # BUG FIX: the original used open()/close() without a finally, leaking
        # the file handle when pickle.dump raised; ``with`` always closes it.
        with open(pickle_file, 'wb') as f:
            pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
if __name__ == '__main__':
    # Build both dataset pickles: the full-size one, then a small one for
    # quick experiments. Sizes and filenames come from `constants`.
    # Large
    main(TRAINING_SIZE, VALIDATION_SIZE, TEST_SIZE, FINAL_DATASET_FILENAME)
    # Small
    main(TRAINING_SIZE_SMALL, VALIDATION_SIZE_SMALL, TEST_SIZE_SMALL, FINAL_DATASET_FILENAME_SMALL)
| [
"tarfile.open",
"file_helper.join_paths",
"sys.stdout.write",
"file_helper.get_file_name",
"numpy.mean",
"os.path.exists",
"os.listdir",
"six.moves.cPickle.load",
"six.moves.cPickle.dump",
"os.path.isdir",
"numpy.random.seed",
"sys.stdout.flush",
"numpy.random.permutation",
"numpy.std",
... | [((502, 528), 'numpy.random.seed', 'np.random.seed', (['NUMPY_SEED'], {}), '(NUMPY_SEED)\n', (516, 528), True, 'import numpy as np\n'), ((1229, 1271), 'os.path.join', 'os.path.join', (['NOT_MNIST_ZIPS_DIR', 'filename'], {}), '(NOT_MNIST_ZIPS_DIR, filename)\n', (1241, 1271), False, 'import os\n'), ((1557, 1579), 'os.stat', 'os.stat', (['dest_filename'], {}), '(dest_filename)\n', (1564, 1579), False, 'import os\n'), ((1968, 2006), 'file_helper.join_paths', 'join_paths', (['NOT_MNIST_IMAGES_DIR', 'root'], {}), '(NOT_MNIST_IMAGES_DIR, root)\n', (1978, 2006), False, 'from file_helper import get_file_name, join_paths\n'), ((2907, 2925), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2917, 2925), False, 'import os\n'), ((7220, 7258), 'numpy.random.permutation', 'np.random.permutation', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (7241, 7258), True, 'import numpy as np\n'), ((8145, 8190), 'os.path.join', 'os.path.join', (['NOT_MNIST_PICKLES_DIR', 'filename'], {}), '(NOT_MNIST_PICKLES_DIR, filename)\n', (8157, 8190), False, 'import os\n'), ((1396, 1495), 'six.moves.urllib.request.urlretrieve', 'urlretrieve', (['(DATASET_DOWNLOAD_URL + filename)', 'dest_filename'], {'reporthook': 'download_progress_hook'}), '(DATASET_DOWNLOAD_URL + filename, dest_filename, reporthook=\n download_progress_hook)\n', (1407, 1495), False, 'from six.moves.urllib.request import urlretrieve\n'), ((2014, 2033), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (2027, 2033), False, 'import os\n'), ((2305, 2327), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (2317, 2327), False, 'import tarfile\n'), ((2336, 2354), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2352, 2354), False, 'import sys\n'), ((2449, 2470), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2461, 2470), False, 'import os\n'), ((3132, 3159), 'os.path.join', 'os.path.join', (['folder', 'image'], {}), '(folder, image)\n', (3144, 3159), False, 
'import os\n'), ((3983, 3999), 'numpy.mean', 'np.mean', (['dataset'], {}), '(dataset)\n', (3990, 3999), True, 'import numpy as np\n'), ((4034, 4049), 'numpy.std', 'np.std', (['dataset'], {}), '(dataset)\n', (4040, 4049), True, 'import numpy as np\n'), ((4940, 4999), 'numpy.ndarray', 'np.ndarray', (['(nb_rows, img_size, img_size)'], {'dtype': 'np.float32'}), '((nb_rows, img_size, img_size), dtype=np.float32)\n', (4950, 4999), True, 'import numpy as np\n'), ((5017, 5052), 'numpy.ndarray', 'np.ndarray', (['nb_rows'], {'dtype': 'np.int32'}), '(nb_rows, dtype=np.int32)\n', (5027, 5052), True, 'import numpy as np\n'), ((8526, 8571), 'six.moves.cPickle.dump', 'pickle.dump', (['save', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save, f, pickle.HIGHEST_PROTOCOL)\n', (8537, 8571), True, 'from six.moves import cPickle as pickle\n'), ((963, 997), 'sys.stdout.write', 'sys.stdout.write', (["('%s%%' % percent)"], {}), "('%s%%' % percent)\n", (979, 997), False, 'import sys\n'), ((1010, 1028), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1026, 1028), False, 'import sys\n'), ((1055, 1076), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (1071, 1076), False, 'import sys\n'), ((1089, 1107), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1105, 1107), False, 'import sys\n'), ((1292, 1321), 'os.path.exists', 'os.path.exists', (['dest_filename'], {}), '(dest_filename)\n', (1306, 1321), False, 'import os\n'), ((4294, 4322), 'os.path.exists', 'os.path.exists', (['set_filename'], {}), '(set_filename)\n', (4308, 4322), False, 'import os\n'), ((2487, 2503), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2497, 2503), False, 'import os\n'), ((2530, 2551), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2542, 2551), False, 'import os\n'), ((5739, 5753), 'six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5750, 5753), True, 'from six.moves import cPickle as pickle\n'), ((5857, 5886), 'numpy.random.shuffle', 
'np.random.shuffle', (['letter_set'], {}), '(letter_set)\n', (5874, 5886), True, 'import numpy as np\n'), ((1907, 1930), 'file_helper.get_file_name', 'get_file_name', (['filename'], {}), '(filename)\n', (1920, 1930), False, 'from file_helper import get_file_name, join_paths\n'), ((4688, 4736), 'six.moves.cPickle.dump', 'pickle.dump', (['dataset', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(dataset, f, pickle.HIGHEST_PROTOCOL)\n', (4699, 4736), True, 'from six.moves import cPickle as pickle\n'), ((3199, 3225), 'imageio.imread', 'imageio.imread', (['image_file'], {}), '(image_file)\n', (3213, 3225), False, 'import imageio\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.